xref: /openbmc/qemu/linux-user/syscall.c (revision 8500476f)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include "linux_loop.h"
115 #include "uname.h"
116 
117 #include "qemu.h"
118 #include "qemu/guest-random.h"
119 #include "user/syscall-trace.h"
120 #include "qapi/error.h"
121 #include "fd-trans.h"
122 #include "tcg/tcg.h"
123 
124 #ifndef CLONE_IO
125 #define CLONE_IO                0x80000000      /* Clone io context */
126 #endif
127 
128 /* We can't directly call the host clone syscall, because this will
129  * badly confuse libc (breaking mutexes, for example). So we must
130  * divide clone flags into:
131  *  * flag combinations that look like pthread_create()
132  *  * flag combinations that look like fork()
133  *  * flags we can implement within QEMU itself
134  *  * flags we can't support and will return an error for
135  */
136 /* For thread creation, all these flags must be present; for
137  * fork, none must be present.
138  */
139 #define CLONE_THREAD_FLAGS                              \
140     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
141      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
142 
143 /* These flags are ignored:
144  * CLONE_DETACHED is now ignored by the kernel;
145  * CLONE_IO is just an optimisation hint to the I/O scheduler
146  */
147 #define CLONE_IGNORED_FLAGS                     \
148     (CLONE_DETACHED | CLONE_IO)
149 
150 /* Flags for fork which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_FORK_FLAGS               \
152     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
153      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
154 
155 /* Flags for thread creation which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
159 
160 #define CLONE_INVALID_FORK_FLAGS                                        \
161     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
162 
163 #define CLONE_INVALID_THREAD_FLAGS                                      \
164     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
165        CLONE_IGNORED_FLAGS))
166 
167 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
168  * have almost all been allocated. We cannot support any of
169  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
170  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
171  * The checks against the invalid thread masks above will catch these.
172  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
173  */
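/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * how a guest clone flags word can be classified with the masks above.
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // looks like pthread_create(); reject CLONE_INVALID_THREAD_FLAGS bits
 *     } else if (!(flags & CLONE_THREAD_FLAGS)) {
 *         // looks like fork(); reject CLONE_INVALID_FORK_FLAGS bits
 *     } else {
 *         // a partial set of the thread flags is unsupported
 *     }
 */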
174 
175 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
176  * once. This exercises the codepaths for restart.
177  */
178 //#define DEBUG_ERESTARTSYS
179 
180 //#include <linux/msdos_fs.h>
181 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
182 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
183 
184 #undef _syscall0
185 #undef _syscall1
186 #undef _syscall2
187 #undef _syscall3
188 #undef _syscall4
189 #undef _syscall5
190 #undef _syscall6
191 
192 #define _syscall0(type,name)		\
193 static type name (void)			\
194 {					\
195 	return syscall(__NR_##name);	\
196 }
197 
198 #define _syscall1(type,name,type1,arg1)		\
199 static type name (type1 arg1)			\
200 {						\
201 	return syscall(__NR_##name, arg1);	\
202 }
203 
204 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
205 static type name (type1 arg1,type2 arg2)		\
206 {							\
207 	return syscall(__NR_##name, arg1, arg2);	\
208 }
209 
210 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
211 static type name (type1 arg1,type2 arg2,type3 arg3)		\
212 {								\
213 	return syscall(__NR_##name, arg1, arg2, arg3);		\
214 }
215 
216 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
218 {										\
219 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
220 }
221 
222 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5)							\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
227 }
228 
229 
230 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
231 		  type5,arg5,type6,arg6)					\
232 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
233                   type6 arg6)							\
234 {										\
235 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
236 }
237 
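/*
 * Expansion sketch: for example
 *
 *     _syscall2(int, sys_getpriority, int, which, int, who)
 *
 * defines roughly
 *
 *     static int sys_getpriority(int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 *
 * where __NR_sys_getpriority is aliased to the real __NR_getpriority below.
 */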
238 
239 #define __NR_sys_uname __NR_uname
240 #define __NR_sys_getcwd1 __NR_getcwd
241 #define __NR_sys_getdents __NR_getdents
242 #define __NR_sys_getdents64 __NR_getdents64
243 #define __NR_sys_getpriority __NR_getpriority
244 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
245 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
246 #define __NR_sys_syslog __NR_syslog
247 #define __NR_sys_futex __NR_futex
248 #define __NR_sys_inotify_init __NR_inotify_init
249 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
250 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
251 #define __NR_sys_statx __NR_statx
252 
253 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
254 #define __NR__llseek __NR_lseek
255 #endif
256 
257 /* Newer kernel ports have llseek() instead of _llseek() */
258 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
259 #define TARGET_NR__llseek TARGET_NR_llseek
260 #endif
261 
262 #define __NR_sys_gettid __NR_gettid
263 _syscall0(int, sys_gettid)
264 
265 /* For the 64-bit guest on 32-bit host case we must emulate
266  * getdents using getdents64, because otherwise the host
267  * might hand us back more dirent records than we can fit
268  * into the guest buffer after structure format conversion.
269  * Otherwise we emulate the guest getdents with the host getdents, if available.
270  */
271 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
272 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #endif
274 
275 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
276 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
277 #endif
278 #if (defined(TARGET_NR_getdents) && \
279       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
280     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
281 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
282 #endif
283 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
284 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
285           loff_t *, res, uint, wh);
286 #endif
287 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
288 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
289           siginfo_t *, uinfo)
290 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
291 #ifdef __NR_exit_group
292 _syscall1(int,exit_group,int,error_code)
293 #endif
294 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
295 _syscall1(int,set_tid_address,int *,tidptr)
296 #endif
297 #if defined(TARGET_NR_futex) && defined(__NR_futex)
298 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
299           const struct timespec *,timeout,int *,uaddr2,int,val3)
300 #endif
301 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
302 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
305 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
306           unsigned long *, user_mask_ptr);
307 #define __NR_sys_getcpu __NR_getcpu
308 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
309 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
310           void *, arg);
311 _syscall2(int, capget, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 _syscall2(int, capset, struct __user_cap_header_struct *, header,
314           struct __user_cap_data_struct *, data);
315 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
316 _syscall2(int, ioprio_get, int, which, int, who)
317 #endif
318 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
319 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
320 #endif
321 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
322 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
323 #endif
324 
325 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
326 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
327           unsigned long, idx1, unsigned long, idx2)
328 #endif
329 
330 /*
331  * It is assumed that struct statx is architecture independent.
332  */
333 #if defined(TARGET_NR_statx) && defined(__NR_statx)
334 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
335           unsigned int, mask, struct target_statx *, statxbuf)
336 #endif
337 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
338 _syscall2(int, membarrier, int, cmd, int, flags)
339 #endif
340 
341 static bitmask_transtbl fcntl_flags_tbl[] = {
342   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
343   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
344   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
345   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
346   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
347   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
348   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
349   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
350   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
351   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
352   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
353   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
354   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
355 #if defined(O_DIRECT)
356   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
357 #endif
358 #if defined(O_NOATIME)
359   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
360 #endif
361 #if defined(O_CLOEXEC)
362   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
363 #endif
364 #if defined(O_PATH)
365   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
366 #endif
367 #if defined(O_TMPFILE)
368   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
369 #endif
370   /* Don't terminate the list prematurely on 64-bit host+guest.  */
371 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
372   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
373 #endif
374   { 0, 0, 0, 0 }
375 };
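/*
 * Each row above pairs a (mask, value) on the target side with a (mask, value)
 * on the host side. Roughly, the bitmask translation helpers elsewhere in this
 * file (target_to_host_bitmask() and host_to_target_bitmask()) test
 * "flags & mask == value" on one side and, on a match, OR in the value from
 * the other side; the all-zero row terminates the walk.
 */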
376 
377 static int sys_getcwd1(char *buf, size_t size)
378 {
379   if (getcwd(buf, size) == NULL) {
380       /* getcwd() sets errno */
381       return (-1);
382   }
383   return strlen(buf)+1;
384 }
385 
386 #ifdef TARGET_NR_utimensat
387 #if defined(__NR_utimensat)
388 #define __NR_sys_utimensat __NR_utimensat
389 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
390           const struct timespec *,tsp,int,flags)
391 #else
392 static int sys_utimensat(int dirfd, const char *pathname,
393                          const struct timespec times[2], int flags)
394 {
395     errno = ENOSYS;
396     return -1;
397 }
398 #endif
399 #endif /* TARGET_NR_utimensat */
400 
401 #ifdef TARGET_NR_renameat2
402 #if defined(__NR_renameat2)
403 #define __NR_sys_renameat2 __NR_renameat2
404 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
405           const char *, new, unsigned int, flags)
406 #else
407 static int sys_renameat2(int oldfd, const char *old,
408                          int newfd, const char *new, int flags)
409 {
410     if (flags == 0) {
411         return renameat(oldfd, old, newfd, new);
412     }
413     errno = ENOSYS;
414     return -1;
415 }
416 #endif
417 #endif /* TARGET_NR_renameat2 */
418 
419 #ifdef CONFIG_INOTIFY
420 #include <sys/inotify.h>
421 
422 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
423 static int sys_inotify_init(void)
424 {
425   return (inotify_init());
426 }
427 #endif
428 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
429 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
430 {
431   return (inotify_add_watch(fd, pathname, mask));
432 }
433 #endif
434 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
435 static int sys_inotify_rm_watch(int fd, int32_t wd)
436 {
437   return (inotify_rm_watch(fd, wd));
438 }
439 #endif
440 #ifdef CONFIG_INOTIFY1
441 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
442 static int sys_inotify_init1(int flags)
443 {
444   return (inotify_init1(flags));
445 }
446 #endif
447 #endif
448 #else
449 /* Userspace can usually survive at runtime without inotify */
450 #undef TARGET_NR_inotify_init
451 #undef TARGET_NR_inotify_init1
452 #undef TARGET_NR_inotify_add_watch
453 #undef TARGET_NR_inotify_rm_watch
454 #endif /* CONFIG_INOTIFY  */
455 
456 #if defined(TARGET_NR_prlimit64)
457 #ifndef __NR_prlimit64
458 # define __NR_prlimit64 -1
459 #endif
460 #define __NR_sys_prlimit64 __NR_prlimit64
461 /* The glibc rlimit structure may not be the one used by the underlying syscall */
462 struct host_rlimit64 {
463     uint64_t rlim_cur;
464     uint64_t rlim_max;
465 };
466 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
467           const struct host_rlimit64 *, new_limit,
468           struct host_rlimit64 *, old_limit)
469 #endif
470 
471 
472 #if defined(TARGET_NR_timer_create)
473 /* Maximum of 32 active POSIX timers allowed at any one time. */
474 static timer_t g_posix_timers[32] = { 0, } ;
475 
476 static inline int next_free_host_timer(void)
477 {
478     int k ;
479     /* FIXME: Does finding the next free slot require a lock? */
480     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
481         if (g_posix_timers[k] == 0) {
482             g_posix_timers[k] = (timer_t) 1;
483             return k;
484         }
485     }
486     return -1;
487 }
488 #endif
489 
490 /* ARM EABI and MIPS expect 64-bit types to be aligned on even pairs of registers */
491 #ifdef TARGET_ARM
492 static inline int regpairs_aligned(void *cpu_env, int num)
493 {
494     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
495 }
496 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
497 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
498 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
499 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
500  * register pairs, which works out the same as ARM/MIPS because we start with
501  * r3 as arg1 */
502 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
503 #elif defined(TARGET_SH4)
504 /* SH4 doesn't align register pairs, except for p{read,write}64 */
505 static inline int regpairs_aligned(void *cpu_env, int num)
506 {
507     switch (num) {
508     case TARGET_NR_pread64:
509     case TARGET_NR_pwrite64:
510         return 1;
511 
512     default:
513         return 0;
514     }
515 }
516 #elif defined(TARGET_XTENSA)
517 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
518 #else
519 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
520 #endif
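/*
 * Usage sketch (not a real handler): syscall cases that reassemble a 64-bit
 * value from two abi registers skip one slot when the ABI wants the pair
 * aligned, e.g.
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *     // then combine arg4/arg5 into the 64-bit offset
 */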
521 
522 #define ERRNO_TABLE_SIZE 1200
523 
524 /* target_to_host_errno_table[] is initialized from
525  * host_to_target_errno_table[] in syscall_init(). */
526 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
527 };
528 
529 /*
530  * This list is the union of errno values overridden in asm-<arch>/errno.h
531  * minus the errnos that are not actually generic to all archs.
532  */
533 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
534     [EAGAIN]		= TARGET_EAGAIN,
535     [EIDRM]		= TARGET_EIDRM,
536     [ECHRNG]		= TARGET_ECHRNG,
537     [EL2NSYNC]		= TARGET_EL2NSYNC,
538     [EL3HLT]		= TARGET_EL3HLT,
539     [EL3RST]		= TARGET_EL3RST,
540     [ELNRNG]		= TARGET_ELNRNG,
541     [EUNATCH]		= TARGET_EUNATCH,
542     [ENOCSI]		= TARGET_ENOCSI,
543     [EL2HLT]		= TARGET_EL2HLT,
544     [EDEADLK]		= TARGET_EDEADLK,
545     [ENOLCK]		= TARGET_ENOLCK,
546     [EBADE]		= TARGET_EBADE,
547     [EBADR]		= TARGET_EBADR,
548     [EXFULL]		= TARGET_EXFULL,
549     [ENOANO]		= TARGET_ENOANO,
550     [EBADRQC]		= TARGET_EBADRQC,
551     [EBADSLT]		= TARGET_EBADSLT,
552     [EBFONT]		= TARGET_EBFONT,
553     [ENOSTR]		= TARGET_ENOSTR,
554     [ENODATA]		= TARGET_ENODATA,
555     [ETIME]		= TARGET_ETIME,
556     [ENOSR]		= TARGET_ENOSR,
557     [ENONET]		= TARGET_ENONET,
558     [ENOPKG]		= TARGET_ENOPKG,
559     [EREMOTE]		= TARGET_EREMOTE,
560     [ENOLINK]		= TARGET_ENOLINK,
561     [EADV]		= TARGET_EADV,
562     [ESRMNT]		= TARGET_ESRMNT,
563     [ECOMM]		= TARGET_ECOMM,
564     [EPROTO]		= TARGET_EPROTO,
565     [EDOTDOT]		= TARGET_EDOTDOT,
566     [EMULTIHOP]		= TARGET_EMULTIHOP,
567     [EBADMSG]		= TARGET_EBADMSG,
568     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
569     [EOVERFLOW]		= TARGET_EOVERFLOW,
570     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
571     [EBADFD]		= TARGET_EBADFD,
572     [EREMCHG]		= TARGET_EREMCHG,
573     [ELIBACC]		= TARGET_ELIBACC,
574     [ELIBBAD]		= TARGET_ELIBBAD,
575     [ELIBSCN]		= TARGET_ELIBSCN,
576     [ELIBMAX]		= TARGET_ELIBMAX,
577     [ELIBEXEC]		= TARGET_ELIBEXEC,
578     [EILSEQ]		= TARGET_EILSEQ,
579     [ENOSYS]		= TARGET_ENOSYS,
580     [ELOOP]		= TARGET_ELOOP,
581     [ERESTART]		= TARGET_ERESTART,
582     [ESTRPIPE]		= TARGET_ESTRPIPE,
583     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
584     [EUSERS]		= TARGET_EUSERS,
585     [ENOTSOCK]		= TARGET_ENOTSOCK,
586     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
587     [EMSGSIZE]		= TARGET_EMSGSIZE,
588     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
589     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
590     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
591     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
592     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
593     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
594     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
595     [EADDRINUSE]	= TARGET_EADDRINUSE,
596     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
597     [ENETDOWN]		= TARGET_ENETDOWN,
598     [ENETUNREACH]	= TARGET_ENETUNREACH,
599     [ENETRESET]		= TARGET_ENETRESET,
600     [ECONNABORTED]	= TARGET_ECONNABORTED,
601     [ECONNRESET]	= TARGET_ECONNRESET,
602     [ENOBUFS]		= TARGET_ENOBUFS,
603     [EISCONN]		= TARGET_EISCONN,
604     [ENOTCONN]		= TARGET_ENOTCONN,
605     [EUCLEAN]		= TARGET_EUCLEAN,
606     [ENOTNAM]		= TARGET_ENOTNAM,
607     [ENAVAIL]		= TARGET_ENAVAIL,
608     [EISNAM]		= TARGET_EISNAM,
609     [EREMOTEIO]		= TARGET_EREMOTEIO,
610     [EDQUOT]            = TARGET_EDQUOT,
611     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
612     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
613     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
614     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
615     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
616     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
617     [EALREADY]		= TARGET_EALREADY,
618     [EINPROGRESS]	= TARGET_EINPROGRESS,
619     [ESTALE]		= TARGET_ESTALE,
620     [ECANCELED]		= TARGET_ECANCELED,
621     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
622     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
623 #ifdef ENOKEY
624     [ENOKEY]		= TARGET_ENOKEY,
625 #endif
626 #ifdef EKEYEXPIRED
627     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
628 #endif
629 #ifdef EKEYREVOKED
630     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
631 #endif
632 #ifdef EKEYREJECTED
633     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
634 #endif
635 #ifdef EOWNERDEAD
636     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
637 #endif
638 #ifdef ENOTRECOVERABLE
639     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
640 #endif
641 #ifdef ENOMSG
642     [ENOMSG]            = TARGET_ENOMSG,
643 #endif
644 #ifdef ERFKILL
645     [ERFKILL]           = TARGET_ERFKILL,
646 #endif
647 #ifdef EHWPOISON
648     [EHWPOISON]         = TARGET_EHWPOISON,
649 #endif
650 };
651 
652 static inline int host_to_target_errno(int err)
653 {
654     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
655         host_to_target_errno_table[err]) {
656         return host_to_target_errno_table[err];
657     }
658     return err;
659 }
660 
661 static inline int target_to_host_errno(int err)
662 {
663     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
664         target_to_host_errno_table[err]) {
665         return target_to_host_errno_table[err];
666     }
667     return err;
668 }
669 
670 static inline abi_long get_errno(abi_long ret)
671 {
672     if (ret == -1)
673         return -host_to_target_errno(errno);
674     else
675         return ret;
676 }
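/*
 * Usage sketch: host syscall results are folded into target conventions by
 * wrapping every call site, e.g. (hypothetical caller)
 *
 *     abi_long ret = get_errno(sys_getcwd1(buf, size));
 *     if (is_error(ret)) {
 *         // ret is now -TARGET_Exxx, ready to hand back to the guest
 *     }
 */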
677 
678 const char *target_strerror(int err)
679 {
680     if (err == TARGET_ERESTARTSYS) {
681         return "To be restarted";
682     }
683     if (err == TARGET_QEMU_ESIGRETURN) {
684         return "Successful exit from sigreturn";
685     }
686 
687     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
688         return NULL;
689     }
690     return strerror(target_to_host_errno(err));
691 }
692 
693 #define safe_syscall0(type, name) \
694 static type safe_##name(void) \
695 { \
696     return safe_syscall(__NR_##name); \
697 }
698 
699 #define safe_syscall1(type, name, type1, arg1) \
700 static type safe_##name(type1 arg1) \
701 { \
702     return safe_syscall(__NR_##name, arg1); \
703 }
704 
705 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
706 static type safe_##name(type1 arg1, type2 arg2) \
707 { \
708     return safe_syscall(__NR_##name, arg1, arg2); \
709 }
710 
711 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
713 { \
714     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
715 }
716 
717 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
718     type4, arg4) \
719 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
720 { \
721     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
722 }
723 
724 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
725     type4, arg4, type5, arg5) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
727     type5 arg5) \
728 { \
729     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
730 }
731 
732 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
733     type4, arg4, type5, arg5, type6, arg6) \
734 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
735     type5 arg5, type6 arg6) \
736 { \
737     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
738 }
739 
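/*
 * Like the _syscall* macros earlier, each safe_syscallN() line below defines
 * a small wrapper; the first one, for instance, yields roughly
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * the only difference from _syscallN being the safe_ prefix and the use of
 * safe_syscall() instead of plain syscall().
 */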
740 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
741 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
742 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
743               int, flags, mode_t, mode)
744 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
745               struct rusage *, rusage)
746 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
747               int, options, struct rusage *, rusage)
748 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
749 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
750               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
751 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
752               struct timespec *, tsp, const sigset_t *, sigmask,
753               size_t, sigsetsize)
754 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
755               int, maxevents, int, timeout, const sigset_t *, sigmask,
756               size_t, sigsetsize)
757 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
758               const struct timespec *,timeout,int *,uaddr2,int,val3)
759 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
760 safe_syscall2(int, kill, pid_t, pid, int, sig)
761 safe_syscall2(int, tkill, int, tid, int, sig)
762 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
763 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
764 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
765 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
766               unsigned long, pos_l, unsigned long, pos_h)
767 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
768               unsigned long, pos_l, unsigned long, pos_h)
769 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
770               socklen_t, addrlen)
771 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
772               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
773 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
774               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
775 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
776 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
777 safe_syscall2(int, flock, int, fd, int, operation)
778 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
779               const struct timespec *, uts, size_t, sigsetsize)
780 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
781               int, flags)
782 safe_syscall2(int, nanosleep, const struct timespec *, req,
783               struct timespec *, rem)
784 #ifdef TARGET_NR_clock_nanosleep
785 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
786               const struct timespec *, req, struct timespec *, rem)
787 #endif
788 #ifdef __NR_ipc
789 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
790               void *, ptr, long, fifth)
791 #endif
792 #ifdef __NR_msgsnd
793 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
794               int, flags)
795 #endif
796 #ifdef __NR_msgrcv
797 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
798               long, msgtype, int, flags)
799 #endif
800 #ifdef __NR_semtimedop
801 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
802               unsigned, nsops, const struct timespec *, timeout)
803 #endif
804 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
805 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
806               size_t, len, unsigned, prio, const struct timespec *, timeout)
807 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
808               size_t, len, unsigned *, prio, const struct timespec *, timeout)
809 #endif
810 /* We do ioctl like this rather than via safe_syscall3 to preserve the
811  * "third argument might be integer or pointer or not present" behaviour of
812  * the libc function.
813  */
814 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
815 /* Similarly for fcntl. Note that callers must always:
816  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
817  *  - use the flock64 struct rather than unsuffixed flock
818  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
819  */
820 #ifdef __NR_fcntl64
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
822 #else
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
824 #endif
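/*
 * Caller sketch (hypothetical), following the rules above -- always the
 * 64-bit-suffixed command and structure, on both 32-bit and 64-bit hosts:
 *
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     abi_long ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 */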
825 
826 static inline int host_to_target_sock_type(int host_type)
827 {
828     int target_type;
829 
830     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
831     case SOCK_DGRAM:
832         target_type = TARGET_SOCK_DGRAM;
833         break;
834     case SOCK_STREAM:
835         target_type = TARGET_SOCK_STREAM;
836         break;
837     default:
838         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
839         break;
840     }
841 
842 #if defined(SOCK_CLOEXEC)
843     if (host_type & SOCK_CLOEXEC) {
844         target_type |= TARGET_SOCK_CLOEXEC;
845     }
846 #endif
847 
848 #if defined(SOCK_NONBLOCK)
849     if (host_type & SOCK_NONBLOCK) {
850         target_type |= TARGET_SOCK_NONBLOCK;
851     }
852 #endif
853 
854     return target_type;
855 }
856 
857 static abi_ulong target_brk;
858 static abi_ulong target_original_brk;
859 static abi_ulong brk_page;
860 
861 void target_set_brk(abi_ulong new_brk)
862 {
863     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
864     brk_page = HOST_PAGE_ALIGN(target_brk);
865 }
866 
867 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
868 #define DEBUGF_BRK(message, args...)
869 
870 /* do_brk() must return target values and target errnos. */
871 abi_long do_brk(abi_ulong new_brk)
872 {
873     abi_long mapped_addr;
874     abi_ulong new_alloc_size;
875 
876     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
877 
878     if (!new_brk) {
879         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
880         return target_brk;
881     }
882     if (new_brk < target_original_brk) {
883         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
884                    target_brk);
885         return target_brk;
886     }
887 
888     /* If the new brk is less than the highest page reserved to the
889      * target heap allocation, set it and we're almost done...  */
890     if (new_brk <= brk_page) {
891         /* Heap contents are initialized to zero, as for anonymous
892          * mapped pages.  */
893         if (new_brk > target_brk) {
894             memset(g2h(target_brk), 0, new_brk - target_brk);
895         }
896         target_brk = new_brk;
897         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
898         return target_brk;
899     }
900 
901     /* We need to allocate more memory after the brk... Note that
902      * we don't use MAP_FIXED because that will map over the top of
903      * any existing mapping (like the one with the host libc or qemu
904      * itself); instead we treat "mapped but at wrong address" as
905      * a failure and unmap again.
906      */
907     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
908     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
909                                         PROT_READ|PROT_WRITE,
910                                         MAP_ANON|MAP_PRIVATE, 0, 0));
911 
912     if (mapped_addr == brk_page) {
913         /* Heap contents are initialized to zero, as for anonymous
914          * mapped pages.  Technically the new pages are already
915          * initialized to zero since they *are* anonymous mapped
916          * pages, however we have to take care with the contents that
917          * come from the remaining part of the previous page: it may
918          * contain garbage data due to previous heap usage (grown
919          * then shrunk).  */
920         memset(g2h(target_brk), 0, brk_page - target_brk);
921 
922         target_brk = new_brk;
923         brk_page = HOST_PAGE_ALIGN(target_brk);
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
925             target_brk);
926         return target_brk;
927     } else if (mapped_addr != -1) {
928         /* Mapped but at wrong address, meaning there wasn't actually
929          * enough space for this brk.
930          */
931         target_munmap(mapped_addr, new_alloc_size);
932         mapped_addr = -1;
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
934     }
935     else {
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
937     }
938 
939 #if defined(TARGET_ALPHA)
940     /* We (partially) emulate OSF/1 on Alpha, which requires that we
941        return a proper errno, not an unchanged brk value.  */
942     return -TARGET_ENOMEM;
943 #endif
944     /* For everything else, return the previous break. */
945     return target_brk;
946 }
947 
948 static inline abi_long copy_from_user_fdset(fd_set *fds,
949                                             abi_ulong target_fds_addr,
950                                             int n)
951 {
952     int i, nw, j, k;
953     abi_ulong b, *target_fds;
954 
955     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
956     if (!(target_fds = lock_user(VERIFY_READ,
957                                  target_fds_addr,
958                                  sizeof(abi_ulong) * nw,
959                                  1)))
960         return -TARGET_EFAULT;
961 
962     FD_ZERO(fds);
963     k = 0;
964     for (i = 0; i < nw; i++) {
965         /* grab the abi_ulong */
966         __get_user(b, &target_fds[i]);
967         for (j = 0; j < TARGET_ABI_BITS; j++) {
968             /* check the bit inside the abi_ulong */
969             if ((b >> j) & 1)
970                 FD_SET(k, fds);
971             k++;
972         }
973     }
974 
975     unlock_user(target_fds, target_fds_addr, 0);
976 
977     return 0;
978 }
979 
980 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
981                                                  abi_ulong target_fds_addr,
982                                                  int n)
983 {
984     if (target_fds_addr) {
985         if (copy_from_user_fdset(fds, target_fds_addr, n))
986             return -TARGET_EFAULT;
987         *fds_ptr = fds;
988     } else {
989         *fds_ptr = NULL;
990     }
991     return 0;
992 }
993 
994 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
995                                           const fd_set *fds,
996                                           int n)
997 {
998     int i, nw, j, k;
999     abi_long v;
1000     abi_ulong *target_fds;
1001 
1002     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1003     if (!(target_fds = lock_user(VERIFY_WRITE,
1004                                  target_fds_addr,
1005                                  sizeof(abi_ulong) * nw,
1006                                  0)))
1007         return -TARGET_EFAULT;
1008 
1009     k = 0;
1010     for (i = 0; i < nw; i++) {
1011         v = 0;
1012         for (j = 0; j < TARGET_ABI_BITS; j++) {
1013             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1014             k++;
1015         }
1016         __put_user(v, &target_fds[i]);
1017     }
1018 
1019     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1020 
1021     return 0;
1022 }
1023 
1024 #if defined(__alpha__)
1025 #define HOST_HZ 1024
1026 #else
1027 #define HOST_HZ 100
1028 #endif
1029 
1030 static inline abi_long host_to_target_clock_t(long ticks)
1031 {
1032 #if HOST_HZ == TARGET_HZ
1033     return ticks;
1034 #else
1035     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1036 #endif
1037 }
1038 
1039 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1040                                              const struct rusage *rusage)
1041 {
1042     struct target_rusage *target_rusage;
1043 
1044     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1045         return -TARGET_EFAULT;
1046     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1047     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1048     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1049     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1050     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1051     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1052     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1053     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1054     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1055     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1056     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1057     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1058     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1059     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1060     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1061     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1062     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1063     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1064     unlock_user_struct(target_rusage, target_addr, 1);
1065 
1066     return 0;
1067 }
1068 
1069 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1070 {
1071     abi_ulong target_rlim_swap;
1072     rlim_t result;
1073 
1074     target_rlim_swap = tswapal(target_rlim);
1075     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1076         return RLIM_INFINITY;
1077 
1078     result = target_rlim_swap;
1079     if (target_rlim_swap != (rlim_t)result)
1080         return RLIM_INFINITY;
1081 
1082     return result;
1083 }
1084 
1085 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1086 {
1087     abi_ulong target_rlim_swap;
1088     abi_ulong result;
1089 
1090     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1091         target_rlim_swap = TARGET_RLIM_INFINITY;
1092     else
1093         target_rlim_swap = rlim;
1094     result = tswapal(target_rlim_swap);
1095 
1096     return result;
1097 }
1098 
1099 static inline int target_to_host_resource(int code)
1100 {
1101     switch (code) {
1102     case TARGET_RLIMIT_AS:
1103         return RLIMIT_AS;
1104     case TARGET_RLIMIT_CORE:
1105         return RLIMIT_CORE;
1106     case TARGET_RLIMIT_CPU:
1107         return RLIMIT_CPU;
1108     case TARGET_RLIMIT_DATA:
1109         return RLIMIT_DATA;
1110     case TARGET_RLIMIT_FSIZE:
1111         return RLIMIT_FSIZE;
1112     case TARGET_RLIMIT_LOCKS:
1113         return RLIMIT_LOCKS;
1114     case TARGET_RLIMIT_MEMLOCK:
1115         return RLIMIT_MEMLOCK;
1116     case TARGET_RLIMIT_MSGQUEUE:
1117         return RLIMIT_MSGQUEUE;
1118     case TARGET_RLIMIT_NICE:
1119         return RLIMIT_NICE;
1120     case TARGET_RLIMIT_NOFILE:
1121         return RLIMIT_NOFILE;
1122     case TARGET_RLIMIT_NPROC:
1123         return RLIMIT_NPROC;
1124     case TARGET_RLIMIT_RSS:
1125         return RLIMIT_RSS;
1126     case TARGET_RLIMIT_RTPRIO:
1127         return RLIMIT_RTPRIO;
1128     case TARGET_RLIMIT_SIGPENDING:
1129         return RLIMIT_SIGPENDING;
1130     case TARGET_RLIMIT_STACK:
1131         return RLIMIT_STACK;
1132     default:
1133         return code;
1134     }
1135 }
1136 
1137 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1138                                               abi_ulong target_tv_addr)
1139 {
1140     struct target_timeval *target_tv;
1141 
1142     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1143         return -TARGET_EFAULT;
1144     }
1145 
1146     __get_user(tv->tv_sec, &target_tv->tv_sec);
1147     __get_user(tv->tv_usec, &target_tv->tv_usec);
1148 
1149     unlock_user_struct(target_tv, target_tv_addr, 0);
1150 
1151     return 0;
1152 }
1153 
1154 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1155                                             const struct timeval *tv)
1156 {
1157     struct target_timeval *target_tv;
1158 
1159     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1160         return -TARGET_EFAULT;
1161     }
1162 
1163     __put_user(tv->tv_sec, &target_tv->tv_sec);
1164     __put_user(tv->tv_usec, &target_tv->tv_usec);
1165 
1166     unlock_user_struct(target_tv, target_tv_addr, 1);
1167 
1168     return 0;
1169 }
1170 
1171 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1172                                              const struct timeval *tv)
1173 {
1174     struct target__kernel_sock_timeval *target_tv;
1175 
1176     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1177         return -TARGET_EFAULT;
1178     }
1179 
1180     __put_user(tv->tv_sec, &target_tv->tv_sec);
1181     __put_user(tv->tv_usec, &target_tv->tv_usec);
1182 
1183     unlock_user_struct(target_tv, target_tv_addr, 1);
1184 
1185     return 0;
1186 }
1187 
1188 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1189                                                abi_ulong target_addr)
1190 {
1191     struct target_timespec *target_ts;
1192 
1193     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1194         return -TARGET_EFAULT;
1195     }
1196     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1197     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1198     unlock_user_struct(target_ts, target_addr, 0);
1199     return 0;
1200 }
1201 
1202 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1203                                                struct timespec *host_ts)
1204 {
1205     struct target_timespec *target_ts;
1206 
1207     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1208         return -TARGET_EFAULT;
1209     }
1210     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1211     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1212     unlock_user_struct(target_ts, target_addr, 1);
1213     return 0;
1214 }
1215 
1216 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1217                                                  struct timespec *host_ts)
1218 {
1219     struct target__kernel_timespec *target_ts;
1220 
1221     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1222         return -TARGET_EFAULT;
1223     }
1224     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1225     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1226     unlock_user_struct(target_ts, target_addr, 1);
1227     return 0;
1228 }
1229 
1230 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1231                                                abi_ulong target_tz_addr)
1232 {
1233     struct target_timezone *target_tz;
1234 
1235     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1236         return -TARGET_EFAULT;
1237     }
1238 
1239     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1240     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1241 
1242     unlock_user_struct(target_tz, target_tz_addr, 0);
1243 
1244     return 0;
1245 }
1246 
1247 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1248 #include <mqueue.h>
1249 
1250 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1251                                               abi_ulong target_mq_attr_addr)
1252 {
1253     struct target_mq_attr *target_mq_attr;
1254 
1255     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1256                           target_mq_attr_addr, 1))
1257         return -TARGET_EFAULT;
1258 
1259     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1260     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1261     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1262     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1263 
1264     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1265 
1266     return 0;
1267 }
1268 
1269 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1270                                             const struct mq_attr *attr)
1271 {
1272     struct target_mq_attr *target_mq_attr;
1273 
1274     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1275                           target_mq_attr_addr, 0))
1276         return -TARGET_EFAULT;
1277 
1278     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1279     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1280     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1281     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1282 
1283     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1284 
1285     return 0;
1286 }
1287 #endif
1288 
1289 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1290 /* do_select() must return target values and target errnos. */
1291 static abi_long do_select(int n,
1292                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1293                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1294 {
1295     fd_set rfds, wfds, efds;
1296     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1297     struct timeval tv;
1298     struct timespec ts, *ts_ptr;
1299     abi_long ret;
1300 
1301     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1302     if (ret) {
1303         return ret;
1304     }
1305     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1306     if (ret) {
1307         return ret;
1308     }
1309     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1310     if (ret) {
1311         return ret;
1312     }
1313 
1314     if (target_tv_addr) {
1315         if (copy_from_user_timeval(&tv, target_tv_addr))
1316             return -TARGET_EFAULT;
1317         ts.tv_sec = tv.tv_sec;
1318         ts.tv_nsec = tv.tv_usec * 1000;
1319         ts_ptr = &ts;
1320     } else {
1321         ts_ptr = NULL;
1322     }
1323 
1324     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1325                                   ts_ptr, NULL));
1326 
1327     if (!is_error(ret)) {
1328         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1329             return -TARGET_EFAULT;
1330         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1331             return -TARGET_EFAULT;
1332         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1333             return -TARGET_EFAULT;
1334 
1335         if (target_tv_addr) {
1336             tv.tv_sec = ts.tv_sec;
1337             tv.tv_usec = ts.tv_nsec / 1000;
1338             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1339                 return -TARGET_EFAULT;
1340             }
1341         }
1342     }
1343 
1344     return ret;
1345 }
1346 
1347 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1348 static abi_long do_old_select(abi_ulong arg1)
1349 {
1350     struct target_sel_arg_struct *sel;
1351     abi_ulong inp, outp, exp, tvp;
1352     long nsel;
1353 
1354     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1355         return -TARGET_EFAULT;
1356     }
1357 
1358     nsel = tswapal(sel->n);
1359     inp = tswapal(sel->inp);
1360     outp = tswapal(sel->outp);
1361     exp = tswapal(sel->exp);
1362     tvp = tswapal(sel->tvp);
1363 
1364     unlock_user_struct(sel, arg1, 0);
1365 
1366     return do_select(nsel, inp, outp, exp, tvp);
1367 }
1368 #endif
1369 #endif
1370 
1371 static abi_long do_pipe2(int host_pipe[], int flags)
1372 {
1373 #ifdef CONFIG_PIPE2
1374     return pipe2(host_pipe, flags);
1375 #else
1376     return -ENOSYS;
1377 #endif
1378 }
1379 
1380 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1381                         int flags, int is_pipe2)
1382 {
1383     int host_pipe[2];
1384     abi_long ret;
1385     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1386 
1387     if (is_error(ret))
1388         return get_errno(ret);
1389 
1390     /* Several targets have special calling conventions for the original
1391        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1392     if (!is_pipe2) {
1393 #if defined(TARGET_ALPHA)
1394         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1395         return host_pipe[0];
1396 #elif defined(TARGET_MIPS)
1397         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1398         return host_pipe[0];
1399 #elif defined(TARGET_SH4)
1400         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1401         return host_pipe[0];
1402 #elif defined(TARGET_SPARC)
1403         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1404         return host_pipe[0];
1405 #endif
1406     }
1407 
1408     if (put_user_s32(host_pipe[0], pipedes)
1409         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1410         return -TARGET_EFAULT;
1411     return get_errno(ret);
1412 }
1413 
1414 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1415                                               abi_ulong target_addr,
1416                                               socklen_t len)
1417 {
1418     struct target_ip_mreqn *target_smreqn;
1419 
1420     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1421     if (!target_smreqn)
1422         return -TARGET_EFAULT;
1423     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1424     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1425     if (len == sizeof(struct target_ip_mreqn))
1426         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1427     unlock_user(target_smreqn, target_addr, 0);
1428 
1429     return 0;
1430 }
1431 
1432 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1433                                                abi_ulong target_addr,
1434                                                socklen_t len)
1435 {
1436     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1437     sa_family_t sa_family;
1438     struct target_sockaddr *target_saddr;
1439 
1440     if (fd_trans_target_to_host_addr(fd)) {
1441         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1442     }
1443 
1444     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1445     if (!target_saddr)
1446         return -TARGET_EFAULT;
1447 
1448     sa_family = tswap16(target_saddr->sa_family);
1449 
1450     /* Oops. The caller might send an incomplete sun_path; sun_path
1451      * must be terminated by \0 (see the manual page), but
1452      * unfortunately it is quite common to specify sockaddr_un
1453      * length as "strlen(x->sun_path)" while it should be
1454      * "strlen(...) + 1". We'll fix that here if needed.
1455      * The Linux kernel has a similar feature.
1456      */
1457 
1458     if (sa_family == AF_UNIX) {
1459         if (len < unix_maxlen && len > 0) {
1460             char *cp = (char*)target_saddr;
1461 
1462             if ( cp[len-1] && !cp[len] )
1463                 len++;
1464         }
1465         if (len > unix_maxlen)
1466             len = unix_maxlen;
1467     }
1468 
1469     memcpy(addr, target_saddr, len);
1470     addr->sa_family = sa_family;
1471     if (sa_family == AF_NETLINK) {
1472         struct sockaddr_nl *nladdr;
1473 
1474         nladdr = (struct sockaddr_nl *)addr;
1475         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1476         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1477     } else if (sa_family == AF_PACKET) {
1478         struct target_sockaddr_ll *lladdr;
1479 
1480         lladdr = (struct target_sockaddr_ll *)addr;
1481         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1482         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1483     }
1484     unlock_user(target_saddr, target_addr, 0);
1485 
1486     return 0;
1487 }
1488 
1489 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1490                                                struct sockaddr *addr,
1491                                                socklen_t len)
1492 {
1493     struct target_sockaddr *target_saddr;
1494 
1495     if (len == 0) {
1496         return 0;
1497     }
1498     assert(addr);
1499 
1500     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1501     if (!target_saddr)
1502         return -TARGET_EFAULT;
1503     memcpy(target_saddr, addr, len);
1504     if (len >= offsetof(struct target_sockaddr, sa_family) +
1505         sizeof(target_saddr->sa_family)) {
1506         target_saddr->sa_family = tswap16(addr->sa_family);
1507     }
1508     if (addr->sa_family == AF_NETLINK &&
1509         len >= sizeof(struct target_sockaddr_nl)) {
1510         struct target_sockaddr_nl *target_nl =
1511                (struct target_sockaddr_nl *)target_saddr;
1512         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1513         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1514     } else if (addr->sa_family == AF_PACKET) {
1515         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1516         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1517         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1518     } else if (addr->sa_family == AF_INET6 &&
1519                len >= sizeof(struct target_sockaddr_in6)) {
1520         struct target_sockaddr_in6 *target_in6 =
1521                (struct target_sockaddr_in6 *)target_saddr;
1522         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1523     }
1524     unlock_user(target_saddr, target_addr, len);
1525 
1526     return 0;
1527 }
1528 
1529 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1530                                            struct target_msghdr *target_msgh)
1531 {
1532     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1533     abi_long msg_controllen;
1534     abi_ulong target_cmsg_addr;
1535     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1536     socklen_t space = 0;
1537 
1538     msg_controllen = tswapal(target_msgh->msg_controllen);
1539     if (msg_controllen < sizeof (struct target_cmsghdr))
1540         goto the_end;
1541     target_cmsg_addr = tswapal(target_msgh->msg_control);
1542     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1543     target_cmsg_start = target_cmsg;
1544     if (!target_cmsg)
1545         return -TARGET_EFAULT;
1546 
1547     while (cmsg && target_cmsg) {
1548         void *data = CMSG_DATA(cmsg);
1549         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1550 
1551         int len = tswapal(target_cmsg->cmsg_len)
1552             - sizeof(struct target_cmsghdr);
1553 
1554         space += CMSG_SPACE(len);
1555         if (space > msgh->msg_controllen) {
1556             space -= CMSG_SPACE(len);
1557             /* This is a QEMU bug, since we allocated the payload
1558              * area ourselves (unlike overflow in host-to-target
1559              * conversion, which is just the guest giving us a buffer
1560              * that's too small). It can't happen for the payload types
1561              * we currently support; if it becomes an issue in future
1562              * we would need to improve our allocation strategy to
1563              * something more intelligent than "twice the size of the
1564              * target buffer we're reading from".
1565              */
1566             gemu_log("Host cmsg overflow\n");
1567             break;
1568         }
1569 
1570         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1571             cmsg->cmsg_level = SOL_SOCKET;
1572         } else {
1573             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1574         }
1575         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1576         cmsg->cmsg_len = CMSG_LEN(len);
1577 
1578         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1579             int *fd = (int *)data;
1580             int *target_fd = (int *)target_data;
1581             int i, numfds = len / sizeof(int);
1582 
1583             for (i = 0; i < numfds; i++) {
1584                 __get_user(fd[i], target_fd + i);
1585             }
1586         } else if (cmsg->cmsg_level == SOL_SOCKET
1587                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1588             struct ucred *cred = (struct ucred *)data;
1589             struct target_ucred *target_cred =
1590                 (struct target_ucred *)target_data;
1591 
1592             __get_user(cred->pid, &target_cred->pid);
1593             __get_user(cred->uid, &target_cred->uid);
1594             __get_user(cred->gid, &target_cred->gid);
1595         } else {
1596             gemu_log("Unsupported ancillary data: %d/%d\n",
1597                                         cmsg->cmsg_level, cmsg->cmsg_type);
1598             memcpy(data, target_data, len);
1599         }
1600 
1601         cmsg = CMSG_NXTHDR(msgh, cmsg);
1602         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1603                                          target_cmsg_start);
1604     }
1605     unlock_user(target_cmsg, target_cmsg_addr, 0);
1606  the_end:
1607     msgh->msg_controllen = space;
1608     return 0;
1609 }
1610 
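/* Convert host control messages received from the kernel back into the
 * guest's cmsg layout, adjusting payload sizes where the target layout
 * differs (e.g. SO_TIMESTAMP's struct timeval) and setting MSG_CTRUNC
 * when the guest-supplied buffer is too small.
 */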
1611 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1612                                            struct msghdr *msgh)
1613 {
1614     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1615     abi_long msg_controllen;
1616     abi_ulong target_cmsg_addr;
1617     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1618     socklen_t space = 0;
1619 
1620     msg_controllen = tswapal(target_msgh->msg_controllen);
1621     if (msg_controllen < sizeof (struct target_cmsghdr))
1622         goto the_end;
1623     target_cmsg_addr = tswapal(target_msgh->msg_control);
1624     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1625     target_cmsg_start = target_cmsg;
1626     if (!target_cmsg)
1627         return -TARGET_EFAULT;
1628 
1629     while (cmsg && target_cmsg) {
1630         void *data = CMSG_DATA(cmsg);
1631         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1632 
1633         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1634         int tgt_len, tgt_space;
1635 
1636         /* We never copy a half-header but may copy half-data;
1637          * this is Linux's behaviour in put_cmsg(). Note that
1638          * truncation here is a guest problem (which we report
1639          * to the guest via the CTRUNC bit), unlike truncation
1640          * in target_to_host_cmsg, which is a QEMU bug.
1641          */
1642         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1643             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1644             break;
1645         }
1646 
1647         if (cmsg->cmsg_level == SOL_SOCKET) {
1648             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1649         } else {
1650             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1651         }
1652         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1653 
1654         /* Payload types which need a different size of payload on
1655          * the target must adjust tgt_len here.
1656          */
1657         tgt_len = len;
1658         switch (cmsg->cmsg_level) {
1659         case SOL_SOCKET:
1660             switch (cmsg->cmsg_type) {
1661             case SO_TIMESTAMP:
1662                 tgt_len = sizeof(struct target_timeval);
1663                 break;
1664             default:
1665                 break;
1666             }
1667             break;
1668         default:
1669             break;
1670         }
1671 
1672         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1673             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1674             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1675         }
1676 
1677         /* We must now copy-and-convert len bytes of payload
1678          * into tgt_len bytes of destination space. Bear in mind
1679          * that in both source and destination we may be dealing
1680          * with a truncated value!
1681          */
1682         switch (cmsg->cmsg_level) {
1683         case SOL_SOCKET:
1684             switch (cmsg->cmsg_type) {
1685             case SCM_RIGHTS:
1686             {
1687                 int *fd = (int *)data;
1688                 int *target_fd = (int *)target_data;
1689                 int i, numfds = tgt_len / sizeof(int);
1690 
1691                 for (i = 0; i < numfds; i++) {
1692                     __put_user(fd[i], target_fd + i);
1693                 }
1694                 break;
1695             }
1696             case SO_TIMESTAMP:
1697             {
1698                 struct timeval *tv = (struct timeval *)data;
1699                 struct target_timeval *target_tv =
1700                     (struct target_timeval *)target_data;
1701 
1702                 if (len != sizeof(struct timeval) ||
1703                     tgt_len != sizeof(struct target_timeval)) {
1704                     goto unimplemented;
1705                 }
1706 
1707                 /* copy struct timeval to target */
1708                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1709                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1710                 break;
1711             }
1712             case SCM_CREDENTIALS:
1713             {
1714                 struct ucred *cred = (struct ucred *)data;
1715                 struct target_ucred *target_cred =
1716                     (struct target_ucred *)target_data;
1717 
1718                 __put_user(cred->pid, &target_cred->pid);
1719                 __put_user(cred->uid, &target_cred->uid);
1720                 __put_user(cred->gid, &target_cred->gid);
1721                 break;
1722             }
1723             default:
1724                 goto unimplemented;
1725             }
1726             break;
1727 
1728         case SOL_IP:
1729             switch (cmsg->cmsg_type) {
1730             case IP_TTL:
1731             {
1732                 uint32_t *v = (uint32_t *)data;
1733                 uint32_t *t_int = (uint32_t *)target_data;
1734 
1735                 if (len != sizeof(uint32_t) ||
1736                     tgt_len != sizeof(uint32_t)) {
1737                     goto unimplemented;
1738                 }
1739                 __put_user(*v, t_int);
1740                 break;
1741             }
1742             case IP_RECVERR:
1743             {
1744                 struct errhdr_t {
1745                    struct sock_extended_err ee;
1746                    struct sockaddr_in offender;
1747                 };
1748                 struct errhdr_t *errh = (struct errhdr_t *)data;
1749                 struct errhdr_t *target_errh =
1750                     (struct errhdr_t *)target_data;
1751 
1752                 if (len != sizeof(struct errhdr_t) ||
1753                     tgt_len != sizeof(struct errhdr_t)) {
1754                     goto unimplemented;
1755                 }
1756                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1757                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1758                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1759                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1760                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1761                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1762                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1763                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1764                     (void *) &errh->offender, sizeof(errh->offender));
1765                 break;
1766             }
1767             default:
1768                 goto unimplemented;
1769             }
1770             break;
1771 
1772         case SOL_IPV6:
1773             switch (cmsg->cmsg_type) {
1774             case IPV6_HOPLIMIT:
1775             {
1776                 uint32_t *v = (uint32_t *)data;
1777                 uint32_t *t_int = (uint32_t *)target_data;
1778 
1779                 if (len != sizeof(uint32_t) ||
1780                     tgt_len != sizeof(uint32_t)) {
1781                     goto unimplemented;
1782                 }
1783                 __put_user(*v, t_int);
1784                 break;
1785             }
1786             case IPV6_RECVERR:
1787             {
1788                 struct errhdr6_t {
1789                    struct sock_extended_err ee;
1790                    struct sockaddr_in6 offender;
1791                 };
1792                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1793                 struct errhdr6_t *target_errh =
1794                     (struct errhdr6_t *)target_data;
1795 
1796                 if (len != sizeof(struct errhdr6_t) ||
1797                     tgt_len != sizeof(struct errhdr6_t)) {
1798                     goto unimplemented;
1799                 }
1800                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1801                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1802                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1803                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1804                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1805                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1806                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1807                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1808                     (void *) &errh->offender, sizeof(errh->offender));
1809                 break;
1810             }
1811             default:
1812                 goto unimplemented;
1813             }
1814             break;
1815 
1816         default:
1817         unimplemented:
1818             gemu_log("Unsupported ancillary data: %d/%d\n",
1819                                         cmsg->cmsg_level, cmsg->cmsg_type);
1820             memcpy(target_data, data, MIN(len, tgt_len));
1821             if (tgt_len > len) {
1822                 memset(target_data + len, 0, tgt_len - len);
1823             }
1824         }
1825 
1826         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1827         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1828         if (msg_controllen < tgt_space) {
1829             tgt_space = msg_controllen;
1830         }
1831         msg_controllen -= tgt_space;
1832         space += tgt_space;
1833         cmsg = CMSG_NXTHDR(msgh, cmsg);
1834         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1835                                          target_cmsg_start);
1836     }
1837     unlock_user(target_cmsg, target_cmsg_addr, space);
1838  the_end:
1839     target_msgh->msg_controllen = tswapal(space);
1840     return 0;
1841 }
1842 
1843 /* do_setsockopt() Must return target values and target errnos. */
1844 static abi_long do_setsockopt(int sockfd, int level, int optname,
1845                               abi_ulong optval_addr, socklen_t optlen)
1846 {
1847     abi_long ret;
1848     int val;
1849     struct ip_mreqn *ip_mreq;
1850     struct ip_mreq_source *ip_mreq_source;
1851 
1852     switch(level) {
1853     case SOL_TCP:
1854         /* TCP options all take an 'int' value.  */
1855         if (optlen < sizeof(uint32_t))
1856             return -TARGET_EINVAL;
1857 
1858         if (get_user_u32(val, optval_addr))
1859             return -TARGET_EFAULT;
1860         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1861         break;
1862     case SOL_IP:
1863         switch(optname) {
1864         case IP_TOS:
1865         case IP_TTL:
1866         case IP_HDRINCL:
1867         case IP_ROUTER_ALERT:
1868         case IP_RECVOPTS:
1869         case IP_RETOPTS:
1870         case IP_PKTINFO:
1871         case IP_MTU_DISCOVER:
1872         case IP_RECVERR:
1873         case IP_RECVTTL:
1874         case IP_RECVTOS:
1875 #ifdef IP_FREEBIND
1876         case IP_FREEBIND:
1877 #endif
1878         case IP_MULTICAST_TTL:
1879         case IP_MULTICAST_LOOP:
1880             val = 0;
1881             if (optlen >= sizeof(uint32_t)) {
1882                 if (get_user_u32(val, optval_addr))
1883                     return -TARGET_EFAULT;
1884             } else if (optlen >= 1) {
1885                 if (get_user_u8(val, optval_addr))
1886                     return -TARGET_EFAULT;
1887             }
1888             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1889             break;
1890         case IP_ADD_MEMBERSHIP:
1891         case IP_DROP_MEMBERSHIP:
1892             if (optlen < sizeof (struct target_ip_mreq) ||
1893                 optlen > sizeof (struct target_ip_mreqn))
1894                 return -TARGET_EINVAL;
1895 
1896             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1897             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1898             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1899             break;
1900 
1901         case IP_BLOCK_SOURCE:
1902         case IP_UNBLOCK_SOURCE:
1903         case IP_ADD_SOURCE_MEMBERSHIP:
1904         case IP_DROP_SOURCE_MEMBERSHIP:
1905             if (optlen != sizeof (struct target_ip_mreq_source))
1906                 return -TARGET_EINVAL;
1907 
1908             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1909             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1910             unlock_user(ip_mreq_source, optval_addr, 0);
1911             break;
1912 
1913         default:
1914             goto unimplemented;
1915         }
1916         break;
1917     case SOL_IPV6:
1918         switch (optname) {
1919         case IPV6_MTU_DISCOVER:
1920         case IPV6_MTU:
1921         case IPV6_V6ONLY:
1922         case IPV6_RECVPKTINFO:
1923         case IPV6_UNICAST_HOPS:
1924         case IPV6_MULTICAST_HOPS:
1925         case IPV6_MULTICAST_LOOP:
1926         case IPV6_RECVERR:
1927         case IPV6_RECVHOPLIMIT:
1928         case IPV6_2292HOPLIMIT:
1929         case IPV6_CHECKSUM:
1930         case IPV6_ADDRFORM:
1931         case IPV6_2292PKTINFO:
1932         case IPV6_RECVTCLASS:
1933         case IPV6_RECVRTHDR:
1934         case IPV6_2292RTHDR:
1935         case IPV6_RECVHOPOPTS:
1936         case IPV6_2292HOPOPTS:
1937         case IPV6_RECVDSTOPTS:
1938         case IPV6_2292DSTOPTS:
1939         case IPV6_TCLASS:
1940 #ifdef IPV6_RECVPATHMTU
1941         case IPV6_RECVPATHMTU:
1942 #endif
1943 #ifdef IPV6_TRANSPARENT
1944         case IPV6_TRANSPARENT:
1945 #endif
1946 #ifdef IPV6_FREEBIND
1947         case IPV6_FREEBIND:
1948 #endif
1949 #ifdef IPV6_RECVORIGDSTADDR
1950         case IPV6_RECVORIGDSTADDR:
1951 #endif
1952             val = 0;
1953             if (optlen < sizeof(uint32_t)) {
1954                 return -TARGET_EINVAL;
1955             }
1956             if (get_user_u32(val, optval_addr)) {
1957                 return -TARGET_EFAULT;
1958             }
1959             ret = get_errno(setsockopt(sockfd, level, optname,
1960                                        &val, sizeof(val)));
1961             break;
1962         case IPV6_PKTINFO:
1963         {
1964             struct in6_pktinfo pki;
1965 
1966             if (optlen < sizeof(pki)) {
1967                 return -TARGET_EINVAL;
1968             }
1969 
1970             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1971                 return -TARGET_EFAULT;
1972             }
1973 
1974             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1975 
1976             ret = get_errno(setsockopt(sockfd, level, optname,
1977                                        &pki, sizeof(pki)));
1978             break;
1979         }
1980         case IPV6_ADD_MEMBERSHIP:
1981         case IPV6_DROP_MEMBERSHIP:
1982         {
1983             struct ipv6_mreq ipv6mreq;
1984 
1985             if (optlen < sizeof(ipv6mreq)) {
1986                 return -TARGET_EINVAL;
1987             }
1988 
1989             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1990                 return -TARGET_EFAULT;
1991             }
1992 
1993             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1994 
1995             ret = get_errno(setsockopt(sockfd, level, optname,
1996                                        &ipv6mreq, sizeof(ipv6mreq)));
1997             break;
1998         }
1999         default:
2000             goto unimplemented;
2001         }
2002         break;
2003     case SOL_ICMPV6:
2004         switch (optname) {
2005         case ICMPV6_FILTER:
2006         {
2007             struct icmp6_filter icmp6f;
2008 
2009             if (optlen > sizeof(icmp6f)) {
2010                 optlen = sizeof(icmp6f);
2011             }
2012 
2013             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2014                 return -TARGET_EFAULT;
2015             }
2016 
2017             for (val = 0; val < 8; val++) {
2018                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2019             }
2020 
2021             ret = get_errno(setsockopt(sockfd, level, optname,
2022                                        &icmp6f, optlen));
2023             break;
2024         }
2025         default:
2026             goto unimplemented;
2027         }
2028         break;
2029     case SOL_RAW:
2030         switch (optname) {
2031         case ICMP_FILTER:
2032         case IPV6_CHECKSUM:
2033             /* these take a u32 value */
2034             if (optlen < sizeof(uint32_t)) {
2035                 return -TARGET_EINVAL;
2036             }
2037 
2038             if (get_user_u32(val, optval_addr)) {
2039                 return -TARGET_EFAULT;
2040             }
2041             ret = get_errno(setsockopt(sockfd, level, optname,
2042                                        &val, sizeof(val)));
2043             break;
2044 
2045         default:
2046             goto unimplemented;
2047         }
2048         break;
2049 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2050     case SOL_ALG:
2051         switch (optname) {
2052         case ALG_SET_KEY:
2053         {
2054             char *alg_key = g_malloc(optlen);
2055 
2056             if (!alg_key) {
2057                 return -TARGET_ENOMEM;
2058             }
2059             if (copy_from_user(alg_key, optval_addr, optlen)) {
2060                 g_free(alg_key);
2061                 return -TARGET_EFAULT;
2062             }
2063             ret = get_errno(setsockopt(sockfd, level, optname,
2064                                        alg_key, optlen));
2065             g_free(alg_key);
2066             break;
2067         }
2068         case ALG_SET_AEAD_AUTHSIZE:
2069         {
2070             ret = get_errno(setsockopt(sockfd, level, optname,
2071                                        NULL, optlen));
2072             break;
2073         }
2074         default:
2075             goto unimplemented;
2076         }
2077         break;
2078 #endif
2079     case TARGET_SOL_SOCKET:
2080         switch (optname) {
2081         case TARGET_SO_RCVTIMEO:
2082         {
2083                 struct timeval tv;
2084 
2085                 optname = SO_RCVTIMEO;
2086 
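                /* Shared tail for TARGET_SO_RCVTIMEO and TARGET_SO_SNDTIMEO:
                 * convert the guest struct timeval and apply the host
                 * setsockopt().
                 */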
2087 set_timeout:
2088                 if (optlen != sizeof(struct target_timeval)) {
2089                     return -TARGET_EINVAL;
2090                 }
2091 
2092                 if (copy_from_user_timeval(&tv, optval_addr)) {
2093                     return -TARGET_EFAULT;
2094                 }
2095 
2096                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2097                                 &tv, sizeof(tv)));
2098                 return ret;
2099         }
2100         case TARGET_SO_SNDTIMEO:
2101                 optname = SO_SNDTIMEO;
2102                 goto set_timeout;
2103         case TARGET_SO_ATTACH_FILTER:
2104         {
2105                 struct target_sock_fprog *tfprog;
2106                 struct target_sock_filter *tfilter;
2107                 struct sock_fprog fprog;
2108                 struct sock_filter *filter;
2109                 int i;
2110 
2111                 if (optlen != sizeof(*tfprog)) {
2112                     return -TARGET_EINVAL;
2113                 }
2114                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2115                     return -TARGET_EFAULT;
2116                 }
2117                 if (!lock_user_struct(VERIFY_READ, tfilter,
2118                                       tswapal(tfprog->filter), 0)) {
2119                     unlock_user_struct(tfprog, optval_addr, 1);
2120                     return -TARGET_EFAULT;
2121                 }
2122 
2123                 fprog.len = tswap16(tfprog->len);
2124                 filter = g_try_new(struct sock_filter, fprog.len);
2125                 if (filter == NULL) {
2126                     unlock_user_struct(tfilter, tfprog->filter, 1);
2127                     unlock_user_struct(tfprog, optval_addr, 1);
2128                     return -TARGET_ENOMEM;
2129                 }
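                /* Convert each BPF instruction from guest to host byte
                 * order before handing the filter program to the host
                 * kernel.
                 */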
2130                 for (i = 0; i < fprog.len; i++) {
2131                     filter[i].code = tswap16(tfilter[i].code);
2132                     filter[i].jt = tfilter[i].jt;
2133                     filter[i].jf = tfilter[i].jf;
2134                     filter[i].k = tswap32(tfilter[i].k);
2135                 }
2136                 fprog.filter = filter;
2137 
2138                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2139                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2140                 g_free(filter);
2141 
2142                 unlock_user_struct(tfilter, tfprog->filter, 1);
2143                 unlock_user_struct(tfprog, optval_addr, 1);
2144                 return ret;
2145         }
2146         case TARGET_SO_BINDTODEVICE:
2147         {
2148                 char *dev_ifname, *addr_ifname;
2149 
2150                 if (optlen > IFNAMSIZ - 1) {
2151                     optlen = IFNAMSIZ - 1;
2152                 }
2153                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2154                 if (!dev_ifname) {
2155                     return -TARGET_EFAULT;
2156                 }
2157                 optname = SO_BINDTODEVICE;
2158                 addr_ifname = alloca(IFNAMSIZ);
2159                 memcpy(addr_ifname, dev_ifname, optlen);
2160                 addr_ifname[optlen] = 0;
2161                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2162                                            addr_ifname, optlen));
2163                 unlock_user(dev_ifname, optval_addr, 0);
2164                 return ret;
2165         }
2166         case TARGET_SO_LINGER:
2167         {
2168                 struct linger lg;
2169                 struct target_linger *tlg;
2170 
2171                 if (optlen != sizeof(struct target_linger)) {
2172                     return -TARGET_EINVAL;
2173                 }
2174                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2175                     return -TARGET_EFAULT;
2176                 }
2177                 __get_user(lg.l_onoff, &tlg->l_onoff);
2178                 __get_user(lg.l_linger, &tlg->l_linger);
2179                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2180                                 &lg, sizeof(lg)));
2181                 unlock_user_struct(tlg, optval_addr, 0);
2182                 return ret;
2183         }
2184             /* Options with 'int' argument.  */
2185         case TARGET_SO_DEBUG:
2186                 optname = SO_DEBUG;
2187                 break;
2188         case TARGET_SO_REUSEADDR:
2189                 optname = SO_REUSEADDR;
2190                 break;
2191 #ifdef SO_REUSEPORT
2192         case TARGET_SO_REUSEPORT:
2193                 optname = SO_REUSEPORT;
2194                 break;
2195 #endif
2196         case TARGET_SO_TYPE:
2197                 optname = SO_TYPE;
2198                 break;
2199         case TARGET_SO_ERROR:
2200                 optname = SO_ERROR;
2201                 break;
2202         case TARGET_SO_DONTROUTE:
2203                 optname = SO_DONTROUTE;
2204                 break;
2205         case TARGET_SO_BROADCAST:
2206                 optname = SO_BROADCAST;
2207                 break;
2208         case TARGET_SO_SNDBUF:
2209                 optname = SO_SNDBUF;
2210                 break;
2211         case TARGET_SO_SNDBUFFORCE:
2212                 optname = SO_SNDBUFFORCE;
2213                 break;
2214         case TARGET_SO_RCVBUF:
2215                 optname = SO_RCVBUF;
2216                 break;
2217         case TARGET_SO_RCVBUFFORCE:
2218                 optname = SO_RCVBUFFORCE;
2219                 break;
2220         case TARGET_SO_KEEPALIVE:
2221                 optname = SO_KEEPALIVE;
2222                 break;
2223         case TARGET_SO_OOBINLINE:
2224                 optname = SO_OOBINLINE;
2225                 break;
2226         case TARGET_SO_NO_CHECK:
2227                 optname = SO_NO_CHECK;
2228                 break;
2229         case TARGET_SO_PRIORITY:
2230                 optname = SO_PRIORITY;
2231                 break;
2232 #ifdef SO_BSDCOMPAT
2233         case TARGET_SO_BSDCOMPAT:
2234                 optname = SO_BSDCOMPAT;
2235                 break;
2236 #endif
2237         case TARGET_SO_PASSCRED:
2238                 optname = SO_PASSCRED;
2239                 break;
2240         case TARGET_SO_PASSSEC:
2241                 optname = SO_PASSSEC;
2242                 break;
2243         case TARGET_SO_TIMESTAMP:
2244                 optname = SO_TIMESTAMP;
2245                 break;
2246         case TARGET_SO_RCVLOWAT:
2247                 optname = SO_RCVLOWAT;
2248                 break;
2249         default:
2250             goto unimplemented;
2251         }
2252         if (optlen < sizeof(uint32_t))
2253             return -TARGET_EINVAL;
2254 
2255         if (get_user_u32(val, optval_addr))
2256             return -TARGET_EFAULT;
2257         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2258         break;
2259 #ifdef SOL_NETLINK
2260     case SOL_NETLINK:
2261         switch (optname) {
2262         case NETLINK_PKTINFO:
2263         case NETLINK_ADD_MEMBERSHIP:
2264         case NETLINK_DROP_MEMBERSHIP:
2265         case NETLINK_BROADCAST_ERROR:
2266         case NETLINK_NO_ENOBUFS:
2267 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2268         case NETLINK_LISTEN_ALL_NSID:
2269         case NETLINK_CAP_ACK:
2270 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2271 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2272         case NETLINK_EXT_ACK:
2273 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2274 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2275         case NETLINK_GET_STRICT_CHK:
2276 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2277             break;
2278         default:
2279             goto unimplemented;
2280         }
2281         val = 0;
2282         if (optlen < sizeof(uint32_t)) {
2283             return -TARGET_EINVAL;
2284         }
2285         if (get_user_u32(val, optval_addr)) {
2286             return -TARGET_EFAULT;
2287         }
2288         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2289                                    sizeof(val)));
2290         break;
2291 #endif /* SOL_NETLINK */
2292     default:
2293     unimplemented:
2294         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2295         ret = -TARGET_ENOPROTOOPT;
2296     }
2297     return ret;
2298 }
2299 
2300 /* do_getsockopt() Must return target values and target errnos. */
2301 static abi_long do_getsockopt(int sockfd, int level, int optname,
2302                               abi_ulong optval_addr, abi_ulong optlen)
2303 {
2304     abi_long ret;
2305     int len, val;
2306     socklen_t lv;
2307 
2308     switch(level) {
2309     case TARGET_SOL_SOCKET:
2310         level = SOL_SOCKET;
2311         switch (optname) {
2312         /* These don't just return a single integer */
2313         case TARGET_SO_RCVTIMEO:
2314         case TARGET_SO_SNDTIMEO:
2315         case TARGET_SO_PEERNAME:
2316             goto unimplemented;
2317         case TARGET_SO_PEERCRED: {
2318             struct ucred cr;
2319             socklen_t crlen;
2320             struct target_ucred *tcr;
2321 
2322             if (get_user_u32(len, optlen)) {
2323                 return -TARGET_EFAULT;
2324             }
2325             if (len < 0) {
2326                 return -TARGET_EINVAL;
2327             }
2328 
2329             crlen = sizeof(cr);
2330             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2331                                        &cr, &crlen));
2332             if (ret < 0) {
2333                 return ret;
2334             }
2335             if (len > crlen) {
2336                 len = crlen;
2337             }
2338             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2339                 return -TARGET_EFAULT;
2340             }
2341             __put_user(cr.pid, &tcr->pid);
2342             __put_user(cr.uid, &tcr->uid);
2343             __put_user(cr.gid, &tcr->gid);
2344             unlock_user_struct(tcr, optval_addr, 1);
2345             if (put_user_u32(len, optlen)) {
2346                 return -TARGET_EFAULT;
2347             }
2348             break;
2349         }
2350         case TARGET_SO_PEERSEC: {
2351             char *name;
2352 
2353             if (get_user_u32(len, optlen)) {
2354                 return -TARGET_EFAULT;
2355             }
2356             if (len < 0) {
2357                 return -TARGET_EINVAL;
2358             }
2359             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2360             if (!name) {
2361                 return -TARGET_EFAULT;
2362             }
2363             lv = len;
2364             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2365                                        name, &lv));
2366             if (put_user_u32(lv, optlen)) {
2367                 ret = -TARGET_EFAULT;
2368             }
2369             unlock_user(name, optval_addr, lv);
2370             break;
2371         }
2372         case TARGET_SO_LINGER:
2373         {
2374             struct linger lg;
2375             socklen_t lglen;
2376             struct target_linger *tlg;
2377 
2378             if (get_user_u32(len, optlen)) {
2379                 return -TARGET_EFAULT;
2380             }
2381             if (len < 0) {
2382                 return -TARGET_EINVAL;
2383             }
2384 
2385             lglen = sizeof(lg);
2386             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2387                                        &lg, &lglen));
2388             if (ret < 0) {
2389                 return ret;
2390             }
2391             if (len > lglen) {
2392                 len = lglen;
2393             }
2394             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2395                 return -TARGET_EFAULT;
2396             }
2397             __put_user(lg.l_onoff, &tlg->l_onoff);
2398             __put_user(lg.l_linger, &tlg->l_linger);
2399             unlock_user_struct(tlg, optval_addr, 1);
2400             if (put_user_u32(len, optlen)) {
2401                 return -TARGET_EFAULT;
2402             }
2403             break;
2404         }
2405         /* Options with 'int' argument.  */
2406         case TARGET_SO_DEBUG:
2407             optname = SO_DEBUG;
2408             goto int_case;
2409         case TARGET_SO_REUSEADDR:
2410             optname = SO_REUSEADDR;
2411             goto int_case;
2412 #ifdef SO_REUSEPORT
2413         case TARGET_SO_REUSEPORT:
2414             optname = SO_REUSEPORT;
2415             goto int_case;
2416 #endif
2417         case TARGET_SO_TYPE:
2418             optname = SO_TYPE;
2419             goto int_case;
2420         case TARGET_SO_ERROR:
2421             optname = SO_ERROR;
2422             goto int_case;
2423         case TARGET_SO_DONTROUTE:
2424             optname = SO_DONTROUTE;
2425             goto int_case;
2426         case TARGET_SO_BROADCAST:
2427             optname = SO_BROADCAST;
2428             goto int_case;
2429         case TARGET_SO_SNDBUF:
2430             optname = SO_SNDBUF;
2431             goto int_case;
2432         case TARGET_SO_RCVBUF:
2433             optname = SO_RCVBUF;
2434             goto int_case;
2435         case TARGET_SO_KEEPALIVE:
2436             optname = SO_KEEPALIVE;
2437             goto int_case;
2438         case TARGET_SO_OOBINLINE:
2439             optname = SO_OOBINLINE;
2440             goto int_case;
2441         case TARGET_SO_NO_CHECK:
2442             optname = SO_NO_CHECK;
2443             goto int_case;
2444         case TARGET_SO_PRIORITY:
2445             optname = SO_PRIORITY;
2446             goto int_case;
2447 #ifdef SO_BSDCOMPAT
2448         case TARGET_SO_BSDCOMPAT:
2449             optname = SO_BSDCOMPAT;
2450             goto int_case;
2451 #endif
2452         case TARGET_SO_PASSCRED:
2453             optname = SO_PASSCRED;
2454             goto int_case;
2455         case TARGET_SO_TIMESTAMP:
2456             optname = SO_TIMESTAMP;
2457             goto int_case;
2458         case TARGET_SO_RCVLOWAT:
2459             optname = SO_RCVLOWAT;
2460             goto int_case;
2461         case TARGET_SO_ACCEPTCONN:
2462             optname = SO_ACCEPTCONN;
2463             goto int_case;
2464         default:
2465             goto int_case;
2466         }
2467         break;
2468     case SOL_TCP:
2469         /* TCP options all take an 'int' value.  */
2470     int_case:
2471         if (get_user_u32(len, optlen))
2472             return -TARGET_EFAULT;
2473         if (len < 0)
2474             return -TARGET_EINVAL;
2475         lv = sizeof(lv);
2476         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2477         if (ret < 0)
2478             return ret;
2479         if (optname == SO_TYPE) {
2480             val = host_to_target_sock_type(val);
2481         }
2482         if (len > lv)
2483             len = lv;
2484         if (len == 4) {
2485             if (put_user_u32(val, optval_addr))
2486                 return -TARGET_EFAULT;
2487         } else {
2488             if (put_user_u8(val, optval_addr))
2489                 return -TARGET_EFAULT;
2490         }
2491         if (put_user_u32(len, optlen))
2492             return -TARGET_EFAULT;
2493         break;
2494     case SOL_IP:
2495         switch(optname) {
2496         case IP_TOS:
2497         case IP_TTL:
2498         case IP_HDRINCL:
2499         case IP_ROUTER_ALERT:
2500         case IP_RECVOPTS:
2501         case IP_RETOPTS:
2502         case IP_PKTINFO:
2503         case IP_MTU_DISCOVER:
2504         case IP_RECVERR:
2505         case IP_RECVTOS:
2506 #ifdef IP_FREEBIND
2507         case IP_FREEBIND:
2508 #endif
2509         case IP_MULTICAST_TTL:
2510         case IP_MULTICAST_LOOP:
2511             if (get_user_u32(len, optlen))
2512                 return -TARGET_EFAULT;
2513             if (len < 0)
2514                 return -TARGET_EINVAL;
2515             lv = sizeof(lv);
2516             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2517             if (ret < 0)
2518                 return ret;
2519             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2520                 len = 1;
2521                 if (put_user_u32(len, optlen)
2522                     || put_user_u8(val, optval_addr))
2523                     return -TARGET_EFAULT;
2524             } else {
2525                 if (len > sizeof(int))
2526                     len = sizeof(int);
2527                 if (put_user_u32(len, optlen)
2528                     || put_user_u32(val, optval_addr))
2529                     return -TARGET_EFAULT;
2530             }
2531             break;
2532         default:
2533             ret = -TARGET_ENOPROTOOPT;
2534             break;
2535         }
2536         break;
2537     case SOL_IPV6:
2538         switch (optname) {
2539         case IPV6_MTU_DISCOVER:
2540         case IPV6_MTU:
2541         case IPV6_V6ONLY:
2542         case IPV6_RECVPKTINFO:
2543         case IPV6_UNICAST_HOPS:
2544         case IPV6_MULTICAST_HOPS:
2545         case IPV6_MULTICAST_LOOP:
2546         case IPV6_RECVERR:
2547         case IPV6_RECVHOPLIMIT:
2548         case IPV6_2292HOPLIMIT:
2549         case IPV6_CHECKSUM:
2550         case IPV6_ADDRFORM:
2551         case IPV6_2292PKTINFO:
2552         case IPV6_RECVTCLASS:
2553         case IPV6_RECVRTHDR:
2554         case IPV6_2292RTHDR:
2555         case IPV6_RECVHOPOPTS:
2556         case IPV6_2292HOPOPTS:
2557         case IPV6_RECVDSTOPTS:
2558         case IPV6_2292DSTOPTS:
2559         case IPV6_TCLASS:
2560 #ifdef IPV6_RECVPATHMTU
2561         case IPV6_RECVPATHMTU:
2562 #endif
2563 #ifdef IPV6_TRANSPARENT
2564         case IPV6_TRANSPARENT:
2565 #endif
2566 #ifdef IPV6_FREEBIND
2567         case IPV6_FREEBIND:
2568 #endif
2569 #ifdef IPV6_RECVORIGDSTADDR
2570         case IPV6_RECVORIGDSTADDR:
2571 #endif
2572             if (get_user_u32(len, optlen))
2573                 return -TARGET_EFAULT;
2574             if (len < 0)
2575                 return -TARGET_EINVAL;
2576             lv = sizeof(lv);
2577             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2578             if (ret < 0)
2579                 return ret;
2580             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2581                 len = 1;
2582                 if (put_user_u32(len, optlen)
2583                     || put_user_u8(val, optval_addr))
2584                     return -TARGET_EFAULT;
2585             } else {
2586                 if (len > sizeof(int))
2587                     len = sizeof(int);
2588                 if (put_user_u32(len, optlen)
2589                     || put_user_u32(val, optval_addr))
2590                     return -TARGET_EFAULT;
2591             }
2592             break;
2593         default:
2594             ret = -TARGET_ENOPROTOOPT;
2595             break;
2596         }
2597         break;
2598 #ifdef SOL_NETLINK
2599     case SOL_NETLINK:
2600         switch (optname) {
2601         case NETLINK_PKTINFO:
2602         case NETLINK_BROADCAST_ERROR:
2603         case NETLINK_NO_ENOBUFS:
2604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2605         case NETLINK_LISTEN_ALL_NSID:
2606         case NETLINK_CAP_ACK:
2607 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2608 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2609         case NETLINK_EXT_ACK:
2610 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2612         case NETLINK_GET_STRICT_CHK:
2613 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2614             if (get_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (len != sizeof(val)) {
2618                 return -TARGET_EINVAL;
2619             }
2620             lv = len;
2621             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2622             if (ret < 0) {
2623                 return ret;
2624             }
2625             if (put_user_u32(lv, optlen)
2626                 || put_user_u32(val, optval_addr)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2631         case NETLINK_LIST_MEMBERSHIPS:
2632         {
2633             uint32_t *results;
2634             int i;
2635             if (get_user_u32(len, optlen)) {
2636                 return -TARGET_EFAULT;
2637             }
2638             if (len < 0) {
2639                 return -TARGET_EINVAL;
2640             }
2641             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2642             if (!results) {
2643                 return -TARGET_EFAULT;
2644             }
2645             lv = len;
2646             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2647             if (ret < 0) {
2648                 unlock_user(results, optval_addr, 0);
2649                 return ret;
2650             }
2651             /* Swap host endianness to target endianness. */
2652             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2653                 results[i] = tswap32(results[i]);
2654             }
2655             if (put_user_u32(lv, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             unlock_user(results, optval_addr, 0);
2659             break;
2660         }
2661 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2662         default:
2663             goto unimplemented;
2664         }
2665         break;
2666 #endif /* SOL_NETLINK */
2667     default:
2668     unimplemented:
2669         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2670                  level, optname);
2671         ret = -TARGET_EOPNOTSUPP;
2672         break;
2673     }
2674     return ret;
2675 }
2676 
2677 /* Convert target low/high pair representing file offset into the host
2678  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2679  * as the kernel doesn't handle them either.
2680  */
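/* For example (illustrative only): on a 32-bit target with a 64-bit host,
 * tlow = 0x00001000 and thigh = 0x00000002 combine into
 * off = 0x0000000200001000, so *hlow receives the whole offset and
 * *hhigh ends up 0.
 */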
2681 static void target_to_host_low_high(abi_ulong tlow,
2682                                     abi_ulong thigh,
2683                                     unsigned long *hlow,
2684                                     unsigned long *hhigh)
2685 {
2686     uint64_t off = tlow |
2687         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2688         TARGET_LONG_BITS / 2;
2689 
2690     *hlow = off;
2691     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2692 }
2693 
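/* Lock a guest iovec array into host memory, translating each element into
 * a host struct iovec. On failure, errno is set and NULL is returned; bad
 * buffers after the first are turned into zero-length entries so a partial
 * transfer can still take place.
 */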
2694 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2695                                 abi_ulong count, int copy)
2696 {
2697     struct target_iovec *target_vec;
2698     struct iovec *vec;
2699     abi_ulong total_len, max_len;
2700     int i;
2701     int err = 0;
2702     bool bad_address = false;
2703 
2704     if (count == 0) {
2705         errno = 0;
2706         return NULL;
2707     }
2708     if (count > IOV_MAX) {
2709         errno = EINVAL;
2710         return NULL;
2711     }
2712 
2713     vec = g_try_new0(struct iovec, count);
2714     if (vec == NULL) {
2715         errno = ENOMEM;
2716         return NULL;
2717     }
2718 
2719     target_vec = lock_user(VERIFY_READ, target_addr,
2720                            count * sizeof(struct target_iovec), 1);
2721     if (target_vec == NULL) {
2722         err = EFAULT;
2723         goto fail2;
2724     }
2725 
2726     /* ??? If host page size > target page size, this will result in a
2727        value larger than what we can actually support.  */
2728     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2729     total_len = 0;
2730 
2731     for (i = 0; i < count; i++) {
2732         abi_ulong base = tswapal(target_vec[i].iov_base);
2733         abi_long len = tswapal(target_vec[i].iov_len);
2734 
2735         if (len < 0) {
2736             err = EINVAL;
2737             goto fail;
2738         } else if (len == 0) {
2739             /* Zero length pointer is ignored.  */
2740             vec[i].iov_base = 0;
2741         } else {
2742             vec[i].iov_base = lock_user(type, base, len, copy);
2743             /* If the first buffer pointer is bad, this is a fault.  But
2744              * subsequent bad buffers will result in a partial write; this
2745              * is realized by filling the vector with null pointers and
2746              * zero lengths. */
2747             if (!vec[i].iov_base) {
2748                 if (i == 0) {
2749                     err = EFAULT;
2750                     goto fail;
2751                 } else {
2752                     bad_address = true;
2753                 }
2754             }
2755             if (bad_address) {
2756                 len = 0;
2757             }
2758             if (len > max_len - total_len) {
2759                 len = max_len - total_len;
2760             }
2761         }
2762         vec[i].iov_len = len;
2763         total_len += len;
2764     }
2765 
2766     unlock_user(target_vec, target_addr, 0);
2767     return vec;
2768 
2769  fail:
2770     while (--i >= 0) {
2771         if (tswapal(target_vec[i].iov_len) > 0) {
2772             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2773         }
2774     }
2775     unlock_user(target_vec, target_addr, 0);
2776  fail2:
2777     g_free(vec);
2778     errno = err;
2779     return NULL;
2780 }
2781 
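/* Undo lock_iovec(): unlock each guest buffer (copying data back to the
 * guest when 'copy' is set) and free the host iovec array.
 */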
2782 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2783                          abi_ulong count, int copy)
2784 {
2785     struct target_iovec *target_vec;
2786     int i;
2787 
2788     target_vec = lock_user(VERIFY_READ, target_addr,
2789                            count * sizeof(struct target_iovec), 1);
2790     if (target_vec) {
2791         for (i = 0; i < count; i++) {
2792             abi_ulong base = tswapal(target_vec[i].iov_base);
2793             abi_long len = tswapal(target_vec[i].iov_len);
2794             if (len < 0) {
2795                 break;
2796             }
2797             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2798         }
2799         unlock_user(target_vec, target_addr, 0);
2800     }
2801 
2802     g_free(vec);
2803 }
2804 
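/* Translate the guest's socket type and type flags (SOCK_CLOEXEC,
 * SOCK_NONBLOCK) into host values; returns -TARGET_EINVAL for flags the
 * host cannot express.
 */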
2805 static inline int target_to_host_sock_type(int *type)
2806 {
2807     int host_type = 0;
2808     int target_type = *type;
2809 
2810     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2811     case TARGET_SOCK_DGRAM:
2812         host_type = SOCK_DGRAM;
2813         break;
2814     case TARGET_SOCK_STREAM:
2815         host_type = SOCK_STREAM;
2816         break;
2817     default:
2818         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2819         break;
2820     }
2821     if (target_type & TARGET_SOCK_CLOEXEC) {
2822 #if defined(SOCK_CLOEXEC)
2823         host_type |= SOCK_CLOEXEC;
2824 #else
2825         return -TARGET_EINVAL;
2826 #endif
2827     }
2828     if (target_type & TARGET_SOCK_NONBLOCK) {
2829 #if defined(SOCK_NONBLOCK)
2830         host_type |= SOCK_NONBLOCK;
2831 #elif !defined(O_NONBLOCK)
2832         return -TARGET_EINVAL;
2833 #endif
2834     }
2835     *type = host_type;
2836     return 0;
2837 }
2838 
2839 /* Try to emulate socket type flags after socket creation.  */
2840 static int sock_flags_fixup(int fd, int target_type)
2841 {
2842 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2843     if (target_type & TARGET_SOCK_NONBLOCK) {
2844         int flags = fcntl(fd, F_GETFL);
2845         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2846             close(fd);
2847             return -TARGET_EINVAL;
2848         }
2849     }
2850 #endif
2851     return fd;
2852 }
2853 
2854 /* do_socket() Must return target values and target errnos. */
2855 static abi_long do_socket(int domain, int type, int protocol)
2856 {
2857     int target_type = type;
2858     int ret;
2859 
2860     ret = target_to_host_sock_type(&type);
2861     if (ret) {
2862         return ret;
2863     }
2864 
2865     if (domain == PF_NETLINK && !(
2866 #ifdef CONFIG_RTNETLINK
2867          protocol == NETLINK_ROUTE ||
2868 #endif
2869          protocol == NETLINK_KOBJECT_UEVENT ||
2870          protocol == NETLINK_AUDIT)) {
2871         return -EPFNOSUPPORT;
2872     }
2873 
2874     if (domain == AF_PACKET ||
2875         (domain == AF_INET && type == SOCK_PACKET)) {
2876         protocol = tswap16(protocol);
2877     }
2878 
2879     ret = get_errno(socket(domain, type, protocol));
2880     if (ret >= 0) {
2881         ret = sock_flags_fixup(ret, target_type);
2882         if (type == SOCK_PACKET) {
2883             /* Handle an obsolete case: if the socket type is
2884              * SOCK_PACKET, bind by name.
2885              */
2886             fd_trans_register(ret, &target_packet_trans);
2887         } else if (domain == PF_NETLINK) {
2888             switch (protocol) {
2889 #ifdef CONFIG_RTNETLINK
2890             case NETLINK_ROUTE:
2891                 fd_trans_register(ret, &target_netlink_route_trans);
2892                 break;
2893 #endif
2894             case NETLINK_KOBJECT_UEVENT:
2895                 /* nothing to do: messages are strings */
2896                 break;
2897             case NETLINK_AUDIT:
2898                 fd_trans_register(ret, &target_netlink_audit_trans);
2899                 break;
2900             default:
2901                 g_assert_not_reached();
2902             }
2903         }
2904     }
2905     return ret;
2906 }
2907 
2908 /* do_bind() Must return target values and target errnos. */
2909 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2910                         socklen_t addrlen)
2911 {
2912     void *addr;
2913     abi_long ret;
2914 
2915     if ((int)addrlen < 0) {
2916         return -TARGET_EINVAL;
2917     }
2918 
2919     addr = alloca(addrlen+1);
2920 
2921     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2922     if (ret)
2923         return ret;
2924 
2925     return get_errno(bind(sockfd, addr, addrlen));
2926 }
2927 
2928 /* do_connect() Must return target values and target errnos. */
2929 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2930                            socklen_t addrlen)
2931 {
2932     void *addr;
2933     abi_long ret;
2934 
2935     if ((int)addrlen < 0) {
2936         return -TARGET_EINVAL;
2937     }
2938 
2939     addr = alloca(addrlen+1);
2940 
2941     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2942     if (ret)
2943         return ret;
2944 
2945     return get_errno(safe_connect(sockfd, addr, addrlen));
2946 }
2947 
2948 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2949 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2950                                       int flags, int send)
2951 {
2952     abi_long ret, len;
2953     struct msghdr msg;
2954     abi_ulong count;
2955     struct iovec *vec;
2956     abi_ulong target_vec;
2957 
2958     if (msgp->msg_name) {
2959         msg.msg_namelen = tswap32(msgp->msg_namelen);
2960         msg.msg_name = alloca(msg.msg_namelen+1);
2961         ret = target_to_host_sockaddr(fd, msg.msg_name,
2962                                       tswapal(msgp->msg_name),
2963                                       msg.msg_namelen);
2964         if (ret == -TARGET_EFAULT) {
2965             /* For connected sockets msg_name and msg_namelen must
2966              * be ignored, so returning EFAULT immediately is wrong.
2967              * Instead, pass a bad msg_name to the host kernel, and
2968              * let it decide whether to return EFAULT or not.
2969              */
2970             msg.msg_name = (void *)-1;
2971         } else if (ret) {
2972             goto out2;
2973         }
2974     } else {
2975         msg.msg_name = NULL;
2976         msg.msg_namelen = 0;
2977     }
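    /* Allocate the host control-message buffer at twice the guest-declared
     * size, so payloads that are larger on the host than on the target
     * still fit (see the allocation-strategy note in target_to_host_cmsg()).
     */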
2978     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2979     msg.msg_control = alloca(msg.msg_controllen);
2980     memset(msg.msg_control, 0, msg.msg_controllen);
2981 
2982     msg.msg_flags = tswap32(msgp->msg_flags);
2983 
2984     count = tswapal(msgp->msg_iovlen);
2985     target_vec = tswapal(msgp->msg_iov);
2986 
2987     if (count > IOV_MAX) {
2988         /* sendmsg/recvmsg return a different errno for this condition than
2989          * readv/writev, so we must catch it here before lock_iovec() does.
2990          */
2991         ret = -TARGET_EMSGSIZE;
2992         goto out2;
2993     }
2994 
2995     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2996                      target_vec, count, send);
2997     if (vec == NULL) {
2998         ret = -host_to_target_errno(errno);
2999         goto out2;
3000     }
3001     msg.msg_iovlen = count;
3002     msg.msg_iov = vec;
3003 
3004     if (send) {
3005         if (fd_trans_target_to_host_data(fd)) {
3006             void *host_msg;
3007 
3008             host_msg = g_malloc(msg.msg_iov->iov_len);
3009             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3010             ret = fd_trans_target_to_host_data(fd)(host_msg,
3011                                                    msg.msg_iov->iov_len);
3012             if (ret >= 0) {
3013                 msg.msg_iov->iov_base = host_msg;
3014                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3015             }
3016             g_free(host_msg);
3017         } else {
3018             ret = target_to_host_cmsg(&msg, msgp);
3019             if (ret == 0) {
3020                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3021             }
3022         }
3023     } else {
3024         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3025         if (!is_error(ret)) {
3026             len = ret;
3027             if (fd_trans_host_to_target_data(fd)) {
3028                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3029                                                MIN(msg.msg_iov->iov_len, len));
3030             } else {
3031                 ret = host_to_target_cmsg(msgp, &msg);
3032             }
3033             if (!is_error(ret)) {
3034                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3035                 msgp->msg_flags = tswap32(msg.msg_flags);
3036                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3037                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3038                                     msg.msg_name, msg.msg_namelen);
3039                     if (ret) {
3040                         goto out;
3041                     }
3042                 }
3043 
3044                 ret = len;
3045             }
3046         }
3047     }
3048 
3049 out:
3050     unlock_iovec(vec, target_vec, count, !send);
3051 out2:
3052     return ret;
3053 }
3054 
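/* do_sendrecvmsg() Must return target values and target errnos. */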
3055 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3056                                int flags, int send)
3057 {
3058     abi_long ret;
3059     struct target_msghdr *msgp;
3060 
3061     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3062                           msgp,
3063                           target_msg,
3064                           send ? 1 : 0)) {
3065         return -TARGET_EFAULT;
3066     }
3067     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3068     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3069     return ret;
3070 }
3071 
3072 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3073  * so it might not have this *mmsg-specific flag either.
3074  */
3075 #ifndef MSG_WAITFORONE
3076 #define MSG_WAITFORONE 0x10000
3077 #endif
3078 
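/* do_sendrecvmmsg() Must return target values and target errnos. */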
3079 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3080                                 unsigned int vlen, unsigned int flags,
3081                                 int send)
3082 {
3083     struct target_mmsghdr *mmsgp;
3084     abi_long ret = 0;
3085     int i;
3086 
3087     if (vlen > UIO_MAXIOV) {
3088         vlen = UIO_MAXIOV;
3089     }
3090 
3091     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3092     if (!mmsgp) {
3093         return -TARGET_EFAULT;
3094     }
3095 
3096     for (i = 0; i < vlen; i++) {
3097         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3098         if (is_error(ret)) {
3099             break;
3100         }
3101         mmsgp[i].msg_len = tswap32(ret);
3102         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3103         if (flags & MSG_WAITFORONE) {
3104             flags |= MSG_DONTWAIT;
3105         }
3106     }
3107 
3108     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3109 
3110     /* Return the number of datagrams sent or received if we transferred
3111      * any at all; otherwise return the error.
3112      */
3113     if (i) {
3114         return i;
3115     }
3116     return ret;
3117 }
3118 
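/*
 * do_accept4(), do_getpeername() and do_getsockname() below all follow the
 * same pattern: read the guest's addrlen, sanity-check it, let the host call
 * fill a scratch sockaddr, then copy back at most MIN(addrlen, ret_addrlen)
 * bytes while still reporting the full ret_addrlen to the guest, so that
 * (as with the native syscalls) a caller can detect a truncated address.
 */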
3119 /* do_accept4() must return target values and target errnos. */
3120 static abi_long do_accept4(int fd, abi_ulong target_addr,
3121                            abi_ulong target_addrlen_addr, int flags)
3122 {
3123     socklen_t addrlen, ret_addrlen;
3124     void *addr;
3125     abi_long ret;
3126     int host_flags;
3127 
3128     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3129 
3130     if (target_addr == 0) {
3131         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3132     }
3133 
3134     /* linux returns EINVAL if addrlen pointer is invalid */
3135     if (get_user_u32(addrlen, target_addrlen_addr))
3136         return -TARGET_EINVAL;
3137 
3138     if ((int)addrlen < 0) {
3139         return -TARGET_EINVAL;
3140     }
3141 
3142     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3143         return -TARGET_EINVAL;
3144 
3145     addr = alloca(addrlen);
3146 
3147     ret_addrlen = addrlen;
3148     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3149     if (!is_error(ret)) {
3150         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3151         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3152             ret = -TARGET_EFAULT;
3153         }
3154     }
3155     return ret;
3156 }
3157 
3158 /* do_getpeername() must return target values and target errnos. */
3159 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3160                                abi_ulong target_addrlen_addr)
3161 {
3162     socklen_t addrlen, ret_addrlen;
3163     void *addr;
3164     abi_long ret;
3165 
3166     if (get_user_u32(addrlen, target_addrlen_addr))
3167         return -TARGET_EFAULT;
3168 
3169     if ((int)addrlen < 0) {
3170         return -TARGET_EINVAL;
3171     }
3172 
3173     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3174         return -TARGET_EFAULT;
3175 
3176     addr = alloca(addrlen);
3177 
3178     ret_addrlen = addrlen;
3179     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3180     if (!is_error(ret)) {
3181         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3182         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3183             ret = -TARGET_EFAULT;
3184         }
3185     }
3186     return ret;
3187 }
3188 
3189 /* do_getsockname() must return target values and target errnos. */
3190 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3191                                abi_ulong target_addrlen_addr)
3192 {
3193     socklen_t addrlen, ret_addrlen;
3194     void *addr;
3195     abi_long ret;
3196 
3197     if (get_user_u32(addrlen, target_addrlen_addr))
3198         return -TARGET_EFAULT;
3199 
3200     if ((int)addrlen < 0) {
3201         return -TARGET_EINVAL;
3202     }
3203 
3204     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3205         return -TARGET_EFAULT;
3206 
3207     addr = alloca(addrlen);
3208 
3209     ret_addrlen = addrlen;
3210     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3211     if (!is_error(ret)) {
3212         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3213         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3214             ret = -TARGET_EFAULT;
3215         }
3216     }
3217     return ret;
3218 }
3219 
3220 /* do_socketpair() must return target values and target errnos. */
3221 static abi_long do_socketpair(int domain, int type, int protocol,
3222                               abi_ulong target_tab_addr)
3223 {
3224     int tab[2];
3225     abi_long ret;
3226 
3227     target_to_host_sock_type(&type);
3228 
3229     ret = get_errno(socketpair(domain, type, protocol, tab));
3230     if (!is_error(ret)) {
3231         if (put_user_s32(tab[0], target_tab_addr)
3232             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3233             ret = -TARGET_EFAULT;
3234     }
3235     return ret;
3236 }
3237 
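/*
 * Note on the fd_trans dance in do_sendto() below: when a translator is
 * registered for the descriptor (as fd-trans.c does for e.g. netlink
 * sockets), the payload may need rewriting for the host, so it is converted
 * in a private g_malloc()ed copy rather than in the guest's locked buffer;
 * swapping copy_msg back into host_msg before unlock_user() makes sure the
 * pointer that was originally locked is also the one that gets unlocked.
 */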
3238 /* do_sendto() must return target values and target errnos. */
3239 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3240                           abi_ulong target_addr, socklen_t addrlen)
3241 {
3242     void *addr;
3243     void *host_msg;
3244     void *copy_msg = NULL;
3245     abi_long ret;
3246 
3247     if ((int)addrlen < 0) {
3248         return -TARGET_EINVAL;
3249     }
3250 
3251     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3252     if (!host_msg)
3253         return -TARGET_EFAULT;
3254     if (fd_trans_target_to_host_data(fd)) {
3255         copy_msg = host_msg;
3256         host_msg = g_malloc(len);
3257         memcpy(host_msg, copy_msg, len);
3258         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3259         if (ret < 0) {
3260             goto fail;
3261         }
3262     }
3263     if (target_addr) {
3264         addr = alloca(addrlen+1);
3265         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3266         if (ret) {
3267             goto fail;
3268         }
3269         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3270     } else {
3271         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3272     }
3273 fail:
3274     if (copy_msg) {
3275         g_free(host_msg);
3276         host_msg = copy_msg;
3277     }
3278     unlock_user(host_msg, msg, 0);
3279     return ret;
3280 }
3281 
3282 /* do_recvfrom() must return target values and target errnos. */
3283 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3284                             abi_ulong target_addr,
3285                             abi_ulong target_addrlen)
3286 {
3287     socklen_t addrlen, ret_addrlen;
3288     void *addr;
3289     void *host_msg;
3290     abi_long ret;
3291 
3292     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3293     if (!host_msg)
3294         return -TARGET_EFAULT;
3295     if (target_addr) {
3296         if (get_user_u32(addrlen, target_addrlen)) {
3297             ret = -TARGET_EFAULT;
3298             goto fail;
3299         }
3300         if ((int)addrlen < 0) {
3301             ret = -TARGET_EINVAL;
3302             goto fail;
3303         }
3304         addr = alloca(addrlen);
3305         ret_addrlen = addrlen;
3306         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3307                                       addr, &ret_addrlen));
3308     } else {
3309         addr = NULL; /* To keep compiler quiet.  */
3310         addrlen = 0; /* To keep compiler quiet.  */
3311         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3312     }
3313     if (!is_error(ret)) {
3314         if (fd_trans_host_to_target_data(fd)) {
3315             abi_long trans;
3316             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3317             if (is_error(trans)) {
3318                 ret = trans;
3319                 goto fail;
3320             }
3321         }
3322         if (target_addr) {
3323             host_to_target_sockaddr(target_addr, addr,
3324                                     MIN(addrlen, ret_addrlen));
3325             if (put_user_u32(ret_addrlen, target_addrlen)) {
3326                 ret = -TARGET_EFAULT;
3327                 goto fail;
3328             }
3329         }
3330         unlock_user(host_msg, msg, len);
3331     } else {
3332 fail:
3333         unlock_user(host_msg, msg, 0);
3334     }
3335     return ret;
3336 }
3337 
3338 #ifdef TARGET_NR_socketcall
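/*
 * Illustrative example of the demultiplexing done by do_socketcall(): a
 * guest libc that routes connect() through socketcall(2) invokes it as
 * socketcall(TARGET_SYS_CONNECT, args) with args pointing at the abi_longs
 * {sockfd, addr, addrlen}; nargs[] tells us to fetch three words from guest
 * memory, which are then handed to do_connect().
 */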
3339 /* do_socketcall() must return target values and target errnos. */
3340 static abi_long do_socketcall(int num, abi_ulong vptr)
3341 {
3342     static const unsigned nargs[] = { /* number of arguments per operation */
3343         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3344         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3345         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3346         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3347         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3348         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3349         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3350         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3351         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3352         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3353         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3354         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3355         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3356         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3357         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3358         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3359         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3360         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3361         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3362         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3363     };
3364     abi_long a[6]; /* max 6 args */
3365     unsigned i;
3366 
3367     /* check the range of the first argument num */
3368     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3369     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3370         return -TARGET_EINVAL;
3371     }
3372     /* ensure we have space for args */
3373     if (nargs[num] > ARRAY_SIZE(a)) {
3374         return -TARGET_EINVAL;
3375     }
3376     /* collect the arguments in a[] according to nargs[] */
3377     for (i = 0; i < nargs[num]; ++i) {
3378         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3379             return -TARGET_EFAULT;
3380         }
3381     }
3382     /* now when we have the args, invoke the appropriate underlying function */
3383     switch (num) {
3384     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3385         return do_socket(a[0], a[1], a[2]);
3386     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3387         return do_bind(a[0], a[1], a[2]);
3388     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3389         return do_connect(a[0], a[1], a[2]);
3390     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3391         return get_errno(listen(a[0], a[1]));
3392     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3393         return do_accept4(a[0], a[1], a[2], 0);
3394     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3395         return do_getsockname(a[0], a[1], a[2]);
3396     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3397         return do_getpeername(a[0], a[1], a[2]);
3398     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3399         return do_socketpair(a[0], a[1], a[2], a[3]);
3400     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3401         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3402     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3403         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3404     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3405         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3406     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3407         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3408     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3409         return get_errno(shutdown(a[0], a[1]));
3410     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3411         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3412     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3413         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3414     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3415         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3416     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3417         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3418     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3419         return do_accept4(a[0], a[1], a[2], a[3]);
3420     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3421         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3422     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3423         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3424     default:
3425         gemu_log("Unsupported socketcall: %d\n", num);
3426         return -TARGET_EINVAL;
3427     }
3428 }
3429 #endif
3430 
3431 #define N_SHM_REGIONS	32
3432 
3433 static struct shm_region {
3434     abi_ulong start;
3435     abi_ulong size;
3436     bool in_use;
3437 } shm_regions[N_SHM_REGIONS];
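/*
 * Bookkeeping shared by do_shmat() and do_shmdt() further down: every
 * successful attach records its guest address and segment size here so that
 * the matching detach can clear the page flags for exactly that range again.
 */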
3438 
3439 #ifndef TARGET_SEMID64_DS
3440 /* asm-generic version of this struct */
3441 struct target_semid64_ds
3442 {
3443   struct target_ipc_perm sem_perm;
3444   abi_ulong sem_otime;
3445 #if TARGET_ABI_BITS == 32
3446   abi_ulong __unused1;
3447 #endif
3448   abi_ulong sem_ctime;
3449 #if TARGET_ABI_BITS == 32
3450   abi_ulong __unused2;
3451 #endif
3452   abi_ulong sem_nsems;
3453   abi_ulong __unused3;
3454   abi_ulong __unused4;
3455 };
3456 #endif
3457 
3458 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3459                                                abi_ulong target_addr)
3460 {
3461     struct target_ipc_perm *target_ip;
3462     struct target_semid64_ds *target_sd;
3463 
3464     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3465         return -TARGET_EFAULT;
3466     target_ip = &(target_sd->sem_perm);
3467     host_ip->__key = tswap32(target_ip->__key);
3468     host_ip->uid = tswap32(target_ip->uid);
3469     host_ip->gid = tswap32(target_ip->gid);
3470     host_ip->cuid = tswap32(target_ip->cuid);
3471     host_ip->cgid = tswap32(target_ip->cgid);
3472 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3473     host_ip->mode = tswap32(target_ip->mode);
3474 #else
3475     host_ip->mode = tswap16(target_ip->mode);
3476 #endif
3477 #if defined(TARGET_PPC)
3478     host_ip->__seq = tswap32(target_ip->__seq);
3479 #else
3480     host_ip->__seq = tswap16(target_ip->__seq);
3481 #endif
3482     unlock_user_struct(target_sd, target_addr, 0);
3483     return 0;
3484 }
3485 
3486 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3487                                                struct ipc_perm *host_ip)
3488 {
3489     struct target_ipc_perm *target_ip;
3490     struct target_semid64_ds *target_sd;
3491 
3492     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3493         return -TARGET_EFAULT;
3494     target_ip = &(target_sd->sem_perm);
3495     target_ip->__key = tswap32(host_ip->__key);
3496     target_ip->uid = tswap32(host_ip->uid);
3497     target_ip->gid = tswap32(host_ip->gid);
3498     target_ip->cuid = tswap32(host_ip->cuid);
3499     target_ip->cgid = tswap32(host_ip->cgid);
3500 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3501     target_ip->mode = tswap32(host_ip->mode);
3502 #else
3503     target_ip->mode = tswap16(host_ip->mode);
3504 #endif
3505 #if defined(TARGET_PPC)
3506     target_ip->__seq = tswap32(host_ip->__seq);
3507 #else
3508     target_ip->__seq = tswap16(host_ip->__seq);
3509 #endif
3510     unlock_user_struct(target_sd, target_addr, 1);
3511     return 0;
3512 }
3513 
3514 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3515                                                abi_ulong target_addr)
3516 {
3517     struct target_semid64_ds *target_sd;
3518 
3519     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3520         return -TARGET_EFAULT;
3521     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3522         return -TARGET_EFAULT;
3523     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3524     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3525     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3526     unlock_user_struct(target_sd, target_addr, 0);
3527     return 0;
3528 }
3529 
3530 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3531                                                struct semid_ds *host_sd)
3532 {
3533     struct target_semid64_ds *target_sd;
3534 
3535     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3536         return -TARGET_EFAULT;
3537     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3538         return -TARGET_EFAULT;
3539     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3540     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3541     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3542     unlock_user_struct(target_sd, target_addr, 1);
3543     return 0;
3544 }
3545 
3546 struct target_seminfo {
3547     int semmap;
3548     int semmni;
3549     int semmns;
3550     int semmnu;
3551     int semmsl;
3552     int semopm;
3553     int semume;
3554     int semusz;
3555     int semvmx;
3556     int semaem;
3557 };
3558 
3559 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3560                                               struct seminfo *host_seminfo)
3561 {
3562     struct target_seminfo *target_seminfo;
3563     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3564         return -TARGET_EFAULT;
3565     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3566     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3567     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3568     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3569     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3570     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3571     __put_user(host_seminfo->semume, &target_seminfo->semume);
3572     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3573     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3574     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3575     unlock_user_struct(target_seminfo, target_addr, 1);
3576     return 0;
3577 }
3578 
3579 union semun {
3580     int val;
3581     struct semid_ds *buf;
3582     unsigned short *array;
3583     struct seminfo *__buf;
3584 };
3585 
3586 union target_semun {
3587     int val;
3588     abi_ulong buf;
3589     abi_ulong array;
3590     abi_ulong __buf;
3591 };
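/*
 * The host union above is what gets passed to semctl() by value; the target
 * variant only mirrors its layout, with the pointer members replaced by
 * guest addresses that still have to be translated by the helpers below
 * (target_to_host_semarray() and friends) before the host call is made.
 */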
3592 
3593 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3594                                                abi_ulong target_addr)
3595 {
3596     int nsems;
3597     unsigned short *array;
3598     union semun semun;
3599     struct semid_ds semid_ds;
3600     int i, ret;
3601 
3602     semun.buf = &semid_ds;
3603 
3604     ret = semctl(semid, 0, IPC_STAT, semun);
3605     if (ret == -1)
3606         return get_errno(ret);
3607 
3608     nsems = semid_ds.sem_nsems;
3609 
3610     *host_array = g_try_new(unsigned short, nsems);
3611     if (!*host_array) {
3612         return -TARGET_ENOMEM;
3613     }
3614     array = lock_user(VERIFY_READ, target_addr,
3615                       nsems*sizeof(unsigned short), 1);
3616     if (!array) {
3617         g_free(*host_array);
3618         return -TARGET_EFAULT;
3619     }
3620 
3621     for (i = 0; i < nsems; i++) {
3622         __get_user((*host_array)[i], &array[i]);
3623     }
3624     unlock_user(array, target_addr, 0);
3625 
3626     return 0;
3627 }
3628 
3629 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3630                                                unsigned short **host_array)
3631 {
3632     int nsems;
3633     unsigned short *array;
3634     union semun semun;
3635     struct semid_ds semid_ds;
3636     int i, ret;
3637 
3638     semun.buf = &semid_ds;
3639 
3640     ret = semctl(semid, 0, IPC_STAT, semun);
3641     if (ret == -1)
3642         return get_errno(ret);
3643 
3644     nsems = semid_ds.sem_nsems;
3645 
3646     array = lock_user(VERIFY_WRITE, target_addr,
3647                       nsems*sizeof(unsigned short), 0);
3648     if (!array)
3649         return -TARGET_EFAULT;
3650 
3651     for (i = 0; i < nsems; i++) {
3652         __put_user((*host_array)[i], &array[i]);
3653     }
3654     g_free(*host_array);
3655     unlock_user(array, target_addr, 1);
3656 
3657     return 0;
3658 }
3659 
3660 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3661                                  abi_ulong target_arg)
3662 {
3663     union target_semun target_su = { .buf = target_arg };
3664     union semun arg;
3665     struct semid_ds dsarg;
3666     unsigned short *array = NULL;
3667     struct seminfo seminfo;
3668     abi_long ret = -TARGET_EINVAL;
3669     abi_long err;
3670     cmd &= 0xff;
3671 
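    /*
     * Worked example for the GETVAL/SETVAL handling below (a sketch,
     * assuming a big-endian 64-bit guest on a little-endian host): the guest
     * stored its 32-bit "val" in the first four bytes of the 8-byte union,
     * but the argument reaches us as an abi_ulong that has already been
     * byte-swapped to host order as a whole, which leaves "val" overlapping
     * the wrong half.  tswapal() restores the guest's raw byte layout so the
     * int member lines up again, and tswap32() then converts just that
     * member to host byte order.
     */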
3672     switch (cmd) {
3673         case GETVAL:
3674         case SETVAL:
3675             /* In 64 bit cross-endian situations, we will erroneously pick up
3676              * the wrong half of the union for the "val" element.  To rectify
3677              * this, the entire 8-byte structure is byteswapped, followed by
3678              * a swap of the 4 byte val field. In other cases, the data is
3679              * already in proper host byte order. */
3680             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3681                 target_su.buf = tswapal(target_su.buf);
3682                 arg.val = tswap32(target_su.val);
3683             } else {
3684                 arg.val = target_su.val;
3685             }
3686             ret = get_errno(semctl(semid, semnum, cmd, arg));
3687             break;
3688         case GETALL:
3689         case SETALL:
3690             err = target_to_host_semarray(semid, &array, target_su.array);
3691             if (err)
3692                 return err;
3693             arg.array = array;
3694             ret = get_errno(semctl(semid, semnum, cmd, arg));
3695             err = host_to_target_semarray(semid, target_su.array, &array);
3696             if (err)
3697                 return err;
3698             break;
3699         case IPC_STAT:
3700         case IPC_SET:
3701         case SEM_STAT:
3702             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3703             if (err)
3704                 return err;
3705             arg.buf = &dsarg;
3706             ret = get_errno(semctl(semid, semnum, cmd, arg));
3707             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3708             if (err)
3709                 return err;
3710             break;
3711         case IPC_INFO:
3712         case SEM_INFO:
3713             arg.__buf = &seminfo;
3714             ret = get_errno(semctl(semid, semnum, cmd, arg));
3715             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3716             if (err)
3717                 return err;
3718             break;
3719         case IPC_RMID:
3720         case GETPID:
3721         case GETNCNT:
3722         case GETZCNT:
3723             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3724             break;
3725     }
3726 
3727     return ret;
3728 }
3729 
3730 struct target_sembuf {
3731     unsigned short sem_num;
3732     short sem_op;
3733     short sem_flg;
3734 };
3735 
3736 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3737                                              abi_ulong target_addr,
3738                                              unsigned nsops)
3739 {
3740     struct target_sembuf *target_sembuf;
3741     int i;
3742 
3743     target_sembuf = lock_user(VERIFY_READ, target_addr,
3744                               nsops*sizeof(struct target_sembuf), 1);
3745     if (!target_sembuf)
3746         return -TARGET_EFAULT;
3747 
3748     for (i = 0; i < nsops; i++) {
3749         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3750         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3751         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3752     }
3753 
3754     unlock_user(target_sembuf, target_addr, 0);
3755 
3756     return 0;
3757 }
3758 
3759 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3760 {
3761     struct sembuf sops[nsops];
3762     abi_long ret;
3763 
3764     if (target_to_host_sembuf(sops, ptr, nsops))
3765         return -TARGET_EFAULT;
3766 
3767     ret = -TARGET_ENOSYS;
3768 #ifdef __NR_semtimedop
3769     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3770 #endif
3771 #ifdef __NR_ipc
3772     if (ret == -TARGET_ENOSYS) {
3773         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3774     }
3775 #endif
3776     return ret;
3777 }
3778 
3779 struct target_msqid_ds
3780 {
3781     struct target_ipc_perm msg_perm;
3782     abi_ulong msg_stime;
3783 #if TARGET_ABI_BITS == 32
3784     abi_ulong __unused1;
3785 #endif
3786     abi_ulong msg_rtime;
3787 #if TARGET_ABI_BITS == 32
3788     abi_ulong __unused2;
3789 #endif
3790     abi_ulong msg_ctime;
3791 #if TARGET_ABI_BITS == 32
3792     abi_ulong __unused3;
3793 #endif
3794     abi_ulong __msg_cbytes;
3795     abi_ulong msg_qnum;
3796     abi_ulong msg_qbytes;
3797     abi_ulong msg_lspid;
3798     abi_ulong msg_lrpid;
3799     abi_ulong __unused4;
3800     abi_ulong __unused5;
3801 };
3802 
3803 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3804                                                abi_ulong target_addr)
3805 {
3806     struct target_msqid_ds *target_md;
3807 
3808     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3809         return -TARGET_EFAULT;
3810     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3811         return -TARGET_EFAULT;
3812     host_md->msg_stime = tswapal(target_md->msg_stime);
3813     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3814     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3815     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3816     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3817     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3818     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3819     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3820     unlock_user_struct(target_md, target_addr, 0);
3821     return 0;
3822 }
3823 
3824 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3825                                                struct msqid_ds *host_md)
3826 {
3827     struct target_msqid_ds *target_md;
3828 
3829     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3830         return -TARGET_EFAULT;
3831     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3832         return -TARGET_EFAULT;
3833     target_md->msg_stime = tswapal(host_md->msg_stime);
3834     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3835     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3836     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3837     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3838     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3839     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3840     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3841     unlock_user_struct(target_md, target_addr, 1);
3842     return 0;
3843 }
3844 
3845 struct target_msginfo {
3846     int msgpool;
3847     int msgmap;
3848     int msgmax;
3849     int msgmnb;
3850     int msgmni;
3851     int msgssz;
3852     int msgtql;
3853     unsigned short int msgseg;
3854 };
3855 
3856 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3857                                               struct msginfo *host_msginfo)
3858 {
3859     struct target_msginfo *target_msginfo;
3860     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3861         return -TARGET_EFAULT;
3862     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3863     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3864     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3865     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3866     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3867     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3868     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3869     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3870     unlock_user_struct(target_msginfo, target_addr, 1);
3871     return 0;
3872 }
3873 
3874 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3875 {
3876     struct msqid_ds dsarg;
3877     struct msginfo msginfo;
3878     abi_long ret = -TARGET_EINVAL;
3879 
3880     cmd &= 0xff;
3881 
3882     switch (cmd) {
3883     case IPC_STAT:
3884     case IPC_SET:
3885     case MSG_STAT:
3886         if (target_to_host_msqid_ds(&dsarg,ptr))
3887             return -TARGET_EFAULT;
3888         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3889         if (host_to_target_msqid_ds(ptr,&dsarg))
3890             return -TARGET_EFAULT;
3891         break;
3892     case IPC_RMID:
3893         ret = get_errno(msgctl(msgid, cmd, NULL));
3894         break;
3895     case IPC_INFO:
3896     case MSG_INFO:
3897         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3898         if (host_to_target_msginfo(ptr, &msginfo))
3899             return -TARGET_EFAULT;
3900         break;
3901     }
3902 
3903     return ret;
3904 }
3905 
3906 struct target_msgbuf {
3907     abi_long mtype;
3908     char	mtext[1];
3909 };
3910 
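/*
 * The guest message starts with an abi_long mtype while the host struct
 * msgbuf starts with a long, so do_msgsnd()/do_msgrcv() below bounce the
 * message through a host buffer of msgsz + sizeof(long) bytes, swapping
 * mtype separately and copying mtext as raw bytes.
 */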
3911 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3912                                  ssize_t msgsz, int msgflg)
3913 {
3914     struct target_msgbuf *target_mb;
3915     struct msgbuf *host_mb;
3916     abi_long ret = 0;
3917 
3918     if (msgsz < 0) {
3919         return -TARGET_EINVAL;
3920     }
3921 
3922     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3923         return -TARGET_EFAULT;
3924     host_mb = g_try_malloc(msgsz + sizeof(long));
3925     if (!host_mb) {
3926         unlock_user_struct(target_mb, msgp, 0);
3927         return -TARGET_ENOMEM;
3928     }
3929     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3930     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3931     ret = -TARGET_ENOSYS;
3932 #ifdef __NR_msgsnd
3933     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3934 #endif
3935 #ifdef __NR_ipc
3936     if (ret == -TARGET_ENOSYS) {
3937         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3938                                  host_mb, 0));
3939     }
3940 #endif
3941     g_free(host_mb);
3942     unlock_user_struct(target_mb, msgp, 0);
3943 
3944     return ret;
3945 }
3946 
3947 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3948                                  ssize_t msgsz, abi_long msgtyp,
3949                                  int msgflg)
3950 {
3951     struct target_msgbuf *target_mb;
3952     char *target_mtext;
3953     struct msgbuf *host_mb;
3954     abi_long ret = 0;
3955 
3956     if (msgsz < 0) {
3957         return -TARGET_EINVAL;
3958     }
3959 
3960     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3961         return -TARGET_EFAULT;
3962 
3963     host_mb = g_try_malloc(msgsz + sizeof(long));
3964     if (!host_mb) {
3965         ret = -TARGET_ENOMEM;
3966         goto end;
3967     }
3968     ret = -TARGET_ENOSYS;
3969 #ifdef __NR_msgrcv
3970     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3971 #endif
3972 #ifdef __NR_ipc
3973     if (ret == -TARGET_ENOSYS) {
3974         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3975                         msgflg, host_mb, msgtyp));
3976     }
3977 #endif
3978 
3979     if (ret > 0) {
3980         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3981         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3982         if (!target_mtext) {
3983             ret = -TARGET_EFAULT;
3984             goto end;
3985         }
3986         memcpy(target_mb->mtext, host_mb->mtext, ret);
3987         unlock_user(target_mtext, target_mtext_addr, ret);
3988     }
3989 
3990     target_mb->mtype = tswapal(host_mb->mtype);
3991 
3992 end:
3993     if (target_mb)
3994         unlock_user_struct(target_mb, msgp, 1);
3995     g_free(host_mb);
3996     return ret;
3997 }
3998 
3999 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4000                                                abi_ulong target_addr)
4001 {
4002     struct target_shmid_ds *target_sd;
4003 
4004     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4005         return -TARGET_EFAULT;
4006     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4007         return -TARGET_EFAULT;
4008     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4009     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4010     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4011     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4012     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4013     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4014     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4015     unlock_user_struct(target_sd, target_addr, 0);
4016     return 0;
4017 }
4018 
4019 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4020                                                struct shmid_ds *host_sd)
4021 {
4022     struct target_shmid_ds *target_sd;
4023 
4024     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4025         return -TARGET_EFAULT;
4026     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4027         return -TARGET_EFAULT;
4028     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4029     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4030     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4031     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4032     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4033     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4034     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4035     unlock_user_struct(target_sd, target_addr, 1);
4036     return 0;
4037 }
4038 
4039 struct  target_shminfo {
4040     abi_ulong shmmax;
4041     abi_ulong shmmin;
4042     abi_ulong shmmni;
4043     abi_ulong shmseg;
4044     abi_ulong shmall;
4045 };
4046 
4047 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4048                                               struct shminfo *host_shminfo)
4049 {
4050     struct target_shminfo *target_shminfo;
4051     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4052         return -TARGET_EFAULT;
4053     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4054     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4055     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4056     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4057     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4058     unlock_user_struct(target_shminfo, target_addr, 1);
4059     return 0;
4060 }
4061 
4062 struct target_shm_info {
4063     int used_ids;
4064     abi_ulong shm_tot;
4065     abi_ulong shm_rss;
4066     abi_ulong shm_swp;
4067     abi_ulong swap_attempts;
4068     abi_ulong swap_successes;
4069 };
4070 
4071 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4072                                                struct shm_info *host_shm_info)
4073 {
4074     struct target_shm_info *target_shm_info;
4075     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4076         return -TARGET_EFAULT;
4077     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4078     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4079     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4080     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4081     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4082     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4083     unlock_user_struct(target_shm_info, target_addr, 1);
4084     return 0;
4085 }
4086 
4087 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4088 {
4089     struct shmid_ds dsarg;
4090     struct shminfo shminfo;
4091     struct shm_info shm_info;
4092     abi_long ret = -TARGET_EINVAL;
4093 
4094     cmd &= 0xff;
4095 
4096     switch (cmd) {
4097     case IPC_STAT:
4098     case IPC_SET:
4099     case SHM_STAT:
4100         if (target_to_host_shmid_ds(&dsarg, buf))
4101             return -TARGET_EFAULT;
4102         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4103         if (host_to_target_shmid_ds(buf, &dsarg))
4104             return -TARGET_EFAULT;
4105         break;
4106     case IPC_INFO:
4107         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4108         if (host_to_target_shminfo(buf, &shminfo))
4109             return -TARGET_EFAULT;
4110         break;
4111     case SHM_INFO:
4112         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4113         if (host_to_target_shm_info(buf, &shm_info))
4114             return -TARGET_EFAULT;
4115         break;
4116     case IPC_RMID:
4117     case SHM_LOCK:
4118     case SHM_UNLOCK:
4119         ret = get_errno(shmctl(shmid, cmd, NULL));
4120         break;
4121     }
4122 
4123     return ret;
4124 }
4125 
4126 #ifndef TARGET_FORCE_SHMLBA
4127 /* For most architectures, SHMLBA is the same as the page size;
4128  * some architectures have larger values, in which case they should
4129  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4130  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4131  * and defining its own value for SHMLBA.
4132  *
4133  * The kernel also permits SHMLBA to be set by the architecture to a
4134  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4135  * this means that addresses are rounded to the large size if
4136  * SHM_RND is set but addresses not aligned to that size are not rejected
4137  * as long as they are at least page-aligned. Since the only architecture
4138  * which uses this is ia64 this code doesn't provide for that oddity.
4139  * which uses this is ia64, this code doesn't provide for that oddity.
4140 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4141 {
4142     return TARGET_PAGE_SIZE;
4143 }
4144 #endif
4145 
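/*
 * Outline of do_shmat() below: the segment size is obtained with IPC_STAT,
 * the requested address is checked against the target's SHMLBA (rounding
 * down only if SHM_RND was given), and when the guest passed shmaddr == 0 a
 * free guest range aligned to both host and target SHMLBA is picked with
 * mmap_find_vma() and attached with SHM_REMAP.  The new mapping's page flags
 * and the shm_regions[] slot are then updated under mmap_lock().
 */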
4146 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4147                                  int shmid, abi_ulong shmaddr, int shmflg)
4148 {
4149     abi_long raddr;
4150     void *host_raddr;
4151     struct shmid_ds shm_info;
4152     int i, ret;
4153     abi_ulong shmlba;
4154 
4155     /* find out the length of the shared memory segment */
4156     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4157     if (is_error(ret)) {
4158         /* can't get length, bail out */
4159         return ret;
4160     }
4161 
4162     shmlba = target_shmlba(cpu_env);
4163 
4164     if (shmaddr & (shmlba - 1)) {
4165         if (shmflg & SHM_RND) {
4166             shmaddr &= ~(shmlba - 1);
4167         } else {
4168             return -TARGET_EINVAL;
4169         }
4170     }
4171     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4172         return -TARGET_EINVAL;
4173     }
4174 
4175     mmap_lock();
4176 
4177     if (shmaddr)
4178         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4179     else {
4180         abi_ulong mmap_start;
4181 
4182         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4183         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4184 
4185         if (mmap_start == -1) {
4186             errno = ENOMEM;
4187             host_raddr = (void *)-1;
4188         } else
4189             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4190     }
4191 
4192     if (host_raddr == (void *)-1) {
4193         mmap_unlock();
4194         return get_errno((long)host_raddr);
4195     }
4196     raddr = h2g((unsigned long)host_raddr);
4197 
4198     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4199                    PAGE_VALID | PAGE_READ |
4200                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4201 
4202     for (i = 0; i < N_SHM_REGIONS; i++) {
4203         if (!shm_regions[i].in_use) {
4204             shm_regions[i].in_use = true;
4205             shm_regions[i].start = raddr;
4206             shm_regions[i].size = shm_info.shm_segsz;
4207             break;
4208         }
4209     }
4210 
4211     mmap_unlock();
4212     return raddr;
4213 
4214 }
4215 
4216 static inline abi_long do_shmdt(abi_ulong shmaddr)
4217 {
4218     int i;
4219     abi_long rv;
4220 
4221     mmap_lock();
4222 
4223     for (i = 0; i < N_SHM_REGIONS; ++i) {
4224         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4225             shm_regions[i].in_use = false;
4226             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4227             break;
4228         }
4229     }
4230     rv = get_errno(shmdt(g2h(shmaddr)));
4231 
4232     mmap_unlock();
4233 
4234     return rv;
4235 }
4236 
4237 #ifdef TARGET_NR_ipc
4238 /* ??? This only works with linear mappings.  */
4239 /* do_ipc() must return target values and target errnos. */
4240 static abi_long do_ipc(CPUArchState *cpu_env,
4241                        unsigned int call, abi_long first,
4242                        abi_long second, abi_long third,
4243                        abi_long ptr, abi_long fifth)
4244 {
4245     int version;
4246     abi_long ret = 0;
4247 
4248     version = call >> 16;
4249     call &= 0xffff;
4250 
4251     switch (call) {
4252     case IPCOP_semop:
4253         ret = do_semop(first, ptr, second);
4254         break;
4255 
4256     case IPCOP_semget:
4257         ret = get_errno(semget(first, second, third));
4258         break;
4259 
4260     case IPCOP_semctl: {
4261         /* The semun argument to semctl is passed by value, so dereference the
4262          * ptr argument. */
4263         abi_ulong atptr;
4264         get_user_ual(atptr, ptr);
4265         ret = do_semctl(first, second, third, atptr);
4266         break;
4267     }
4268 
4269     case IPCOP_msgget:
4270         ret = get_errno(msgget(first, second));
4271         break;
4272 
4273     case IPCOP_msgsnd:
4274         ret = do_msgsnd(first, ptr, second, third);
4275         break;
4276 
4277     case IPCOP_msgctl:
4278         ret = do_msgctl(first, second, ptr);
4279         break;
4280 
4281     case IPCOP_msgrcv:
4282         switch (version) {
4283         case 0:
4284             {
4285                 struct target_ipc_kludge {
4286                     abi_long msgp;
4287                     abi_long msgtyp;
4288                 } *tmp;
4289 
4290                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4291                     ret = -TARGET_EFAULT;
4292                     break;
4293                 }
4294 
4295                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4296 
4297                 unlock_user_struct(tmp, ptr, 0);
4298                 break;
4299             }
4300         default:
4301             ret = do_msgrcv(first, ptr, second, fifth, third);
4302         }
4303         break;
4304 
4305     case IPCOP_shmat:
4306         switch (version) {
4307         default:
4308         {
4309             abi_ulong raddr;
4310             raddr = do_shmat(cpu_env, first, ptr, second);
4311             if (is_error(raddr))
4312                 return get_errno(raddr);
4313             if (put_user_ual(raddr, third))
4314                 return -TARGET_EFAULT;
4315             break;
4316         }
4317         case 1:
4318             ret = -TARGET_EINVAL;
4319             break;
4320         }
4321         break;
4322     case IPCOP_shmdt:
4323         ret = do_shmdt(ptr);
4324         break;
4325 
4326     case IPCOP_shmget:
4327         /* IPC_* flag values are the same on all linux platforms */
4328         ret = get_errno(shmget(first, second, third));
4329         break;
4330 
4331     /* IPC_* and SHM_* command values are the same on all linux platforms */
4332     case IPCOP_shmctl:
4333         ret = do_shmctl(first, second, ptr);
4334         break;
4335     default:
4336         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4337         ret = -TARGET_ENOSYS;
4338         break;
4339     }
4340     return ret;
4341 }
4342 #endif
4343 
4344 /* kernel structure types definitions */
4345 
4346 #define STRUCT(name, ...) STRUCT_ ## name,
4347 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4348 enum {
4349 #include "syscall_types.h"
4350 STRUCT_MAX
4351 };
4352 #undef STRUCT
4353 #undef STRUCT_SPECIAL
4354 
4355 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4356 #define STRUCT_SPECIAL(name)
4357 #include "syscall_types.h"
4358 #undef STRUCT
4359 #undef STRUCT_SPECIAL
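/*
 * Illustrative sketch of the two-pass expansion above (the entry name is
 * hypothetical, not necessarily present in syscall_types.h): a line such as
 *
 *     STRUCT(example_arg, TYPE_INT, TYPE_LONG)
 *
 * contributes an enum member STRUCT_example_arg on the first include and
 *
 *     static const argtype struct_example_arg_def[] = {
 *         TYPE_INT, TYPE_LONG, TYPE_NULL
 *     };
 *
 * on the second one, while a STRUCT_SPECIAL() entry only gets the enum
 * member here.
 */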
4360 
4361 typedef struct IOCTLEntry IOCTLEntry;
4362 
4363 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4364                              int fd, int cmd, abi_long arg);
4365 
4366 struct IOCTLEntry {
4367     int target_cmd;
4368     unsigned int host_cmd;
4369     const char *name;
4370     int access;
4371     do_ioctl_fn *do_ioctl;
4372     const argtype arg_type[5];
4373 };
4374 
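/*
 * Each IOCTLEntry pairs a target ioctl request with its host counterpart;
 * the access flags and arg_type description drive the generic thunk-based
 * argument conversion, while entries that set do_ioctl get a dedicated
 * handler (such as the fiemap, ifconf and USB helpers below) for requests
 * the generic path cannot express.
 */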
4375 #define IOC_R 0x0001
4376 #define IOC_W 0x0002
4377 #define IOC_RW (IOC_R | IOC_W)
4378 
4379 #define MAX_STRUCT_SIZE 4096
4380 
4381 #ifdef CONFIG_FIEMAP
4382 /* So fiemap access checks don't overflow on 32 bit systems.
4383  * This is very slightly smaller than the limit imposed by
4384  * the underlying kernel.
4385  */
4386 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4387                             / sizeof(struct fiemap_extent))
4388 
4389 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4390                                        int fd, int cmd, abi_long arg)
4391 {
4392     /* The parameter for this ioctl is a struct fiemap followed
4393      * by an array of struct fiemap_extent whose size is set
4394      * in fiemap->fm_extent_count. The array is filled in by the
4395      * ioctl.
4396      */
4397     int target_size_in, target_size_out;
4398     struct fiemap *fm;
4399     const argtype *arg_type = ie->arg_type;
4400     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4401     void *argptr, *p;
4402     abi_long ret;
4403     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4404     uint32_t outbufsz;
4405     int free_fm = 0;
4406 
4407     assert(arg_type[0] == TYPE_PTR);
4408     assert(ie->access == IOC_RW);
4409     arg_type++;
4410     target_size_in = thunk_type_size(arg_type, 0);
4411     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4412     if (!argptr) {
4413         return -TARGET_EFAULT;
4414     }
4415     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4416     unlock_user(argptr, arg, 0);
4417     fm = (struct fiemap *)buf_temp;
4418     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4419         return -TARGET_EINVAL;
4420     }
4421 
4422     outbufsz = sizeof (*fm) +
4423         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4424 
4425     if (outbufsz > MAX_STRUCT_SIZE) {
4426         /* We can't fit all the extents into the fixed size buffer.
4427          * Allocate one that is large enough and use it instead.
4428          */
4429         fm = g_try_malloc(outbufsz);
4430         if (!fm) {
4431             return -TARGET_ENOMEM;
4432         }
4433         memcpy(fm, buf_temp, sizeof(struct fiemap));
4434         free_fm = 1;
4435     }
4436     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4437     if (!is_error(ret)) {
4438         target_size_out = target_size_in;
4439         /* An extent_count of 0 means we were only counting the extents
4440          * so there are no structs to copy
4441          */
4442         if (fm->fm_extent_count != 0) {
4443             target_size_out += fm->fm_mapped_extents * extent_size;
4444         }
4445         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4446         if (!argptr) {
4447             ret = -TARGET_EFAULT;
4448         } else {
4449             /* Convert the struct fiemap */
4450             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4451             if (fm->fm_extent_count != 0) {
4452                 p = argptr + target_size_in;
4453                 /* ...and then all the struct fiemap_extents */
4454                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4455                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4456                                   THUNK_TARGET);
4457                     p += extent_size;
4458                 }
4459             }
4460             unlock_user(argptr, arg, target_size_out);
4461         }
4462     }
4463     if (free_fm) {
4464         g_free(fm);
4465     }
4466     return ret;
4467 }
4468 #endif
4469 
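/*
 * SIOCGIFCONF needs special handling because the guest sizes its buffer in
 * target struct ifreq units, which need not match the host's struct ifreq
 * size.  The handler below therefore allocates a host buffer for the same
 * number of records, runs the ioctl, and converts both the records and the
 * ifc_len field back into target units before copying them out.
 */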
4470 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4471                                 int fd, int cmd, abi_long arg)
4472 {
4473     const argtype *arg_type = ie->arg_type;
4474     int target_size;
4475     void *argptr;
4476     int ret;
4477     struct ifconf *host_ifconf;
4478     uint32_t outbufsz;
4479     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4480     int target_ifreq_size;
4481     int nb_ifreq;
4482     int free_buf = 0;
4483     int i;
4484     int target_ifc_len;
4485     abi_long target_ifc_buf;
4486     int host_ifc_len;
4487     char *host_ifc_buf;
4488 
4489     assert(arg_type[0] == TYPE_PTR);
4490     assert(ie->access == IOC_RW);
4491 
4492     arg_type++;
4493     target_size = thunk_type_size(arg_type, 0);
4494 
4495     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4496     if (!argptr)
4497         return -TARGET_EFAULT;
4498     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4499     unlock_user(argptr, arg, 0);
4500 
4501     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4502     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4503     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4504 
4505     if (target_ifc_buf != 0) {
4506         target_ifc_len = host_ifconf->ifc_len;
4507         nb_ifreq = target_ifc_len / target_ifreq_size;
4508         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4509 
4510         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4511         if (outbufsz > MAX_STRUCT_SIZE) {
4512             /*
4513              * We can't fit all the ifreq entries into the fixed size buffer.
4514              * Allocate one that is large enough and use it instead.
4515              */
4516             host_ifconf = malloc(outbufsz);
4517             if (!host_ifconf) {
4518                 return -TARGET_ENOMEM;
4519             }
4520             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4521             free_buf = 1;
4522         }
4523         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4524 
4525         host_ifconf->ifc_len = host_ifc_len;
4526     } else {
4527       host_ifc_buf = NULL;
4528     }
4529     host_ifconf->ifc_buf = host_ifc_buf;
4530 
4531     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4532     if (!is_error(ret)) {
4533         /* convert host ifc_len to target ifc_len */
4534 
4535         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4536         target_ifc_len = nb_ifreq * target_ifreq_size;
4537         host_ifconf->ifc_len = target_ifc_len;
4538 
4539         /* restore target ifc_buf */
4540 
4541         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4542 
4543         /* copy struct ifconf to target user */
4544 
4545         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4546         if (!argptr)
4547             return -TARGET_EFAULT;
4548         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4549         unlock_user(argptr, arg, target_size);
4550 
4551         if (target_ifc_buf != 0) {
4552             /* copy ifreq[] to target user */
4553             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4554             for (i = 0; i < nb_ifreq ; i++) {
4555                 thunk_convert(argptr + i * target_ifreq_size,
4556                               host_ifc_buf + i * sizeof(struct ifreq),
4557                               ifreq_arg_type, THUNK_TARGET);
4558             }
4559             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4560         }
4561     }
4562 
4563     if (free_buf) {
4564         free(host_ifconf);
4565     }
4566 
4567     return ret;
4568 }
4569 
4570 #if defined(CONFIG_USBFS)
4571 #if HOST_LONG_BITS > 64
4572 #error USBDEVFS thunks do not support >64 bit hosts yet.
4573 #endif
4574 struct live_urb {
4575     uint64_t target_urb_adr;
4576     uint64_t target_buf_adr;
4577     char *target_buf_ptr;
4578     struct usbdevfs_urb host_urb;
4579 };
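/*
 * Every submitted URB is wrapped in a live_urb: the embedded host_urb is
 * what the host kernel sees (so the pointer REAPURB hands back can be
 * converted back to the wrapper with a simple offsetof() adjustment), while
 * the other fields remember the guest URB address and the locked guest
 * buffer.  The hash table below, keyed on target_urb_adr, lets DISCARDURB
 * find the live_urb for a given guest URB again.
 */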
4580 
4581 static GHashTable *usbdevfs_urb_hashtable(void)
4582 {
4583     static GHashTable *urb_hashtable;
4584 
4585     if (!urb_hashtable) {
4586         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4587     }
4588     return urb_hashtable;
4589 }
4590 
4591 static void urb_hashtable_insert(struct live_urb *urb)
4592 {
4593     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4594     g_hash_table_insert(urb_hashtable, urb, urb);
4595 }
4596 
4597 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4598 {
4599     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4600     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4601 }
4602 
4603 static void urb_hashtable_remove(struct live_urb *urb)
4604 {
4605     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4606     g_hash_table_remove(urb_hashtable, urb);
4607 }
4608 
4609 static abi_long
4610 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4611                           int fd, int cmd, abi_long arg)
4612 {
4613     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4614     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4615     struct live_urb *lurb;
4616     void *argptr;
4617     uint64_t hurb;
4618     int target_size;
4619     uintptr_t target_urb_adr;
4620     abi_long ret;
4621 
4622     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4623 
4624     memset(buf_temp, 0, sizeof(uint64_t));
4625     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4626     if (is_error(ret)) {
4627         return ret;
4628     }
4629 
4630     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4631     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4632     if (!lurb->target_urb_adr) {
4633         return -TARGET_EFAULT;
4634     }
4635     urb_hashtable_remove(lurb);
4636     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4637         lurb->host_urb.buffer_length);
4638     lurb->target_buf_ptr = NULL;
4639 
4640     /* restore the guest buffer pointer */
4641     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4642 
4643     /* update the guest urb struct */
4644     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4645     if (!argptr) {
4646         g_free(lurb);
4647         return -TARGET_EFAULT;
4648     }
4649     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4650     unlock_user(argptr, lurb->target_urb_adr, target_size);
4651 
4652     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4653     /* write back the urb handle */
4654     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4655     if (!argptr) {
4656         g_free(lurb);
4657         return -TARGET_EFAULT;
4658     }
4659 
4660     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4661     target_urb_adr = lurb->target_urb_adr;
4662     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4663     unlock_user(argptr, arg, target_size);
4664 
4665     g_free(lurb);
4666     return ret;
4667 }
4668 
4669 static abi_long
4670 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4671                              uint8_t *buf_temp __attribute__((unused)),
4672                              int fd, int cmd, abi_long arg)
4673 {
4674     struct live_urb *lurb;
4675 
4676     /* map target address back to host URB with metadata. */
4677     lurb = urb_hashtable_lookup(arg);
4678     if (!lurb) {
4679         return -TARGET_EFAULT;
4680     }
4681     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4682 }
4683 
4684 static abi_long
4685 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4686                             int fd, int cmd, abi_long arg)
4687 {
4688     const argtype *arg_type = ie->arg_type;
4689     int target_size;
4690     abi_long ret;
4691     void *argptr;
4692     int rw_dir;
4693     struct live_urb *lurb;
4694 
4695     /*
4696      * Each submitted URB needs to map to a unique ID for the
4697      * kernel, and that unique ID needs to be a pointer to host
4698      * memory; hence we allocate a separate wrapper for each URB.
4699      * Isochronous transfers have a variable-length struct.
4700      */
4701     arg_type++;
4702     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4703 
4704     /* construct host copy of urb and metadata */
4705     lurb = g_try_malloc0(sizeof(struct live_urb));
4706     if (!lurb) {
4707         return -TARGET_ENOMEM;
4708     }
4709 
4710     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4711     if (!argptr) {
4712         g_free(lurb);
4713         return -TARGET_EFAULT;
4714     }
4715     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4716     unlock_user(argptr, arg, 0);
4717 
4718     lurb->target_urb_adr = arg;
4719     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4720 
4721     /* buffer space used depends on endpoint type so lock the entire buffer */
4722     /* control type urbs should check the buffer contents for true direction */
4723     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4724     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4725         lurb->host_urb.buffer_length, 1);
4726     if (lurb->target_buf_ptr == NULL) {
4727         g_free(lurb);
4728         return -TARGET_EFAULT;
4729     }
4730 
4731     /* update buffer pointer in host copy */
4732     lurb->host_urb.buffer = lurb->target_buf_ptr;
4733 
4734     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4735     if (is_error(ret)) {
4736         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4737         g_free(lurb);
4738     } else {
4739         urb_hashtable_insert(lurb);
4740     }
4741 
4742     return ret;
4743 }
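/*
 * Lifecycle summary: SUBMITURB allocates a live_urb, pins the guest data
 * buffer and inserts the wrapper into the hash table; REAPURB recovers the
 * wrapper from the host URB pointer, copies results back to the guest and
 * frees it; DISCARDURB only looks the wrapper up, since a discarded URB is
 * still expected to be reaped (and freed) later.
 */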
4744 #endif /* CONFIG_USBFS */
4745 
4746 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4747                             int cmd, abi_long arg)
4748 {
4749     void *argptr;
4750     struct dm_ioctl *host_dm;
4751     abi_long guest_data;
4752     uint32_t guest_data_size;
4753     int target_size;
4754     const argtype *arg_type = ie->arg_type;
4755     abi_long ret;
4756     void *big_buf = NULL;
4757     char *host_data;
4758 
4759     arg_type++;
4760     target_size = thunk_type_size(arg_type, 0);
4761     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4762     if (!argptr) {
4763         ret = -TARGET_EFAULT;
4764         goto out;
4765     }
4766     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4767     unlock_user(argptr, arg, 0);
4768 
4769     /* buf_temp is too small, so fetch things into a bigger buffer */
4770     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4771     memcpy(big_buf, buf_temp, target_size);
4772     buf_temp = big_buf;
4773     host_dm = big_buf;
4774 
4775     guest_data = arg + host_dm->data_start;
4776     if ((guest_data - arg) < 0) {
4777         ret = -TARGET_EINVAL;
4778         goto out;
4779     }
4780     guest_data_size = host_dm->data_size - host_dm->data_start;
4781     host_data = (char*)host_dm + host_dm->data_start;
4782 
4783     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4784     if (!argptr) {
4785         ret = -TARGET_EFAULT;
4786         goto out;
4787     }
4788 
4789     switch (ie->host_cmd) {
4790     case DM_REMOVE_ALL:
4791     case DM_LIST_DEVICES:
4792     case DM_DEV_CREATE:
4793     case DM_DEV_REMOVE:
4794     case DM_DEV_SUSPEND:
4795     case DM_DEV_STATUS:
4796     case DM_DEV_WAIT:
4797     case DM_TABLE_STATUS:
4798     case DM_TABLE_CLEAR:
4799     case DM_TABLE_DEPS:
4800     case DM_LIST_VERSIONS:
4801         /* no input data */
4802         break;
4803     case DM_DEV_RENAME:
4804     case DM_DEV_SET_GEOMETRY:
4805         /* data contains only strings */
4806         memcpy(host_data, argptr, guest_data_size);
4807         break;
4808     case DM_TARGET_MSG:
4809         memcpy(host_data, argptr, guest_data_size);
4810         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4811         break;
4812     case DM_TABLE_LOAD:
4813     {
4814         void *gspec = argptr;
4815         void *cur_data = host_data;
4816         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4817         int spec_size = thunk_type_size(arg_type, 0);
4818         int i;
4819 
4820         for (i = 0; i < host_dm->target_count; i++) {
4821             struct dm_target_spec *spec = cur_data;
4822             uint32_t next;
4823             int slen;
4824 
4825             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4826             slen = strlen((char*)gspec + spec_size) + 1;
4827             next = spec->next;
4828             spec->next = sizeof(*spec) + slen;
4829             strcpy((char*)&spec[1], gspec + spec_size);
4830             gspec += next;
4831             cur_data += spec->next;
4832         }
4833         break;
4834     }
4835     default:
4836         ret = -TARGET_EINVAL;
4837         unlock_user(argptr, guest_data, 0);
4838         goto out;
4839     }
4840     unlock_user(argptr, guest_data, 0);
4841 
4842     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4843     if (!is_error(ret)) {
4844         guest_data = arg + host_dm->data_start;
4845         guest_data_size = host_dm->data_size - host_dm->data_start;
4846         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4847         switch (ie->host_cmd) {
4848         case DM_REMOVE_ALL:
4849         case DM_DEV_CREATE:
4850         case DM_DEV_REMOVE:
4851         case DM_DEV_RENAME:
4852         case DM_DEV_SUSPEND:
4853         case DM_DEV_STATUS:
4854         case DM_TABLE_LOAD:
4855         case DM_TABLE_CLEAR:
4856         case DM_TARGET_MSG:
4857         case DM_DEV_SET_GEOMETRY:
4858             /* no return data */
4859             break;
4860         case DM_LIST_DEVICES:
4861         {
4862             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4863             uint32_t remaining_data = guest_data_size;
4864             void *cur_data = argptr;
4865             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4866             int nl_size = 12; /* can't use thunk_size due to alignment */
4867 
4868             while (1) {
4869                 uint32_t next = nl->next;
4870                 if (next) {
4871                     nl->next = nl_size + (strlen(nl->name) + 1);
4872                 }
4873                 if (remaining_data < nl->next) {
4874                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4875                     break;
4876                 }
4877                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4878                 strcpy(cur_data + nl_size, nl->name);
4879                 cur_data += nl->next;
4880                 remaining_data -= nl->next;
4881                 if (!next) {
4882                     break;
4883                 }
4884                 nl = (void*)nl + next;
4885             }
4886             break;
4887         }
4888         case DM_DEV_WAIT:
4889         case DM_TABLE_STATUS:
4890         {
4891             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4892             void *cur_data = argptr;
4893             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4894             int spec_size = thunk_type_size(arg_type, 0);
4895             int i;
4896 
4897             for (i = 0; i < host_dm->target_count; i++) {
4898                 uint32_t next = spec->next;
4899                 int slen = strlen((char*)&spec[1]) + 1;
4900                 spec->next = (cur_data - argptr) + spec_size + slen;
4901                 if (guest_data_size < spec->next) {
4902                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4903                     break;
4904                 }
4905                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4906                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4907                 cur_data = argptr + spec->next;
4908                 spec = (void*)host_dm + host_dm->data_start + next;
4909             }
4910             break;
4911         }
4912         case DM_TABLE_DEPS:
4913         {
4914             void *hdata = (void*)host_dm + host_dm->data_start;
4915             int count = *(uint32_t*)hdata;
4916             uint64_t *hdev = hdata + 8;
4917             uint64_t *gdev = argptr + 8;
4918             int i;
4919 
4920             *(uint32_t*)argptr = tswap32(count);
4921             for (i = 0; i < count; i++) {
4922                 *gdev = tswap64(*hdev);
4923                 gdev++;
4924                 hdev++;
4925             }
4926             break;
4927         }
4928         case DM_LIST_VERSIONS:
4929         {
4930             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4931             uint32_t remaining_data = guest_data_size;
4932             void *cur_data = argptr;
4933             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4934             int vers_size = thunk_type_size(arg_type, 0);
4935 
4936             while (1) {
4937                 uint32_t next = vers->next;
4938                 if (next) {
4939                     vers->next = vers_size + (strlen(vers->name) + 1);
4940                 }
4941                 if (remaining_data < vers->next) {
4942                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4943                     break;
4944                 }
4945                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4946                 strcpy(cur_data + vers_size, vers->name);
4947                 cur_data += vers->next;
4948                 remaining_data -= vers->next;
4949                 if (!next) {
4950                     break;
4951                 }
4952                 vers = (void*)vers + next;
4953             }
4954             break;
4955         }
4956         default:
4957             unlock_user(argptr, guest_data, 0);
4958             ret = -TARGET_EINVAL;
4959             goto out;
4960         }
4961         unlock_user(argptr, guest_data, guest_data_size);
4962 
4963         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4964         if (!argptr) {
4965             ret = -TARGET_EFAULT;
4966             goto out;
4967         }
4968         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4969         unlock_user(argptr, arg, target_size);
4970     }
4971 out:
4972     g_free(big_buf);
4973     return ret;
4974 }
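/*
 * The device-mapper ioctls handled above carry a variable-sized payload
 * after the fixed struct dm_ioctl header, described by data_start/data_size,
 * which is why both directions are converted case by case here instead of
 * through a single thunk descriptor.
 */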
4975 
4976 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4977                                int cmd, abi_long arg)
4978 {
4979     void *argptr;
4980     int target_size;
4981     const argtype *arg_type = ie->arg_type;
4982     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4983     abi_long ret;
4984 
4985     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4986     struct blkpg_partition host_part;
4987 
4988     /* Read and convert blkpg */
4989     arg_type++;
4990     target_size = thunk_type_size(arg_type, 0);
4991     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4992     if (!argptr) {
4993         ret = -TARGET_EFAULT;
4994         goto out;
4995     }
4996     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4997     unlock_user(argptr, arg, 0);
4998 
4999     switch (host_blkpg->op) {
5000     case BLKPG_ADD_PARTITION:
5001     case BLKPG_DEL_PARTITION:
5002         /* payload is struct blkpg_partition */
5003         break;
5004     default:
5005         /* Unknown opcode */
5006         ret = -TARGET_EINVAL;
5007         goto out;
5008     }
5009 
5010     /* Read and convert blkpg->data */
5011     arg = (abi_long)(uintptr_t)host_blkpg->data;
5012     target_size = thunk_type_size(part_arg_type, 0);
5013     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5014     if (!argptr) {
5015         ret = -TARGET_EFAULT;
5016         goto out;
5017     }
5018     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5019     unlock_user(argptr, arg, 0);
5020 
5021     /* Swizzle the data pointer to our local copy and call! */
5022     host_blkpg->data = &host_part;
5023     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5024 
5025 out:
5026     return ret;
5027 }
5028 
5029 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5030                                 int fd, int cmd, abi_long arg)
5031 {
5032     const argtype *arg_type = ie->arg_type;
5033     const StructEntry *se;
5034     const argtype *field_types;
5035     const int *dst_offsets, *src_offsets;
5036     int target_size;
5037     void *argptr;
5038     abi_ulong *target_rt_dev_ptr = NULL;
5039     unsigned long *host_rt_dev_ptr = NULL;
5040     abi_long ret;
5041     int i;
5042 
5043     assert(ie->access == IOC_W);
5044     assert(*arg_type == TYPE_PTR);
5045     arg_type++;
5046     assert(*arg_type == TYPE_STRUCT);
5047     target_size = thunk_type_size(arg_type, 0);
5048     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5049     if (!argptr) {
5050         return -TARGET_EFAULT;
5051     }
5052     arg_type++;
5053     assert(*arg_type == (int)STRUCT_rtentry);
5054     se = struct_entries + *arg_type++;
5055     assert(se->convert[0] == NULL);
5056     /* convert struct here to be able to catch rt_dev string */
5057     field_types = se->field_types;
5058     dst_offsets = se->field_offsets[THUNK_HOST];
5059     src_offsets = se->field_offsets[THUNK_TARGET];
5060     for (i = 0; i < se->nb_fields; i++) {
5061         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5062             assert(*field_types == TYPE_PTRVOID);
5063             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5064             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5065             if (*target_rt_dev_ptr != 0) {
5066                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5067                                                   tswapal(*target_rt_dev_ptr));
5068                 if (!*host_rt_dev_ptr) {
5069                     unlock_user(argptr, arg, 0);
5070                     return -TARGET_EFAULT;
5071                 }
5072             } else {
5073                 *host_rt_dev_ptr = 0;
5074             }
5075             field_types++;
5076             continue;
5077         }
5078         field_types = thunk_convert(buf_temp + dst_offsets[i],
5079                                     argptr + src_offsets[i],
5080                                     field_types, THUNK_HOST);
5081     }
5082     unlock_user(argptr, arg, 0);
5083 
5084     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5085 
5086     assert(host_rt_dev_ptr != NULL);
5087     assert(target_rt_dev_ptr != NULL);
5088     if (*host_rt_dev_ptr != 0) {
5089         unlock_user((void *)*host_rt_dev_ptr,
5090                     *target_rt_dev_ptr, 0);
5091     }
5092     return ret;
5093 }
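/*
 * The rtentry ioctls (SIOCADDRT/SIOCDELRT) need this special case because
 * struct rtentry embeds rt_dev, a pointer to a device name string in guest
 * memory: the string has to be locked into host memory and passed as a host
 * pointer, which the generic field-by-field thunk conversion cannot express.
 */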
5094 
5095 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5096                                      int fd, int cmd, abi_long arg)
5097 {
5098     int sig = target_to_host_signal(arg);
5099     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5100 }
5101 
5102 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5103                                     int fd, int cmd, abi_long arg)
5104 {
5105     struct timeval tv;
5106     abi_long ret;
5107 
5108     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5109     if (is_error(ret)) {
5110         return ret;
5111     }
5112 
5113     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5114         if (copy_to_user_timeval(arg, &tv)) {
5115             return -TARGET_EFAULT;
5116         }
5117     } else {
5118         if (copy_to_user_timeval64(arg, &tv)) {
5119             return -TARGET_EFAULT;
5120         }
5121     }
5122 
5123     return ret;
5124 }
5125 
5126 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5127                                       int fd, int cmd, abi_long arg)
5128 {
5129     struct timespec ts;
5130     abi_long ret;
5131 
5132     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5133     if (is_error(ret)) {
5134         return ret;
5135     }
5136 
5137     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5138         if (host_to_target_timespec(arg, &ts)) {
5139             return -TARGET_EFAULT;
5140         }
5141     } else {
5142         if (host_to_target_timespec64(arg, &ts)) {
5143             return -TARGET_EFAULT;
5144         }
5145     }
5146 
5147     return ret;
5148 }
5149 
5150 #ifdef TIOCGPTPEER
5151 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5152                                      int fd, int cmd, abi_long arg)
5153 {
5154     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5155     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5156 }
5157 #endif
5158 
5159 static IOCTLEntry ioctl_entries[] = {
5160 #define IOCTL(cmd, access, ...) \
5161     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5162 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5163     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5164 #define IOCTL_IGNORE(cmd) \
5165     { TARGET_ ## cmd, 0, #cmd },
5166 #include "ioctls.h"
5167     { 0, 0, },
5168 };
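/*
 * For illustration (not one of the generated entries): an ioctls.h line such
 * as
 *
 *     IOCTL(BLKGETSIZE, IOC_R, MK_PTR(TYPE_ULONG))
 *
 * expands through the IOCTL() macro above to
 *
 *     { TARGET_BLKGETSIZE, BLKGETSIZE, "BLKGETSIZE", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONG) } },
 *
 * i.e. target command, host command, name, access mode, no special handler
 * and the argument type description used by the thunk converter.
 */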
5169 
5170 /* ??? Implement proper locking for ioctls.  */
5171 /* do_ioctl() must return target values and target errnos. */
5172 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5173 {
5174     const IOCTLEntry *ie;
5175     const argtype *arg_type;
5176     abi_long ret;
5177     uint8_t buf_temp[MAX_STRUCT_SIZE];
5178     int target_size;
5179     void *argptr;
5180 
5181     ie = ioctl_entries;
5182     for(;;) {
5183         if (ie->target_cmd == 0) {
5184             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5185             return -TARGET_ENOSYS;
5186         }
5187         if (ie->target_cmd == cmd)
5188             break;
5189         ie++;
5190     }
5191     arg_type = ie->arg_type;
5192     if (ie->do_ioctl) {
5193         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5194     } else if (!ie->host_cmd) {
5195         /* Some architectures define BSD ioctls in their headers
5196            that are not implemented in Linux.  */
5197         return -TARGET_ENOSYS;
5198     }
5199 
5200     switch(arg_type[0]) {
5201     case TYPE_NULL:
5202         /* no argument */
5203         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5204         break;
5205     case TYPE_PTRVOID:
5206     case TYPE_INT:
5207     case TYPE_LONG:
5208     case TYPE_ULONG:
5209         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5210         break;
5211     case TYPE_PTR:
5212         arg_type++;
5213         target_size = thunk_type_size(arg_type, 0);
5214         switch(ie->access) {
5215         case IOC_R:
5216             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5217             if (!is_error(ret)) {
5218                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5219                 if (!argptr)
5220                     return -TARGET_EFAULT;
5221                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5222                 unlock_user(argptr, arg, target_size);
5223             }
5224             break;
5225         case IOC_W:
5226             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5227             if (!argptr)
5228                 return -TARGET_EFAULT;
5229             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5230             unlock_user(argptr, arg, 0);
5231             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5232             break;
5233         default:
5234         case IOC_RW:
5235             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5236             if (!argptr)
5237                 return -TARGET_EFAULT;
5238             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5239             unlock_user(argptr, arg, 0);
5240             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5241             if (!is_error(ret)) {
5242                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5243                 if (!argptr)
5244                     return -TARGET_EFAULT;
5245                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5246                 unlock_user(argptr, arg, target_size);
5247             }
5248             break;
5249         }
5250         break;
5251     default:
5252         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5253                  (long)cmd, arg_type[0]);
5254         ret = -TARGET_ENOSYS;
5255         break;
5256     }
5257     return ret;
5258 }
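/*
 * For the generic TYPE_PTR path above: IOC_R converts host data to the guest
 * after the ioctl succeeds, IOC_W converts guest data to the host before the
 * call, and IOC_RW does both, always staging through buf_temp so the host
 * kernel never sees guest-layout data.
 */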
5259 
5260 static const bitmask_transtbl iflag_tbl[] = {
5261         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5262         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5263         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5264         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5265         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5266         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5267         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5268         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5269         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5270         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5271         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5272         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5273         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5274         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5275         { 0, 0, 0, 0 }
5276 };
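/*
 * Each row in these flag-translation tables is { target mask, target value,
 * host mask, host value }: target_to_host_bitmask() sets the host value
 * whenever the masked target flags equal the target value, and
 * host_to_target_bitmask() does the reverse.  Multi-bit fields such as
 * NLDLY, CRDLY, CBAUD and CSIZE in the tables below therefore get one row
 * per possible value.
 */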
5277 
5278 static const bitmask_transtbl oflag_tbl[] = {
5279 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5280 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5281 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5282 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5283 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5284 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5285 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5286 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5287 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5288 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5289 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5290 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5291 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5292 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5293 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5294 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5295 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5296 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5297 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5298 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5299 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5300 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5301 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5302 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5303 	{ 0, 0, 0, 0 }
5304 };
5305 
5306 static const bitmask_transtbl cflag_tbl[] = {
5307 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5308 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5309 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5310 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5311 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5312 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5313 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5314 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5315 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5316 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5317 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5318 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5319 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5320 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5321 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5322 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5323 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5324 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5325 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5326 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5327 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5328 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5329 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5330 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5331 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5332 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5333 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5334 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5335 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5336 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5337 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5338 	{ 0, 0, 0, 0 }
5339 };
5340 
5341 static const bitmask_transtbl lflag_tbl[] = {
5342 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5343 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5344 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5345 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5346 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5347 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5348 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5349 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5350 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5351 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5352 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5353 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5354 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5355 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5356 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5357 	{ 0, 0, 0, 0 }
5358 };
5359 
5360 static void target_to_host_termios (void *dst, const void *src)
5361 {
5362     struct host_termios *host = dst;
5363     const struct target_termios *target = src;
5364 
5365     host->c_iflag =
5366         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5367     host->c_oflag =
5368         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5369     host->c_cflag =
5370         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5371     host->c_lflag =
5372         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5373     host->c_line = target->c_line;
5374 
5375     memset(host->c_cc, 0, sizeof(host->c_cc));
5376     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5377     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5378     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5379     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5380     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5381     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5382     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5383     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5384     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5385     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5386     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5387     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5388     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5389     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5390     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5391     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5392     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5393 }
5394 
5395 static void host_to_target_termios (void *dst, const void *src)
5396 {
5397     struct target_termios *target = dst;
5398     const struct host_termios *host = src;
5399 
5400     target->c_iflag =
5401         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5402     target->c_oflag =
5403         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5404     target->c_cflag =
5405         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5406     target->c_lflag =
5407         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5408     target->c_line = host->c_line;
5409 
5410     memset(target->c_cc, 0, sizeof(target->c_cc));
5411     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5412     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5413     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5414     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5415     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5416     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5417     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5418     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5419     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5420     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5421     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5422     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5423     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5424     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5425     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5426     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5427     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5428 }
5429 
5430 static const StructEntry struct_termios_def = {
5431     .convert = { host_to_target_termios, target_to_host_termios },
5432     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5433     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5434 };
5435 
5436 static bitmask_transtbl mmap_flags_tbl[] = {
5437     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5438     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5439     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5440     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5441       MAP_ANONYMOUS, MAP_ANONYMOUS },
5442     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5443       MAP_GROWSDOWN, MAP_GROWSDOWN },
5444     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5445       MAP_DENYWRITE, MAP_DENYWRITE },
5446     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5447       MAP_EXECUTABLE, MAP_EXECUTABLE },
5448     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5449     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5450       MAP_NORESERVE, MAP_NORESERVE },
5451     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5452     /* MAP_STACK has been ignored by the kernel for quite some time.
5453        Recognize it for the target insofar as we do not want to pass
5454        it through to the host.  */
5455     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5456     { 0, 0, 0, 0 }
5457 };
5458 
5459 #if defined(TARGET_I386)
5460 
5461 /* NOTE: there is really one LDT for all the threads */
5462 static uint8_t *ldt_table;
5463 
5464 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5465 {
5466     int size;
5467     void *p;
5468 
5469     if (!ldt_table)
5470         return 0;
5471     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5472     if (size > bytecount)
5473         size = bytecount;
5474     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5475     if (!p)
5476         return -TARGET_EFAULT;
5477     /* ??? Should this be byteswapped?  */
5478     memcpy(p, ldt_table, size);
5479     unlock_user(p, ptr, size);
5480     return size;
5481 }
5482 
5483 /* XXX: add locking support */
5484 static abi_long write_ldt(CPUX86State *env,
5485                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5486 {
5487     struct target_modify_ldt_ldt_s ldt_info;
5488     struct target_modify_ldt_ldt_s *target_ldt_info;
5489     int seg_32bit, contents, read_exec_only, limit_in_pages;
5490     int seg_not_present, useable, lm;
5491     uint32_t *lp, entry_1, entry_2;
5492 
5493     if (bytecount != sizeof(ldt_info))
5494         return -TARGET_EINVAL;
5495     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5496         return -TARGET_EFAULT;
5497     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5498     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5499     ldt_info.limit = tswap32(target_ldt_info->limit);
5500     ldt_info.flags = tswap32(target_ldt_info->flags);
5501     unlock_user_struct(target_ldt_info, ptr, 0);
5502 
5503     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5504         return -TARGET_EINVAL;
5505     seg_32bit = ldt_info.flags & 1;
5506     contents = (ldt_info.flags >> 1) & 3;
5507     read_exec_only = (ldt_info.flags >> 3) & 1;
5508     limit_in_pages = (ldt_info.flags >> 4) & 1;
5509     seg_not_present = (ldt_info.flags >> 5) & 1;
5510     useable = (ldt_info.flags >> 6) & 1;
5511 #ifdef TARGET_ABI32
5512     lm = 0;
5513 #else
5514     lm = (ldt_info.flags >> 7) & 1;
5515 #endif
5516     if (contents == 3) {
5517         if (oldmode)
5518             return -TARGET_EINVAL;
5519         if (seg_not_present == 0)
5520             return -TARGET_EINVAL;
5521     }
5522     /* allocate the LDT */
5523     if (!ldt_table) {
5524         env->ldt.base = target_mmap(0,
5525                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5526                                     PROT_READ|PROT_WRITE,
5527                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5528         if (env->ldt.base == -1)
5529             return -TARGET_ENOMEM;
5530         memset(g2h(env->ldt.base), 0,
5531                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5532         env->ldt.limit = 0xffff;
5533         ldt_table = g2h(env->ldt.base);
5534     }
5535 
5536     /* NOTE: same code as Linux kernel */
5537     /* Allow LDTs to be cleared by the user. */
5538     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5539         if (oldmode ||
5540             (contents == 0		&&
5541              read_exec_only == 1	&&
5542              seg_32bit == 0		&&
5543              limit_in_pages == 0	&&
5544              seg_not_present == 1	&&
5545              useable == 0 )) {
5546             entry_1 = 0;
5547             entry_2 = 0;
5548             goto install;
5549         }
5550     }
5551 
5552     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5553         (ldt_info.limit & 0x0ffff);
5554     entry_2 = (ldt_info.base_addr & 0xff000000) |
5555         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5556         (ldt_info.limit & 0xf0000) |
5557         ((read_exec_only ^ 1) << 9) |
5558         (contents << 10) |
5559         ((seg_not_present ^ 1) << 15) |
5560         (seg_32bit << 22) |
5561         (limit_in_pages << 23) |
5562         (lm << 21) |
5563         0x7000;
5564     if (!oldmode)
5565         entry_2 |= (useable << 20);
5566 
5567     /* Install the new entry ...  */
5568 install:
5569     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5570     lp[0] = tswap32(entry_1);
5571     lp[1] = tswap32(entry_2);
5572     return 0;
5573 }
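/*
 * Note on the encoding above: entry_1/entry_2 are the low and high words of
 * an x86 segment descriptor.  entry_1 packs base[15:0] into bits 31..16 and
 * limit[15:0] into bits 15..0; entry_2 packs base[31:24] and base[23:16],
 * limit[19:16], the G/D/L/AVL flags, P (present), DPL=3 and S=1 (the 0x7000
 * constant), plus the type bits derived from contents and read_exec_only,
 * matching the layout the Linux kernel builds for modify_ldt().
 */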
5574 
5575 /* specific and weird i386 syscalls */
5576 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5577                               unsigned long bytecount)
5578 {
5579     abi_long ret;
5580 
5581     switch (func) {
5582     case 0:
5583         ret = read_ldt(ptr, bytecount);
5584         break;
5585     case 1:
5586         ret = write_ldt(env, ptr, bytecount, 1);
5587         break;
5588     case 0x11:
5589         ret = write_ldt(env, ptr, bytecount, 0);
5590         break;
5591     default:
5592         ret = -TARGET_ENOSYS;
5593         break;
5594     }
5595     return ret;
5596 }
5597 
5598 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5599 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5600 {
5601     uint64_t *gdt_table = g2h(env->gdt.base);
5602     struct target_modify_ldt_ldt_s ldt_info;
5603     struct target_modify_ldt_ldt_s *target_ldt_info;
5604     int seg_32bit, contents, read_exec_only, limit_in_pages;
5605     int seg_not_present, useable, lm;
5606     uint32_t *lp, entry_1, entry_2;
5607     int i;
5608 
5609     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5610     if (!target_ldt_info)
5611         return -TARGET_EFAULT;
5612     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5613     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5614     ldt_info.limit = tswap32(target_ldt_info->limit);
5615     ldt_info.flags = tswap32(target_ldt_info->flags);
5616     if (ldt_info.entry_number == -1) {
5617         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5618             if (gdt_table[i] == 0) {
5619                 ldt_info.entry_number = i;
5620                 target_ldt_info->entry_number = tswap32(i);
5621                 break;
5622             }
5623         }
5624     }
5625     unlock_user_struct(target_ldt_info, ptr, 1);
5626 
5627     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5628         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5629            return -TARGET_EINVAL;
5630     seg_32bit = ldt_info.flags & 1;
5631     contents = (ldt_info.flags >> 1) & 3;
5632     read_exec_only = (ldt_info.flags >> 3) & 1;
5633     limit_in_pages = (ldt_info.flags >> 4) & 1;
5634     seg_not_present = (ldt_info.flags >> 5) & 1;
5635     useable = (ldt_info.flags >> 6) & 1;
5636 #ifdef TARGET_ABI32
5637     lm = 0;
5638 #else
5639     lm = (ldt_info.flags >> 7) & 1;
5640 #endif
5641 
5642     if (contents == 3) {
5643         if (seg_not_present == 0)
5644             return -TARGET_EINVAL;
5645     }
5646 
5647     /* NOTE: same code as Linux kernel */
5648     /* Allow LDTs to be cleared by the user. */
5649     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5650         if ((contents == 0             &&
5651              read_exec_only == 1       &&
5652              seg_32bit == 0            &&
5653              limit_in_pages == 0       &&
5654              seg_not_present == 1      &&
5655              useable == 0 )) {
5656             entry_1 = 0;
5657             entry_2 = 0;
5658             goto install;
5659         }
5660     }
5661 
5662     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5663         (ldt_info.limit & 0x0ffff);
5664     entry_2 = (ldt_info.base_addr & 0xff000000) |
5665         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5666         (ldt_info.limit & 0xf0000) |
5667         ((read_exec_only ^ 1) << 9) |
5668         (contents << 10) |
5669         ((seg_not_present ^ 1) << 15) |
5670         (seg_32bit << 22) |
5671         (limit_in_pages << 23) |
5672         (useable << 20) |
5673         (lm << 21) |
5674         0x7000;
5675 
5676     /* Install the new entry ...  */
5677 install:
5678     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5679     lp[0] = tswap32(entry_1);
5680     lp[1] = tswap32(entry_2);
5681     return 0;
5682 }
5683 
5684 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5685 {
5686     struct target_modify_ldt_ldt_s *target_ldt_info;
5687     uint64_t *gdt_table = g2h(env->gdt.base);
5688     uint32_t base_addr, limit, flags;
5689     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5690     int seg_not_present, useable, lm;
5691     uint32_t *lp, entry_1, entry_2;
5692 
5693     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5694     if (!target_ldt_info)
5695         return -TARGET_EFAULT;
5696     idx = tswap32(target_ldt_info->entry_number);
5697     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5698         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5699         unlock_user_struct(target_ldt_info, ptr, 1);
5700         return -TARGET_EINVAL;
5701     }
5702     lp = (uint32_t *)(gdt_table + idx);
5703     entry_1 = tswap32(lp[0]);
5704     entry_2 = tswap32(lp[1]);
5705 
5706     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5707     contents = (entry_2 >> 10) & 3;
5708     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5709     seg_32bit = (entry_2 >> 22) & 1;
5710     limit_in_pages = (entry_2 >> 23) & 1;
5711     useable = (entry_2 >> 20) & 1;
5712 #ifdef TARGET_ABI32
5713     lm = 0;
5714 #else
5715     lm = (entry_2 >> 21) & 1;
5716 #endif
5717     flags = (seg_32bit << 0) | (contents << 1) |
5718         (read_exec_only << 3) | (limit_in_pages << 4) |
5719         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5720     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5721     base_addr = (entry_1 >> 16) |
5722         (entry_2 & 0xff000000) |
5723         ((entry_2 & 0xff) << 16);
5724     target_ldt_info->base_addr = tswapal(base_addr);
5725     target_ldt_info->limit = tswap32(limit);
5726     target_ldt_info->flags = tswap32(flags);
5727     unlock_user_struct(target_ldt_info, ptr, 1);
5728     return 0;
5729 }
5730 #endif /* TARGET_I386 && TARGET_ABI32 */
5731 
5732 #ifndef TARGET_ABI32
5733 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5734 {
5735     abi_long ret = 0;
5736     abi_ulong val;
5737     int idx;
5738 
5739     switch(code) {
5740     case TARGET_ARCH_SET_GS:
5741     case TARGET_ARCH_SET_FS:
5742         if (code == TARGET_ARCH_SET_GS)
5743             idx = R_GS;
5744         else
5745             idx = R_FS;
5746         cpu_x86_load_seg(env, idx, 0);
5747         env->segs[idx].base = addr;
5748         break;
5749     case TARGET_ARCH_GET_GS:
5750     case TARGET_ARCH_GET_FS:
5751         if (code == TARGET_ARCH_GET_GS)
5752             idx = R_GS;
5753         else
5754             idx = R_FS;
5755         val = env->segs[idx].base;
5756         if (put_user(val, addr, abi_ulong))
5757             ret = -TARGET_EFAULT;
5758         break;
5759     default:
5760         ret = -TARGET_EINVAL;
5761         break;
5762     }
5763     return ret;
5764 }
5765 #endif
5766 
5767 #endif /* defined(TARGET_I386) */
5768 
5769 #define NEW_STACK_SIZE 0x40000
5770 
5771 
5772 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5773 typedef struct {
5774     CPUArchState *env;
5775     pthread_mutex_t mutex;
5776     pthread_cond_t cond;
5777     pthread_t thread;
5778     uint32_t tid;
5779     abi_ulong child_tidptr;
5780     abi_ulong parent_tidptr;
5781     sigset_t sigmask;
5782 } new_thread_info;
5783 
5784 static void *clone_func(void *arg)
5785 {
5786     new_thread_info *info = arg;
5787     CPUArchState *env;
5788     CPUState *cpu;
5789     TaskState *ts;
5790 
5791     rcu_register_thread();
5792     tcg_register_thread();
5793     env = info->env;
5794     cpu = env_cpu(env);
5795     thread_cpu = cpu;
5796     ts = (TaskState *)cpu->opaque;
5797     info->tid = sys_gettid();
5798     task_settid(ts);
5799     if (info->child_tidptr)
5800         put_user_u32(info->tid, info->child_tidptr);
5801     if (info->parent_tidptr)
5802         put_user_u32(info->tid, info->parent_tidptr);
5803     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5804     /* Enable signals.  */
5805     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5806     /* Signal to the parent that we're ready.  */
5807     pthread_mutex_lock(&info->mutex);
5808     pthread_cond_broadcast(&info->cond);
5809     pthread_mutex_unlock(&info->mutex);
5810     /* Wait until the parent has finished initializing the tls state.  */
5811     pthread_mutex_lock(&clone_lock);
5812     pthread_mutex_unlock(&clone_lock);
5813     cpu_loop(env);
5814     /* never exits */
5815     return NULL;
5816 }
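/*
 * Synchronisation with do_fork(): the parent holds clone_lock while it
 * finishes setting up the new CPUState/TaskState and waits on info->cond
 * for the child to publish its tid.  The child signals the condition once
 * its tid is known, then briefly takes clone_lock so that it cannot enter
 * cpu_loop() before the parent has released it.
 */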
5817 
5818 /* do_fork() must return host values and target errnos (unlike most
5819    do_*() functions). */
5820 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5821                    abi_ulong parent_tidptr, target_ulong newtls,
5822                    abi_ulong child_tidptr)
5823 {
5824     CPUState *cpu = env_cpu(env);
5825     int ret;
5826     TaskState *ts;
5827     CPUState *new_cpu;
5828     CPUArchState *new_env;
5829     sigset_t sigmask;
5830 
5831     flags &= ~CLONE_IGNORED_FLAGS;
5832 
5833     /* Emulate vfork() with fork() */
5834     if (flags & CLONE_VFORK)
5835         flags &= ~(CLONE_VFORK | CLONE_VM);
5836 
5837     if (flags & CLONE_VM) {
5838         TaskState *parent_ts = (TaskState *)cpu->opaque;
5839         new_thread_info info;
5840         pthread_attr_t attr;
5841 
5842         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5843             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5844             return -TARGET_EINVAL;
5845         }
5846 
5847         ts = g_new0(TaskState, 1);
5848         init_task_state(ts);
5849 
5850         /* Grab a mutex so that thread setup appears atomic.  */
5851         pthread_mutex_lock(&clone_lock);
5852 
5853         /* we create a new CPU instance. */
5854         new_env = cpu_copy(env);
5855         /* Init regs that differ from the parent.  */
5856         cpu_clone_regs_child(new_env, newsp, flags);
5857         cpu_clone_regs_parent(env, flags);
5858         new_cpu = env_cpu(new_env);
5859         new_cpu->opaque = ts;
5860         ts->bprm = parent_ts->bprm;
5861         ts->info = parent_ts->info;
5862         ts->signal_mask = parent_ts->signal_mask;
5863 
5864         if (flags & CLONE_CHILD_CLEARTID) {
5865             ts->child_tidptr = child_tidptr;
5866         }
5867 
5868         if (flags & CLONE_SETTLS) {
5869             cpu_set_tls (new_env, newtls);
5870         }
5871 
5872         memset(&info, 0, sizeof(info));
5873         pthread_mutex_init(&info.mutex, NULL);
5874         pthread_mutex_lock(&info.mutex);
5875         pthread_cond_init(&info.cond, NULL);
5876         info.env = new_env;
5877         if (flags & CLONE_CHILD_SETTID) {
5878             info.child_tidptr = child_tidptr;
5879         }
5880         if (flags & CLONE_PARENT_SETTID) {
5881             info.parent_tidptr = parent_tidptr;
5882         }
5883 
5884         ret = pthread_attr_init(&attr);
5885         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5886         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5887         /* It is not safe to deliver signals until the child has finished
5888            initializing, so temporarily block all signals.  */
5889         sigfillset(&sigmask);
5890         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5891         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5892 
5893         /* If this is our first additional thread, we need to ensure we
5894          * generate code for parallel execution and flush old translations.
5895          */
5896         if (!parallel_cpus) {
5897             parallel_cpus = true;
5898             tb_flush(cpu);
5899         }
5900 
5901         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5902         /* TODO: Free new CPU state if thread creation failed.  */
5903 
5904         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5905         pthread_attr_destroy(&attr);
5906         if (ret == 0) {
5907             /* Wait for the child to initialize.  */
5908             pthread_cond_wait(&info.cond, &info.mutex);
5909             ret = info.tid;
5910         } else {
5911             ret = -1;
5912         }
5913         pthread_mutex_unlock(&info.mutex);
5914         pthread_cond_destroy(&info.cond);
5915         pthread_mutex_destroy(&info.mutex);
5916         pthread_mutex_unlock(&clone_lock);
5917     } else {
5918         /* if no CLONE_VM, we consider it a fork */
5919         if (flags & CLONE_INVALID_FORK_FLAGS) {
5920             return -TARGET_EINVAL;
5921         }
5922 
5923         /* We can't support custom termination signals */
5924         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5925             return -TARGET_EINVAL;
5926         }
5927 
5928         if (block_signals()) {
5929             return -TARGET_ERESTARTSYS;
5930         }
5931 
5932         fork_start();
5933         ret = fork();
5934         if (ret == 0) {
5935             /* Child Process.  */
5936             cpu_clone_regs_child(env, newsp, flags);
5937             fork_end(1);
5938             /* There is a race condition here.  The parent process could
5939                theoretically read the TID in the child process before the child
5940                tid is set.  This would require using either ptrace
5941                (not implemented) or having *_tidptr point at a shared memory
5942                mapping.  We can't repeat the spinlock hack used above because
5943                the child process gets its own copy of the lock.  */
5944             if (flags & CLONE_CHILD_SETTID)
5945                 put_user_u32(sys_gettid(), child_tidptr);
5946             if (flags & CLONE_PARENT_SETTID)
5947                 put_user_u32(sys_gettid(), parent_tidptr);
5948             ts = (TaskState *)cpu->opaque;
5949             if (flags & CLONE_SETTLS)
5950                 cpu_set_tls (env, newtls);
5951             if (flags & CLONE_CHILD_CLEARTID)
5952                 ts->child_tidptr = child_tidptr;
5953         } else {
5954             cpu_clone_regs_parent(env, flags);
5955             fork_end(0);
5956         }
5957     }
5958     return ret;
5959 }
5960 
5961 /* warning: doesn't handle Linux-specific flags... */
5962 static int target_to_host_fcntl_cmd(int cmd)
5963 {
5964     int ret;
5965 
5966     switch(cmd) {
5967     case TARGET_F_DUPFD:
5968     case TARGET_F_GETFD:
5969     case TARGET_F_SETFD:
5970     case TARGET_F_GETFL:
5971     case TARGET_F_SETFL:
5972         ret = cmd;
5973         break;
5974     case TARGET_F_GETLK:
5975         ret = F_GETLK64;
5976         break;
5977     case TARGET_F_SETLK:
5978         ret = F_SETLK64;
5979         break;
5980     case TARGET_F_SETLKW:
5981         ret = F_SETLKW64;
5982         break;
5983     case TARGET_F_GETOWN:
5984         ret = F_GETOWN;
5985         break;
5986     case TARGET_F_SETOWN:
5987         ret = F_SETOWN;
5988         break;
5989     case TARGET_F_GETSIG:
5990         ret = F_GETSIG;
5991         break;
5992     case TARGET_F_SETSIG:
5993         ret = F_SETSIG;
5994         break;
5995 #if TARGET_ABI_BITS == 32
5996     case TARGET_F_GETLK64:
5997         ret = F_GETLK64;
5998         break;
5999     case TARGET_F_SETLK64:
6000         ret = F_SETLK64;
6001         break;
6002     case TARGET_F_SETLKW64:
6003         ret = F_SETLKW64;
6004         break;
6005 #endif
6006     case TARGET_F_SETLEASE:
6007         ret = F_SETLEASE;
6008         break;
6009     case TARGET_F_GETLEASE:
6010         ret = F_GETLEASE;
6011         break;
6012 #ifdef F_DUPFD_CLOEXEC
6013     case TARGET_F_DUPFD_CLOEXEC:
6014         ret = F_DUPFD_CLOEXEC;
6015         break;
6016 #endif
6017     case TARGET_F_NOTIFY:
6018         ret = F_NOTIFY;
6019         break;
6020 #ifdef F_GETOWN_EX
6021     case TARGET_F_GETOWN_EX:
6022         ret = F_GETOWN_EX;
6023         break;
6024 #endif
6025 #ifdef F_SETOWN_EX
6026     case TARGET_F_SETOWN_EX:
6027         ret = F_SETOWN_EX;
6028         break;
6029 #endif
6030 #ifdef F_SETPIPE_SZ
6031     case TARGET_F_SETPIPE_SZ:
6032         ret = F_SETPIPE_SZ;
6033         break;
6034     case TARGET_F_GETPIPE_SZ:
6035         ret = F_GETPIPE_SZ;
6036         break;
6037 #endif
6038     default:
6039         ret = -TARGET_EINVAL;
6040         break;
6041     }
6042 
6043 #if defined(__powerpc64__)
6044     /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, which
6045      * are not supported by the kernel. The glibc fcntl call actually adjusts
6046      * them to 5, 6 and 7 before making the syscall(). Since we make the
6047      * syscall directly, adjust to what is supported by the kernel.
6048      */
6049     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6050         ret -= F_GETLK64 - 5;
6051     }
6052 #endif
6053 
6054     return ret;
6055 }
6056 
6057 #define FLOCK_TRANSTBL \
6058     switch (type) { \
6059     TRANSTBL_CONVERT(F_RDLCK); \
6060     TRANSTBL_CONVERT(F_WRLCK); \
6061     TRANSTBL_CONVERT(F_UNLCK); \
6062     TRANSTBL_CONVERT(F_EXLCK); \
6063     TRANSTBL_CONVERT(F_SHLCK); \
6064     }
6065 
6066 static int target_to_host_flock(int type)
6067 {
6068 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6069     FLOCK_TRANSTBL
6070 #undef  TRANSTBL_CONVERT
6071     return -TARGET_EINVAL;
6072 }
6073 
6074 static int host_to_target_flock(int type)
6075 {
6076 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6077     FLOCK_TRANSTBL
6078 #undef  TRANSTBL_CONVERT
6079     /* if we don't know how to convert the value coming
6080      * from the host, we copy it to the target field as-is
6081      */
6082     return type;
6083 }
6084 
6085 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6086                                             abi_ulong target_flock_addr)
6087 {
6088     struct target_flock *target_fl;
6089     int l_type;
6090 
6091     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6092         return -TARGET_EFAULT;
6093     }
6094 
6095     __get_user(l_type, &target_fl->l_type);
6096     l_type = target_to_host_flock(l_type);
6097     if (l_type < 0) {
6098         return l_type;
6099     }
6100     fl->l_type = l_type;
6101     __get_user(fl->l_whence, &target_fl->l_whence);
6102     __get_user(fl->l_start, &target_fl->l_start);
6103     __get_user(fl->l_len, &target_fl->l_len);
6104     __get_user(fl->l_pid, &target_fl->l_pid);
6105     unlock_user_struct(target_fl, target_flock_addr, 0);
6106     return 0;
6107 }
6108 
6109 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6110                                           const struct flock64 *fl)
6111 {
6112     struct target_flock *target_fl;
6113     short l_type;
6114 
6115     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6116         return -TARGET_EFAULT;
6117     }
6118 
6119     l_type = host_to_target_flock(fl->l_type);
6120     __put_user(l_type, &target_fl->l_type);
6121     __put_user(fl->l_whence, &target_fl->l_whence);
6122     __put_user(fl->l_start, &target_fl->l_start);
6123     __put_user(fl->l_len, &target_fl->l_len);
6124     __put_user(fl->l_pid, &target_fl->l_pid);
6125     unlock_user_struct(target_fl, target_flock_addr, 1);
6126     return 0;
6127 }
6128 
6129 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6130 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6131 
6132 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6133 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6134                                                    abi_ulong target_flock_addr)
6135 {
6136     struct target_oabi_flock64 *target_fl;
6137     int l_type;
6138 
6139     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6140         return -TARGET_EFAULT;
6141     }
6142 
6143     __get_user(l_type, &target_fl->l_type);
6144     l_type = target_to_host_flock(l_type);
6145     if (l_type < 0) {
6146         return l_type;
6147     }
6148     fl->l_type = l_type;
6149     __get_user(fl->l_whence, &target_fl->l_whence);
6150     __get_user(fl->l_start, &target_fl->l_start);
6151     __get_user(fl->l_len, &target_fl->l_len);
6152     __get_user(fl->l_pid, &target_fl->l_pid);
6153     unlock_user_struct(target_fl, target_flock_addr, 0);
6154     return 0;
6155 }
6156 
6157 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6158                                                  const struct flock64 *fl)
6159 {
6160     struct target_oabi_flock64 *target_fl;
6161     short l_type;
6162 
6163     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6164         return -TARGET_EFAULT;
6165     }
6166 
6167     l_type = host_to_target_flock(fl->l_type);
6168     __put_user(l_type, &target_fl->l_type);
6169     __put_user(fl->l_whence, &target_fl->l_whence);
6170     __put_user(fl->l_start, &target_fl->l_start);
6171     __put_user(fl->l_len, &target_fl->l_len);
6172     __put_user(fl->l_pid, &target_fl->l_pid);
6173     unlock_user_struct(target_fl, target_flock_addr, 1);
6174     return 0;
6175 }
6176 #endif
6177 
6178 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6179                                               abi_ulong target_flock_addr)
6180 {
6181     struct target_flock64 *target_fl;
6182     int l_type;
6183 
6184     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6185         return -TARGET_EFAULT;
6186     }
6187 
6188     __get_user(l_type, &target_fl->l_type);
6189     l_type = target_to_host_flock(l_type);
6190     if (l_type < 0) {
6191         return l_type;
6192     }
6193     fl->l_type = l_type;
6194     __get_user(fl->l_whence, &target_fl->l_whence);
6195     __get_user(fl->l_start, &target_fl->l_start);
6196     __get_user(fl->l_len, &target_fl->l_len);
6197     __get_user(fl->l_pid, &target_fl->l_pid);
6198     unlock_user_struct(target_fl, target_flock_addr, 0);
6199     return 0;
6200 }
6201 
6202 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6203                                             const struct flock64 *fl)
6204 {
6205     struct target_flock64 *target_fl;
6206     short l_type;
6207 
6208     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6209         return -TARGET_EFAULT;
6210     }
6211 
6212     l_type = host_to_target_flock(fl->l_type);
6213     __put_user(l_type, &target_fl->l_type);
6214     __put_user(fl->l_whence, &target_fl->l_whence);
6215     __put_user(fl->l_start, &target_fl->l_start);
6216     __put_user(fl->l_len, &target_fl->l_len);
6217     __put_user(fl->l_pid, &target_fl->l_pid);
6218     unlock_user_struct(target_fl, target_flock_addr, 1);
6219     return 0;
6220 }
6221 
6222 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6223 {
6224     struct flock64 fl64;
6225 #ifdef F_GETOWN_EX
6226     struct f_owner_ex fox;
6227     struct target_f_owner_ex *target_fox;
6228 #endif
6229     abi_long ret;
6230     int host_cmd = target_to_host_fcntl_cmd(cmd);
6231 
6232     if (host_cmd == -TARGET_EINVAL)
6233         return host_cmd;
6234 
6235     switch(cmd) {
6236     case TARGET_F_GETLK:
6237         ret = copy_from_user_flock(&fl64, arg);
6238         if (ret) {
6239             return ret;
6240         }
6241         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6242         if (ret == 0) {
6243             ret = copy_to_user_flock(arg, &fl64);
6244         }
6245         break;
6246 
6247     case TARGET_F_SETLK:
6248     case TARGET_F_SETLKW:
6249         ret = copy_from_user_flock(&fl64, arg);
6250         if (ret) {
6251             return ret;
6252         }
6253         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6254         break;
6255 
6256     case TARGET_F_GETLK64:
6257         ret = copy_from_user_flock64(&fl64, arg);
6258         if (ret) {
6259             return ret;
6260         }
6261         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6262         if (ret == 0) {
6263             ret = copy_to_user_flock64(arg, &fl64);
6264         }
6265         break;
6266     case TARGET_F_SETLK64:
6267     case TARGET_F_SETLKW64:
6268         ret = copy_from_user_flock64(&fl64, arg);
6269         if (ret) {
6270             return ret;
6271         }
6272         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6273         break;
6274 
6275     case TARGET_F_GETFL:
6276         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6277         if (ret >= 0) {
6278             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6279         }
6280         break;
6281 
6282     case TARGET_F_SETFL:
6283         ret = get_errno(safe_fcntl(fd, host_cmd,
6284                                    target_to_host_bitmask(arg,
6285                                                           fcntl_flags_tbl)));
6286         break;
6287 
6288 #ifdef F_GETOWN_EX
6289     case TARGET_F_GETOWN_EX:
6290         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6291         if (ret >= 0) {
6292             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6293                 return -TARGET_EFAULT;
6294             target_fox->type = tswap32(fox.type);
6295             target_fox->pid = tswap32(fox.pid);
6296             unlock_user_struct(target_fox, arg, 1);
6297         }
6298         break;
6299 #endif
6300 
6301 #ifdef F_SETOWN_EX
6302     case TARGET_F_SETOWN_EX:
6303         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6304             return -TARGET_EFAULT;
6305         fox.type = tswap32(target_fox->type);
6306         fox.pid = tswap32(target_fox->pid);
6307         unlock_user_struct(target_fox, arg, 0);
6308         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6309         break;
6310 #endif
6311 
6312     case TARGET_F_SETOWN:
6313     case TARGET_F_GETOWN:
6314     case TARGET_F_SETSIG:
6315     case TARGET_F_GETSIG:
6316     case TARGET_F_SETLEASE:
6317     case TARGET_F_GETLEASE:
6318     case TARGET_F_SETPIPE_SZ:
6319     case TARGET_F_GETPIPE_SZ:
6320         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6321         break;
6322 
6323     default:
6324         ret = get_errno(safe_fcntl(fd, cmd, arg));
6325         break;
6326     }
6327     return ret;
6328 }
6329 
6330 #ifdef USE_UID16
6331 
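/*
 * For targets whose uid/gid syscalls use 16-bit IDs, values above 65535
 * cannot be represented and are reported to the guest as the overflow ID
 * 65534 (e.g. a host uid of 100000 reads back as 65534), while a 16-bit
 * value of -1 keeps its "no change" meaning when widened to the host type.
 */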
6332 static inline int high2lowuid(int uid)
6333 {
6334     if (uid > 65535)
6335         return 65534;
6336     else
6337         return uid;
6338 }
6339 
6340 static inline int high2lowgid(int gid)
6341 {
6342     if (gid > 65535)
6343         return 65534;
6344     else
6345         return gid;
6346 }
6347 
6348 static inline int low2highuid(int uid)
6349 {
6350     if ((int16_t)uid == -1)
6351         return -1;
6352     else
6353         return uid;
6354 }
6355 
6356 static inline int low2highgid(int gid)
6357 {
6358     if ((int16_t)gid == -1)
6359         return -1;
6360     else
6361         return gid;
6362 }
6363 static inline int tswapid(int id)
6364 {
6365     return tswap16(id);
6366 }
6367 
6368 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6369 
6370 #else /* !USE_UID16 */
6371 static inline int high2lowuid(int uid)
6372 {
6373     return uid;
6374 }
6375 static inline int high2lowgid(int gid)
6376 {
6377     return gid;
6378 }
6379 static inline int low2highuid(int uid)
6380 {
6381     return uid;
6382 }
6383 static inline int low2highgid(int gid)
6384 {
6385     return gid;
6386 }
6387 static inline int tswapid(int id)
6388 {
6389     return tswap32(id);
6390 }
6391 
6392 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6393 
6394 #endif /* USE_UID16 */
6395 
6396 /* We must do direct syscalls for setting UID/GID, because we want to
6397  * implement the Linux system call semantics of "change only for this thread",
6398  * not the libc/POSIX semantics of "change for all threads in process".
6399  * (See http://ewontfix.com/17/ for more details.)
6400  * We use the 32-bit version of the syscalls if present; if it is not
6401  * then either the host architecture supports 32-bit UIDs natively with
6402  * the standard syscall, or the 16-bit UID is the best we can do.
6403  */
6404 #ifdef __NR_setuid32
6405 #define __NR_sys_setuid __NR_setuid32
6406 #else
6407 #define __NR_sys_setuid __NR_setuid
6408 #endif
6409 #ifdef __NR_setgid32
6410 #define __NR_sys_setgid __NR_setgid32
6411 #else
6412 #define __NR_sys_setgid __NR_setgid
6413 #endif
6414 #ifdef __NR_setresuid32
6415 #define __NR_sys_setresuid __NR_setresuid32
6416 #else
6417 #define __NR_sys_setresuid __NR_setresuid
6418 #endif
6419 #ifdef __NR_setresgid32
6420 #define __NR_sys_setresgid __NR_setresgid32
6421 #else
6422 #define __NR_sys_setresgid __NR_setresgid
6423 #endif
6424 
6425 _syscall1(int, sys_setuid, uid_t, uid)
6426 _syscall1(int, sys_setgid, gid_t, gid)
6427 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6428 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6429 
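/*
 * One-time initialization for the syscall layer: register the struct
 * layouts used by the thunk code, build the target-to-host errno table,
 * and patch the size field of ioctl numbers that depend on host struct
 * sizes (with a consistency check when host and target architecture match).
 */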
6430 void syscall_init(void)
6431 {
6432     IOCTLEntry *ie;
6433     const argtype *arg_type;
6434     int size;
6435     int i;
6436 
6437     thunk_init(STRUCT_MAX);
6438 
6439 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6440 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6441 #include "syscall_types.h"
6442 #undef STRUCT
6443 #undef STRUCT_SPECIAL
6444 
6445     /* Build target_to_host_errno_table[] from
6446      * host_to_target_errno_table[]. */
6447     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6448         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6449     }
6450 
6451     /* we patch the ioctl size if necessary. We rely on the fact that
6452        no ioctl has all the bits at '1' in the size field */
6453     ie = ioctl_entries;
6454     while (ie->target_cmd != 0) {
6455         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6456             TARGET_IOC_SIZEMASK) {
6457             arg_type = ie->arg_type;
6458             if (arg_type[0] != TYPE_PTR) {
6459                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6460                         ie->target_cmd);
6461                 exit(1);
6462             }
6463             arg_type++;
6464             size = thunk_type_size(arg_type, 0);
6465             ie->target_cmd = (ie->target_cmd &
6466                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6467                 (size << TARGET_IOC_SIZESHIFT);
6468         }
6469 
6470         /* automatic consistency check if same arch */
6471 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6472     (defined(__x86_64__) && defined(TARGET_X86_64))
6473         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6474             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6475                     ie->name, ie->target_cmd, ie->host_cmd);
6476         }
6477 #endif
6478         ie++;
6479     }
6480 }
6481 
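/*
 * Reassemble a 64-bit file offset from the pair of registers a 32-bit ABI
 * passes it in, respecting the target's word order (on a little-endian
 * 32-bit target the second word holds the high half); on 64-bit ABIs the
 * offset already fits in the first argument.
 */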
6482 #if TARGET_ABI_BITS == 32
6483 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6484 {
6485 #ifdef TARGET_WORDS_BIGENDIAN
6486     return ((uint64_t)word0 << 32) | word1;
6487 #else
6488     return ((uint64_t)word1 << 32) | word0;
6489 #endif
6490 }
6491 #else /* TARGET_ABI_BITS == 32 */
6492 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6493 {
6494     return word0;
6495 }
6496 #endif /* TARGET_ABI_BITS != 32 */
6497 
6498 #ifdef TARGET_NR_truncate64
6499 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6500                                          abi_long arg2,
6501                                          abi_long arg3,
6502                                          abi_long arg4)
6503 {
6504     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6505         arg2 = arg3;
6506         arg3 = arg4;
6507     }
6508     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6509 }
6510 #endif
6511 
6512 #ifdef TARGET_NR_ftruncate64
6513 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6514                                           abi_long arg2,
6515                                           abi_long arg3,
6516                                           abi_long arg4)
6517 {
6518     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6519         arg2 = arg3;
6520         arg3 = arg4;
6521     }
6522     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6523 }
6524 #endif
6525 
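/* Convert struct itimerspec between the guest and host layouts. */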
6526 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6527                                                  abi_ulong target_addr)
6528 {
6529     struct target_itimerspec *target_itspec;
6530 
6531     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6532         return -TARGET_EFAULT;
6533     }
6534 
6535     host_itspec->it_interval.tv_sec =
6536                             tswapal(target_itspec->it_interval.tv_sec);
6537     host_itspec->it_interval.tv_nsec =
6538                             tswapal(target_itspec->it_interval.tv_nsec);
6539     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6540     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6541 
6542     unlock_user_struct(target_itspec, target_addr, 1);
6543     return 0;
6544 }
6545 
6546 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6547                                                struct itimerspec *host_its)
6548 {
6549     struct target_itimerspec *target_itspec;
6550 
6551     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6552         return -TARGET_EFAULT;
6553     }
6554 
6555     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6556     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6557 
6558     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6559     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6560 
6561     unlock_user_struct(target_itspec, target_addr, 0);
6562     return 0;
6563 }
6564 
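/*
 * Convert struct timex between the guest and host layouts, one field at a
 * time; used by adjtimex-style syscalls.
 */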
6565 static inline abi_long target_to_host_timex(struct timex *host_tx,
6566                                             abi_long target_addr)
6567 {
6568     struct target_timex *target_tx;
6569 
6570     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6571         return -TARGET_EFAULT;
6572     }
6573 
6574     __get_user(host_tx->modes, &target_tx->modes);
6575     __get_user(host_tx->offset, &target_tx->offset);
6576     __get_user(host_tx->freq, &target_tx->freq);
6577     __get_user(host_tx->maxerror, &target_tx->maxerror);
6578     __get_user(host_tx->esterror, &target_tx->esterror);
6579     __get_user(host_tx->status, &target_tx->status);
6580     __get_user(host_tx->constant, &target_tx->constant);
6581     __get_user(host_tx->precision, &target_tx->precision);
6582     __get_user(host_tx->tolerance, &target_tx->tolerance);
6583     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6584     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6585     __get_user(host_tx->tick, &target_tx->tick);
6586     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6587     __get_user(host_tx->jitter, &target_tx->jitter);
6588     __get_user(host_tx->shift, &target_tx->shift);
6589     __get_user(host_tx->stabil, &target_tx->stabil);
6590     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6591     __get_user(host_tx->calcnt, &target_tx->calcnt);
6592     __get_user(host_tx->errcnt, &target_tx->errcnt);
6593     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6594     __get_user(host_tx->tai, &target_tx->tai);
6595 
6596     unlock_user_struct(target_tx, target_addr, 0);
6597     return 0;
6598 }
6599 
6600 static inline abi_long host_to_target_timex(abi_long target_addr,
6601                                             struct timex *host_tx)
6602 {
6603     struct target_timex *target_tx;
6604 
6605     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6606         return -TARGET_EFAULT;
6607     }
6608 
6609     __put_user(host_tx->modes, &target_tx->modes);
6610     __put_user(host_tx->offset, &target_tx->offset);
6611     __put_user(host_tx->freq, &target_tx->freq);
6612     __put_user(host_tx->maxerror, &target_tx->maxerror);
6613     __put_user(host_tx->esterror, &target_tx->esterror);
6614     __put_user(host_tx->status, &target_tx->status);
6615     __put_user(host_tx->constant, &target_tx->constant);
6616     __put_user(host_tx->precision, &target_tx->precision);
6617     __put_user(host_tx->tolerance, &target_tx->tolerance);
6618     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6619     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6620     __put_user(host_tx->tick, &target_tx->tick);
6621     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6622     __put_user(host_tx->jitter, &target_tx->jitter);
6623     __put_user(host_tx->shift, &target_tx->shift);
6624     __put_user(host_tx->stabil, &target_tx->stabil);
6625     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6626     __put_user(host_tx->calcnt, &target_tx->calcnt);
6627     __put_user(host_tx->errcnt, &target_tx->errcnt);
6628     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6629     __put_user(host_tx->tai, &target_tx->tai);
6630 
6631     unlock_user_struct(target_tx, target_addr, 1);
6632     return 0;
6633 }
6634 
6635 
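/* Convert a guest struct sigevent into the host representation. */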
6636 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6637                                                abi_ulong target_addr)
6638 {
6639     struct target_sigevent *target_sevp;
6640 
6641     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6642         return -TARGET_EFAULT;
6643     }
6644 
6645     /* This union is awkward on 64 bit systems because it has a 32 bit
6646      * integer and a pointer in it; we follow the conversion approach
6647      * used for handling sigval types in signal.c so the guest should get
6648      * the correct value back even if we did a 64 bit byteswap and it's
6649      * using the 32 bit integer.
6650      */
6651     host_sevp->sigev_value.sival_ptr =
6652         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6653     host_sevp->sigev_signo =
6654         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6655     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6656     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6657 
6658     unlock_user_struct(target_sevp, target_addr, 1);
6659     return 0;
6660 }
6661 
6662 #if defined(TARGET_NR_mlockall)
6663 static inline int target_to_host_mlockall_arg(int arg)
6664 {
6665     int result = 0;
6666 
6667     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6668         result |= MCL_CURRENT;
6669     }
6670     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6671         result |= MCL_FUTURE;
6672     }
6673     return result;
6674 }
6675 #endif
6676 
6677 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6678      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6679      defined(TARGET_NR_newfstatat))
6680 static inline abi_long host_to_target_stat64(void *cpu_env,
6681                                              abi_ulong target_addr,
6682                                              struct stat *host_st)
6683 {
6684 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6685     if (((CPUARMState *)cpu_env)->eabi) {
6686         struct target_eabi_stat64 *target_st;
6687 
6688         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6689             return -TARGET_EFAULT;
6690         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6691         __put_user(host_st->st_dev, &target_st->st_dev);
6692         __put_user(host_st->st_ino, &target_st->st_ino);
6693 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6694         __put_user(host_st->st_ino, &target_st->__st_ino);
6695 #endif
6696         __put_user(host_st->st_mode, &target_st->st_mode);
6697         __put_user(host_st->st_nlink, &target_st->st_nlink);
6698         __put_user(host_st->st_uid, &target_st->st_uid);
6699         __put_user(host_st->st_gid, &target_st->st_gid);
6700         __put_user(host_st->st_rdev, &target_st->st_rdev);
6701         __put_user(host_st->st_size, &target_st->st_size);
6702         __put_user(host_st->st_blksize, &target_st->st_blksize);
6703         __put_user(host_st->st_blocks, &target_st->st_blocks);
6704         __put_user(host_st->st_atime, &target_st->target_st_atime);
6705         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6706         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6707 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6708         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6709         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6710         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6711 #endif
6712         unlock_user_struct(target_st, target_addr, 1);
6713     } else
6714 #endif
6715     {
6716 #if defined(TARGET_HAS_STRUCT_STAT64)
6717         struct target_stat64 *target_st;
6718 #else
6719         struct target_stat *target_st;
6720 #endif
6721 
6722         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6723             return -TARGET_EFAULT;
6724         memset(target_st, 0, sizeof(*target_st));
6725         __put_user(host_st->st_dev, &target_st->st_dev);
6726         __put_user(host_st->st_ino, &target_st->st_ino);
6727 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6728         __put_user(host_st->st_ino, &target_st->__st_ino);
6729 #endif
6730         __put_user(host_st->st_mode, &target_st->st_mode);
6731         __put_user(host_st->st_nlink, &target_st->st_nlink);
6732         __put_user(host_st->st_uid, &target_st->st_uid);
6733         __put_user(host_st->st_gid, &target_st->st_gid);
6734         __put_user(host_st->st_rdev, &target_st->st_rdev);
6735         /* XXX: better use of kernel struct */
6736         __put_user(host_st->st_size, &target_st->st_size);
6737         __put_user(host_st->st_blksize, &target_st->st_blksize);
6738         __put_user(host_st->st_blocks, &target_st->st_blocks);
6739         __put_user(host_st->st_atime, &target_st->target_st_atime);
6740         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6741         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6742 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6743         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6744         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6745         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6746 #endif
6747         unlock_user_struct(target_st, target_addr, 1);
6748     }
6749 
6750     return 0;
6751 }
6752 #endif
6753 
6754 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6755 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6756                                             abi_ulong target_addr)
6757 {
6758     struct target_statx *target_stx;
6759 
6760     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6761         return -TARGET_EFAULT;
6762     }
6763     memset(target_stx, 0, sizeof(*target_stx));
6764 
6765     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6766     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6767     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6768     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6769     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6770     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6771     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6772     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6773     __put_user(host_stx->stx_size, &target_stx->stx_size);
6774     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6775     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6776     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6777     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6778     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6779     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6780     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6781     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6782     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6783     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6784     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6785     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6786     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6787     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6788 
6789     unlock_user_struct(target_stx, target_addr, 1);
6790 
6791     return 0;
6792 }
6793 #endif
6794 
6795 
6796 /* ??? Using host futex calls even when target atomic operations
6797    are not really atomic probably breaks things.  However, implementing
6798    futexes locally would make futexes shared between multiple processes
6799    tricky.  In any case they're probably useless, because guest atomic
6800    operations won't work either.  */
6801 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6802                     target_ulong uaddr2, int val3)
6803 {
6804     struct timespec ts, *pts;
6805     int base_op;
6806 
6807     /* ??? We assume FUTEX_* constants are the same on both host
6808        and target.  */
6809 #ifdef FUTEX_CMD_MASK
6810     base_op = op & FUTEX_CMD_MASK;
6811 #else
6812     base_op = op;
6813 #endif
6814     switch (base_op) {
6815     case FUTEX_WAIT:
6816     case FUTEX_WAIT_BITSET:
6817         if (timeout) {
6818             pts = &ts;
6819             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
6820         } else {
6821             pts = NULL;
6822         }
6823         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6824                          pts, NULL, val3));
6825     case FUTEX_WAKE:
6826         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6827     case FUTEX_FD:
6828         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6829     case FUTEX_REQUEUE:
6830     case FUTEX_CMP_REQUEUE:
6831     case FUTEX_WAKE_OP:
6832         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6833            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6834            But the prototype takes a `struct timespec *'; insert casts
6835            to satisfy the compiler.  We do not need to tswap TIMEOUT
6836            since it's not compared to guest memory.  */
6837         pts = (struct timespec *)(uintptr_t) timeout;
6838         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6839                                     g2h(uaddr2),
6840                                     (base_op == FUTEX_CMP_REQUEUE
6841                                      ? tswap32(val3)
6842                                      : val3)));
6843     default:
6844         return -TARGET_ENOSYS;
6845     }
6846 }
6847 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6848 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6849                                      abi_long handle, abi_long mount_id,
6850                                      abi_long flags)
6851 {
6852     struct file_handle *target_fh;
6853     struct file_handle *fh;
6854     int mid = 0;
6855     abi_long ret;
6856     char *name;
6857     unsigned int size, total_size;
6858 
6859     if (get_user_s32(size, handle)) {
6860         return -TARGET_EFAULT;
6861     }
6862 
6863     name = lock_user_string(pathname);
6864     if (!name) {
6865         return -TARGET_EFAULT;
6866     }
6867 
6868     total_size = sizeof(struct file_handle) + size;
6869     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6870     if (!target_fh) {
6871         unlock_user(name, pathname, 0);
6872         return -TARGET_EFAULT;
6873     }
6874 
6875     fh = g_malloc0(total_size);
6876     fh->handle_bytes = size;
6877 
6878     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6879     unlock_user(name, pathname, 0);
6880 
6881     /* man name_to_handle_at(2):
6882      * Other than the use of the handle_bytes field, the caller should treat
6883      * the file_handle structure as an opaque data type
6884      */
6885 
6886     memcpy(target_fh, fh, total_size);
6887     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6888     target_fh->handle_type = tswap32(fh->handle_type);
6889     g_free(fh);
6890     unlock_user(target_fh, handle, total_size);
6891 
6892     if (put_user_s32(mid, mount_id)) {
6893         return -TARGET_EFAULT;
6894     }
6895 
6896     return ret;
6897 
6898 }
6899 #endif
6900 
6901 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6902 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6903                                      abi_long flags)
6904 {
6905     struct file_handle *target_fh;
6906     struct file_handle *fh;
6907     unsigned int size, total_size;
6908     abi_long ret;
6909 
6910     if (get_user_s32(size, handle)) {
6911         return -TARGET_EFAULT;
6912     }
6913 
6914     total_size = sizeof(struct file_handle) + size;
6915     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6916     if (!target_fh) {
6917         return -TARGET_EFAULT;
6918     }
6919 
6920     fh = g_memdup(target_fh, total_size);
6921     fh->handle_bytes = size;
6922     fh->handle_type = tswap32(target_fh->handle_type);
6923 
6924     ret = get_errno(open_by_handle_at(mount_fd, fh,
6925                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6926 
6927     g_free(fh);
6928 
6929     unlock_user(target_fh, handle, total_size);
6930 
6931     return ret;
6932 }
6933 #endif
6934 
6935 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6936 
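/*
 * signalfd()/signalfd4(): reject unsupported flags, convert the guest
 * sigset and the O_NONBLOCK/O_CLOEXEC flags to host values, and register
 * the resulting fd with fd_trans so reads from it can be translated for
 * the guest.
 */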
6937 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6938 {
6939     int host_flags;
6940     target_sigset_t *target_mask;
6941     sigset_t host_mask;
6942     abi_long ret;
6943 
6944     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6945         return -TARGET_EINVAL;
6946     }
6947     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6948         return -TARGET_EFAULT;
6949     }
6950 
6951     target_to_host_sigset(&host_mask, target_mask);
6952 
6953     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6954 
6955     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6956     if (ret >= 0) {
6957         fd_trans_register(ret, &target_signalfd_trans);
6958     }
6959 
6960     unlock_user_struct(target_mask, mask, 0);
6961 
6962     return ret;
6963 }
6964 #endif
6965 
6966 /* Map host to target signal numbers for the wait family of syscalls.
6967    Assume all other status bits are the same.  */
6968 int host_to_target_waitstatus(int status)
6969 {
6970     if (WIFSIGNALED(status)) {
6971         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6972     }
6973     if (WIFSTOPPED(status)) {
6974         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6975                | (status & 0xff);
6976     }
6977     return status;
6978 }
6979 
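/*
 * Emulate /proc/self/cmdline: write out the saved argv[] strings, each
 * including its terminating NUL byte.
 */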
6980 static int open_self_cmdline(void *cpu_env, int fd)
6981 {
6982     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6983     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6984     int i;
6985 
6986     for (i = 0; i < bprm->argc; i++) {
6987         size_t len = strlen(bprm->argv[i]) + 1;
6988 
6989         if (write(fd, bprm->argv[i], len) != len) {
6990             return -1;
6991         }
6992     }
6993 
6994     return 0;
6995 }
6996 
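/*
 * Emulate /proc/self/maps: parse the host's maps file and emit only the
 * ranges that are valid guest memory, rewritten in terms of guest
 * addresses.
 */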
6997 static int open_self_maps(void *cpu_env, int fd)
6998 {
6999     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7000     TaskState *ts = cpu->opaque;
7001     FILE *fp;
7002     char *line = NULL;
7003     size_t len = 0;
7004     ssize_t read;
7005 
7006     fp = fopen("/proc/self/maps", "r");
7007     if (fp == NULL) {
7008         return -1;
7009     }
7010 
7011     while ((read = getline(&line, &len, fp)) != -1) {
7012         int fields, dev_maj, dev_min, inode;
7013         uint64_t min, max, offset;
7014         char flag_r, flag_w, flag_x, flag_p;
7015         char path[512] = "";
7016         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7017                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
7018                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7019 
7020         if ((fields < 10) || (fields > 11)) {
7021             continue;
7022         }
7023         if (h2g_valid(min)) {
7024             int flags = page_get_flags(h2g(min));
7025             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7026             if (page_check_range(h2g(min), max - min, flags) == -1) {
7027                 continue;
7028             }
7029             if (h2g(min) == ts->info->stack_limit) {
7030                 pstrcpy(path, sizeof(path), "      [stack]");
7031             }
7032             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7033                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7034                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7035                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7036                     path[0] ? "         " : "", path);
7037         }
7038     }
7039 
7040     free(line);
7041     fclose(fp);
7042 
7043     return 0;
7044 }
7045 
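/*
 * Emulate /proc/self/stat: only the pid, the command name and the start
 * of the stack are filled in with real values; every other field reads
 * as 0.
 */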
7046 static int open_self_stat(void *cpu_env, int fd)
7047 {
7048     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7049     TaskState *ts = cpu->opaque;
7050     abi_ulong start_stack = ts->info->start_stack;
7051     int i;
7052 
7053     for (i = 0; i < 44; i++) {
7054       char buf[128];
7055       int len;
7056       uint64_t val = 0;
7057 
7058       if (i == 0) {
7059         /* pid */
7060         val = getpid();
7061         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7062       } else if (i == 1) {
7063         /* app name */
7064         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7065       } else if (i == 27) {
7066         /* stack bottom */
7067         val = start_stack;
7068         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7069       } else {
7070         /* for the rest, there is MasterCard */
7071         /* all remaining fields are not emulated; report them as 0 */
7072       }
7073 
7074       len = strlen(buf);
7075       if (write(fd, buf, len) != len) {
7076           return -1;
7077       }
7078     }
7079 
7080     return 0;
7081 }
7082 
7083 static int open_self_auxv(void *cpu_env, int fd)
7084 {
7085     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7086     TaskState *ts = cpu->opaque;
7087     abi_ulong auxv = ts->info->saved_auxv;
7088     abi_ulong len = ts->info->auxv_len;
7089     char *ptr;
7090 
7091     /*
7092      * The auxiliary vector is stored on the target process's stack;
7093      * read in the whole auxv vector and copy it out to the file.
7094      */
7095     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7096     if (ptr != NULL) {
7097         while (len > 0) {
7098             ssize_t r;
7099             r = write(fd, ptr, len);
7100             if (r <= 0) {
7101                 break;
7102             }
7103             len -= r;
7104             ptr += r;
7105         }
7106         lseek(fd, 0, SEEK_SET);
7107         unlock_user(ptr, auxv, len);
7108     }
7109 
7110     return 0;
7111 }
7112 
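/*
 * Return non-zero if filename names the given entry under /proc/self/ or
 * under /proc/<pid>/ for our own pid.
 */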
7113 static int is_proc_myself(const char *filename, const char *entry)
7114 {
7115     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7116         filename += strlen("/proc/");
7117         if (!strncmp(filename, "self/", strlen("self/"))) {
7118             filename += strlen("self/");
7119         } else if (*filename >= '1' && *filename <= '9') {
7120             char myself[80];
7121             snprintf(myself, sizeof(myself), "%d/", getpid());
7122             if (!strncmp(filename, myself, strlen(myself))) {
7123                 filename += strlen(myself);
7124             } else {
7125                 return 0;
7126             }
7127         } else {
7128             return 0;
7129         }
7130         if (!strcmp(filename, entry)) {
7131             return 1;
7132         }
7133     }
7134     return 0;
7135 }
7136 
7137 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7138     defined(TARGET_SPARC) || defined(TARGET_M68K)
7139 static int is_proc(const char *filename, const char *entry)
7140 {
7141     return strcmp(filename, entry) == 0;
7142 }
7143 #endif
7144 
7145 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7146 static int open_net_route(void *cpu_env, int fd)
7147 {
7148     FILE *fp;
7149     char *line = NULL;
7150     size_t len = 0;
7151     ssize_t read;
7152 
7153     fp = fopen("/proc/net/route", "r");
7154     if (fp == NULL) {
7155         return -1;
7156     }
7157 
7158     /* read header */
7159 
7160     read = getline(&line, &len, fp);
7161     dprintf(fd, "%s", line);
7162 
7163     /* read routes */
7164 
7165     while ((read = getline(&line, &len, fp)) != -1) {
7166         char iface[16];
7167         uint32_t dest, gw, mask;
7168         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7169         int fields;
7170 
7171         fields = sscanf(line,
7172                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7173                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7174                         &mask, &mtu, &window, &irtt);
7175         if (fields != 11) {
7176             continue;
7177         }
7178         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7179                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7180                 metric, tswap32(mask), mtu, window, irtt);
7181     }
7182 
7183     free(line);
7184     fclose(fp);
7185 
7186     return 0;
7187 }
7188 #endif
7189 
7190 #if defined(TARGET_SPARC)
7191 static int open_cpuinfo(void *cpu_env, int fd)
7192 {
7193     dprintf(fd, "type\t\t: sun4u\n");
7194     return 0;
7195 }
7196 #endif
7197 
7198 #if defined(TARGET_M68K)
7199 static int open_hardware(void *cpu_env, int fd)
7200 {
7201     dprintf(fd, "Model:\t\tqemu-m68k\n");
7202     return 0;
7203 }
7204 #endif
7205 
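/*
 * openat() with special handling for a few /proc files: "self/exe" is
 * redirected to the real executable, and the entries listed in fakes[]
 * below are synthesized into an unlinked temporary file whose descriptor
 * is handed back to the guest.
 */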
7206 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7207 {
7208     struct fake_open {
7209         const char *filename;
7210         int (*fill)(void *cpu_env, int fd);
7211         int (*cmp)(const char *s1, const char *s2);
7212     };
7213     const struct fake_open *fake_open;
7214     static const struct fake_open fakes[] = {
7215         { "maps", open_self_maps, is_proc_myself },
7216         { "stat", open_self_stat, is_proc_myself },
7217         { "auxv", open_self_auxv, is_proc_myself },
7218         { "cmdline", open_self_cmdline, is_proc_myself },
7219 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7220         { "/proc/net/route", open_net_route, is_proc },
7221 #endif
7222 #if defined(TARGET_SPARC)
7223         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7224 #endif
7225 #if defined(TARGET_M68K)
7226         { "/proc/hardware", open_hardware, is_proc },
7227 #endif
7228         { NULL, NULL, NULL }
7229     };
7230 
7231     if (is_proc_myself(pathname, "exe")) {
7232         int execfd = qemu_getauxval(AT_EXECFD);
7233         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7234     }
7235 
7236     for (fake_open = fakes; fake_open->filename; fake_open++) {
7237         if (fake_open->cmp(pathname, fake_open->filename)) {
7238             break;
7239         }
7240     }
7241 
7242     if (fake_open->filename) {
7243         const char *tmpdir;
7244         char filename[PATH_MAX];
7245         int fd, r;
7246 
7247         /* create temporary file to map stat to */
7248         tmpdir = getenv("TMPDIR");
7249         if (!tmpdir)
7250             tmpdir = "/tmp";
7251         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7252         fd = mkstemp(filename);
7253         if (fd < 0) {
7254             return fd;
7255         }
7256         unlink(filename);
7257 
7258         if ((r = fake_open->fill(cpu_env, fd))) {
7259             int e = errno;
7260             close(fd);
7261             errno = e;
7262             return r;
7263         }
7264         lseek(fd, 0, SEEK_SET);
7265 
7266         return fd;
7267     }
7268 
7269     return safe_openat(dirfd, path(pathname), flags, mode);
7270 }
7271 
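/*
 * POSIX timer IDs handed to the guest are the index into g_posix_timers[]
 * tagged with TIMER_MAGIC in the upper 16 bits (e.g. index 3 becomes
 * 0x0caf0003); get_timer_id() below rejects anything without the tag.
 */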
7272 #define TIMER_MAGIC 0x0caf0000
7273 #define TIMER_MAGIC_MASK 0xffff0000
7274 
7275 /* Convert QEMU provided timer ID back to internal 16bit index format */
7276 static target_timer_t get_timer_id(abi_long arg)
7277 {
7278     target_timer_t timerid = arg;
7279 
7280     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7281         return -TARGET_EINVAL;
7282     }
7283 
7284     timerid &= 0xffff;
7285 
7286     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7287         return -TARGET_EINVAL;
7288     }
7289 
7290     return timerid;
7291 }
7292 
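/*
 * Convert a CPU affinity mask between the guest layout (an array of
 * abi_ulong words) and the host layout (an array of unsigned long words),
 * copying bit by bit so that differing word sizes and byte order are
 * handled correctly.
 */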
7293 static int target_to_host_cpu_mask(unsigned long *host_mask,
7294                                    size_t host_size,
7295                                    abi_ulong target_addr,
7296                                    size_t target_size)
7297 {
7298     unsigned target_bits = sizeof(abi_ulong) * 8;
7299     unsigned host_bits = sizeof(*host_mask) * 8;
7300     abi_ulong *target_mask;
7301     unsigned i, j;
7302 
7303     assert(host_size >= target_size);
7304 
7305     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7306     if (!target_mask) {
7307         return -TARGET_EFAULT;
7308     }
7309     memset(host_mask, 0, host_size);
7310 
7311     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7312         unsigned bit = i * target_bits;
7313         abi_ulong val;
7314 
7315         __get_user(val, &target_mask[i]);
7316         for (j = 0; j < target_bits; j++, bit++) {
7317             if (val & (1UL << j)) {
7318                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7319             }
7320         }
7321     }
7322 
7323     unlock_user(target_mask, target_addr, 0);
7324     return 0;
7325 }
7326 
7327 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7328                                    size_t host_size,
7329                                    abi_ulong target_addr,
7330                                    size_t target_size)
7331 {
7332     unsigned target_bits = sizeof(abi_ulong) * 8;
7333     unsigned host_bits = sizeof(*host_mask) * 8;
7334     abi_ulong *target_mask;
7335     unsigned i, j;
7336 
7337     assert(host_size >= target_size);
7338 
7339     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7340     if (!target_mask) {
7341         return -TARGET_EFAULT;
7342     }
7343 
7344     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7345         unsigned bit = i * target_bits;
7346         abi_ulong val = 0;
7347 
7348         for (j = 0; j < target_bits; j++, bit++) {
7349             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7350                 val |= 1UL << j;
7351             }
7352         }
7353         __put_user(val, &target_mask[i]);
7354     }
7355 
7356     unlock_user(target_mask, target_addr, target_size);
7357     return 0;
7358 }
7359 
7360 /* This is an internal helper for do_syscall so that it is easier
7361  * to have a single return point, so that actions, such as logging
7362  * of syscall results, can be performed.
7363  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7364  */
7365 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7366                             abi_long arg2, abi_long arg3, abi_long arg4,
7367                             abi_long arg5, abi_long arg6, abi_long arg7,
7368                             abi_long arg8)
7369 {
7370     CPUState *cpu = env_cpu(cpu_env);
7371     abi_long ret;
7372 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7373     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7374     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7375     || defined(TARGET_NR_statx)
7376     struct stat st;
7377 #endif
7378 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7379     || defined(TARGET_NR_fstatfs)
7380     struct statfs stfs;
7381 #endif
7382     void *p;
7383 
7384     switch(num) {
7385     case TARGET_NR_exit:
7386         /* In old applications this may be used to implement _exit(2).
7387            However, in threaded applications it is used for thread termination,
7388            and _exit_group is used for application termination.
7389            Do thread termination if we have more than one thread.  */
7390 
7391         if (block_signals()) {
7392             return -TARGET_ERESTARTSYS;
7393         }
7394 
7395         cpu_list_lock();
7396 
7397         if (CPU_NEXT(first_cpu)) {
7398             TaskState *ts;
7399 
7400             /* Remove the CPU from the list.  */
7401             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7402 
7403             cpu_list_unlock();
7404 
7405             ts = cpu->opaque;
7406             if (ts->child_tidptr) {
7407                 put_user_u32(0, ts->child_tidptr);
7408                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7409                           NULL, NULL, 0);
7410             }
7411             thread_cpu = NULL;
7412             object_unref(OBJECT(cpu));
7413             g_free(ts);
7414             rcu_unregister_thread();
7415             pthread_exit(NULL);
7416         }
7417 
7418         cpu_list_unlock();
7419         preexit_cleanup(cpu_env, arg1);
7420         _exit(arg1);
7421         return 0; /* avoid warning */
7422     case TARGET_NR_read:
7423         if (arg2 == 0 && arg3 == 0) {
7424             return get_errno(safe_read(arg1, 0, 0));
7425         } else {
7426             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7427                 return -TARGET_EFAULT;
7428             ret = get_errno(safe_read(arg1, p, arg3));
7429             if (ret >= 0 &&
7430                 fd_trans_host_to_target_data(arg1)) {
7431                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7432             }
7433             unlock_user(p, arg2, ret);
7434         }
7435         return ret;
7436     case TARGET_NR_write:
7437         if (arg2 == 0 && arg3 == 0) {
7438             return get_errno(safe_write(arg1, 0, 0));
7439         }
7440         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7441             return -TARGET_EFAULT;
7442         if (fd_trans_target_to_host_data(arg1)) {
7443             void *copy = g_malloc(arg3);
7444             memcpy(copy, p, arg3);
7445             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7446             if (ret >= 0) {
7447                 ret = get_errno(safe_write(arg1, copy, ret));
7448             }
7449             g_free(copy);
7450         } else {
7451             ret = get_errno(safe_write(arg1, p, arg3));
7452         }
7453         unlock_user(p, arg2, 0);
7454         return ret;
7455 
7456 #ifdef TARGET_NR_open
7457     case TARGET_NR_open:
7458         if (!(p = lock_user_string(arg1)))
7459             return -TARGET_EFAULT;
7460         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7461                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7462                                   arg3));
7463         fd_trans_unregister(ret);
7464         unlock_user(p, arg1, 0);
7465         return ret;
7466 #endif
7467     case TARGET_NR_openat:
7468         if (!(p = lock_user_string(arg2)))
7469             return -TARGET_EFAULT;
7470         ret = get_errno(do_openat(cpu_env, arg1, p,
7471                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7472                                   arg4));
7473         fd_trans_unregister(ret);
7474         unlock_user(p, arg2, 0);
7475         return ret;
7476 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7477     case TARGET_NR_name_to_handle_at:
7478         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7479         return ret;
7480 #endif
7481 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7482     case TARGET_NR_open_by_handle_at:
7483         ret = do_open_by_handle_at(arg1, arg2, arg3);
7484         fd_trans_unregister(ret);
7485         return ret;
7486 #endif
7487     case TARGET_NR_close:
7488         fd_trans_unregister(arg1);
7489         return get_errno(close(arg1));
7490 
7491     case TARGET_NR_brk:
7492         return do_brk(arg1);
7493 #ifdef TARGET_NR_fork
7494     case TARGET_NR_fork:
7495         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7496 #endif
7497 #ifdef TARGET_NR_waitpid
7498     case TARGET_NR_waitpid:
7499         {
7500             int status;
7501             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7502             if (!is_error(ret) && arg2 && ret
7503                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7504                 return -TARGET_EFAULT;
7505         }
7506         return ret;
7507 #endif
7508 #ifdef TARGET_NR_waitid
7509     case TARGET_NR_waitid:
7510         {
7511             siginfo_t info;
7512             info.si_pid = 0;
7513             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7514             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7515                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7516                     return -TARGET_EFAULT;
7517                 host_to_target_siginfo(p, &info);
7518                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7519             }
7520         }
7521         return ret;
7522 #endif
7523 #ifdef TARGET_NR_creat /* not on alpha */
7524     case TARGET_NR_creat:
7525         if (!(p = lock_user_string(arg1)))
7526             return -TARGET_EFAULT;
7527         ret = get_errno(creat(p, arg2));
7528         fd_trans_unregister(ret);
7529         unlock_user(p, arg1, 0);
7530         return ret;
7531 #endif
7532 #ifdef TARGET_NR_link
7533     case TARGET_NR_link:
7534         {
7535             void * p2;
7536             p = lock_user_string(arg1);
7537             p2 = lock_user_string(arg2);
7538             if (!p || !p2)
7539                 ret = -TARGET_EFAULT;
7540             else
7541                 ret = get_errno(link(p, p2));
7542             unlock_user(p2, arg2, 0);
7543             unlock_user(p, arg1, 0);
7544         }
7545         return ret;
7546 #endif
7547 #if defined(TARGET_NR_linkat)
7548     case TARGET_NR_linkat:
7549         {
7550             void * p2 = NULL;
7551             if (!arg2 || !arg4)
7552                 return -TARGET_EFAULT;
7553             p  = lock_user_string(arg2);
7554             p2 = lock_user_string(arg4);
7555             if (!p || !p2)
7556                 ret = -TARGET_EFAULT;
7557             else
7558                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7559             unlock_user(p, arg2, 0);
7560             unlock_user(p2, arg4, 0);
7561         }
7562         return ret;
7563 #endif
7564 #ifdef TARGET_NR_unlink
7565     case TARGET_NR_unlink:
7566         if (!(p = lock_user_string(arg1)))
7567             return -TARGET_EFAULT;
7568         ret = get_errno(unlink(p));
7569         unlock_user(p, arg1, 0);
7570         return ret;
7571 #endif
7572 #if defined(TARGET_NR_unlinkat)
7573     case TARGET_NR_unlinkat:
7574         if (!(p = lock_user_string(arg2)))
7575             return -TARGET_EFAULT;
7576         ret = get_errno(unlinkat(arg1, p, arg3));
7577         unlock_user(p, arg2, 0);
7578         return ret;
7579 #endif
7580     case TARGET_NR_execve:
7581         {
7582             char **argp, **envp;
7583             int argc, envc;
7584             abi_ulong gp;
7585             abi_ulong guest_argp;
7586             abi_ulong guest_envp;
7587             abi_ulong addr;
7588             char **q;
7589             int total_size = 0;
7590 
7591             argc = 0;
7592             guest_argp = arg2;
7593             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7594                 if (get_user_ual(addr, gp))
7595                     return -TARGET_EFAULT;
7596                 if (!addr)
7597                     break;
7598                 argc++;
7599             }
7600             envc = 0;
7601             guest_envp = arg3;
7602             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7603                 if (get_user_ual(addr, gp))
7604                     return -TARGET_EFAULT;
7605                 if (!addr)
7606                     break;
7607                 envc++;
7608             }
7609 
7610             argp = g_new0(char *, argc + 1);
7611             envp = g_new0(char *, envc + 1);
7612 
7613             for (gp = guest_argp, q = argp; gp;
7614                   gp += sizeof(abi_ulong), q++) {
7615                 if (get_user_ual(addr, gp))
7616                     goto execve_efault;
7617                 if (!addr)
7618                     break;
7619                 if (!(*q = lock_user_string(addr)))
7620                     goto execve_efault;
7621                 total_size += strlen(*q) + 1;
7622             }
7623             *q = NULL;
7624 
7625             for (gp = guest_envp, q = envp; gp;
7626                   gp += sizeof(abi_ulong), q++) {
7627                 if (get_user_ual(addr, gp))
7628                     goto execve_efault;
7629                 if (!addr)
7630                     break;
7631                 if (!(*q = lock_user_string(addr)))
7632                     goto execve_efault;
7633                 total_size += strlen(*q) + 1;
7634             }
7635             *q = NULL;
7636 
7637             if (!(p = lock_user_string(arg1)))
7638                 goto execve_efault;
7639             /* Although execve() is not an interruptible syscall it is
7640              * a special case where we must use the safe_syscall wrapper:
7641              * if we allow a signal to happen before we make the host
7642              * syscall then we will 'lose' it, because at the point of
7643              * execve the process leaves QEMU's control. So we use the
7644              * safe syscall wrapper to ensure that we either take the
7645              * signal as a guest signal, or else it does not happen
7646              * before the execve completes and makes it the other
7647              * program's problem.
7648              */
7649             ret = get_errno(safe_execve(p, argp, envp));
7650             unlock_user(p, arg1, 0);
7651 
7652             goto execve_end;
7653 
7654         execve_efault:
7655             ret = -TARGET_EFAULT;
7656 
7657         execve_end:
7658             for (gp = guest_argp, q = argp; *q;
7659                   gp += sizeof(abi_ulong), q++) {
7660                 if (get_user_ual(addr, gp)
7661                     || !addr)
7662                     break;
7663                 unlock_user(*q, addr, 0);
7664             }
7665             for (gp = guest_envp, q = envp; *q;
7666                   gp += sizeof(abi_ulong), q++) {
7667                 if (get_user_ual(addr, gp)
7668                     || !addr)
7669                     break;
7670                 unlock_user(*q, addr, 0);
7671             }
7672 
7673             g_free(argp);
7674             g_free(envp);
7675         }
7676         return ret;
7677     case TARGET_NR_chdir:
7678         if (!(p = lock_user_string(arg1)))
7679             return -TARGET_EFAULT;
7680         ret = get_errno(chdir(p));
7681         unlock_user(p, arg1, 0);
7682         return ret;
7683 #ifdef TARGET_NR_time
7684     case TARGET_NR_time:
7685         {
7686             time_t host_time;
7687             ret = get_errno(time(&host_time));
7688             if (!is_error(ret)
7689                 && arg1
7690                 && put_user_sal(host_time, arg1))
7691                 return -TARGET_EFAULT;
7692         }
7693         return ret;
7694 #endif
7695 #ifdef TARGET_NR_mknod
7696     case TARGET_NR_mknod:
7697         if (!(p = lock_user_string(arg1)))
7698             return -TARGET_EFAULT;
7699         ret = get_errno(mknod(p, arg2, arg3));
7700         unlock_user(p, arg1, 0);
7701         return ret;
7702 #endif
7703 #if defined(TARGET_NR_mknodat)
7704     case TARGET_NR_mknodat:
7705         if (!(p = lock_user_string(arg2)))
7706             return -TARGET_EFAULT;
7707         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7708         unlock_user(p, arg2, 0);
7709         return ret;
7710 #endif
7711 #ifdef TARGET_NR_chmod
7712     case TARGET_NR_chmod:
7713         if (!(p = lock_user_string(arg1)))
7714             return -TARGET_EFAULT;
7715         ret = get_errno(chmod(p, arg2));
7716         unlock_user(p, arg1, 0);
7717         return ret;
7718 #endif
7719 #ifdef TARGET_NR_lseek
7720     case TARGET_NR_lseek:
7721         return get_errno(lseek(arg1, arg2, arg3));
7722 #endif
7723 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7724     /* Alpha specific: getxpid returns the pid and additionally leaves the parent pid in register a4. */
7725     case TARGET_NR_getxpid:
7726         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7727         return get_errno(getpid());
7728 #endif
7729 #ifdef TARGET_NR_getpid
7730     case TARGET_NR_getpid:
7731         return get_errno(getpid());
7732 #endif
7733     case TARGET_NR_mount:
7734         {
7735             /* need to look at the data field */
7736             void *p2, *p3;
7737 
7738             if (arg1) {
7739                 p = lock_user_string(arg1);
7740                 if (!p) {
7741                     return -TARGET_EFAULT;
7742                 }
7743             } else {
7744                 p = NULL;
7745             }
7746 
7747             p2 = lock_user_string(arg2);
7748             if (!p2) {
7749                 if (arg1) {
7750                     unlock_user(p, arg1, 0);
7751                 }
7752                 return -TARGET_EFAULT;
7753             }
7754 
7755             if (arg3) {
7756                 p3 = lock_user_string(arg3);
7757                 if (!p3) {
7758                     if (arg1) {
7759                         unlock_user(p, arg1, 0);
7760                     }
7761                     unlock_user(p2, arg2, 0);
7762                     return -TARGET_EFAULT;
7763                 }
7764             } else {
7765                 p3 = NULL;
7766             }
7767 
7768             /* FIXME - arg5 should be locked, but it isn't clear how to
7769              * do that since it's not guaranteed to be a NULL-terminated
7770              * string.
7771              */
7772             if (!arg5) {
7773                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7774             } else {
7775                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7776             }
7777             ret = get_errno(ret);
7778 
7779             if (arg1) {
7780                 unlock_user(p, arg1, 0);
7781             }
7782             unlock_user(p2, arg2, 0);
7783             if (arg3) {
7784                 unlock_user(p3, arg3, 0);
7785             }
7786         }
7787         return ret;
7788 #ifdef TARGET_NR_umount
7789     case TARGET_NR_umount:
7790         if (!(p = lock_user_string(arg1)))
7791             return -TARGET_EFAULT;
7792         ret = get_errno(umount(p));
7793         unlock_user(p, arg1, 0);
7794         return ret;
7795 #endif
7796 #ifdef TARGET_NR_stime /* not on alpha */
7797     case TARGET_NR_stime:
7798         {
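                 /* stime() is deprecated and may be missing on the host, so
                  * emulate it with clock_settime(CLOCK_REALTIME). */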
7799             struct timespec ts;
7800             ts.tv_nsec = 0;
7801             if (get_user_sal(ts.tv_sec, arg1)) {
7802                 return -TARGET_EFAULT;
7803             }
7804             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7805         }
7806 #endif
7807 #ifdef TARGET_NR_alarm /* not on alpha */
7808     case TARGET_NR_alarm:
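             /* alarm() cannot fail; it returns the number of seconds left on
              * any previous alarm, so no errno conversion is needed. */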
7809         return alarm(arg1);
7810 #endif
7811 #ifdef TARGET_NR_pause /* not on alpha */
7812     case TARGET_NR_pause:
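             /* block_signals() returns non-zero if a guest signal is already
              * pending, in which case we skip the suspend so it can be
              * delivered.  Either way pause() completes with -EINTR. */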
7813         if (!block_signals()) {
7814             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7815         }
7816         return -TARGET_EINTR;
7817 #endif
7818 #ifdef TARGET_NR_utime
7819     case TARGET_NR_utime:
7820         {
7821             struct utimbuf tbuf, *host_tbuf;
7822             struct target_utimbuf *target_tbuf;
7823             if (arg2) {
7824                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7825                     return -TARGET_EFAULT;
7826                 tbuf.actime = tswapal(target_tbuf->actime);
7827                 tbuf.modtime = tswapal(target_tbuf->modtime);
7828                 unlock_user_struct(target_tbuf, arg2, 0);
7829                 host_tbuf = &tbuf;
7830             } else {
7831                 host_tbuf = NULL;
7832             }
7833             if (!(p = lock_user_string(arg1)))
7834                 return -TARGET_EFAULT;
7835             ret = get_errno(utime(p, host_tbuf));
7836             unlock_user(p, arg1, 0);
7837         }
7838         return ret;
7839 #endif
7840 #ifdef TARGET_NR_utimes
7841     case TARGET_NR_utimes:
7842         {
7843             struct timeval *tvp, tv[2];
7844             if (arg2) {
7845                 if (copy_from_user_timeval(&tv[0], arg2)
7846                     || copy_from_user_timeval(&tv[1],
7847                                               arg2 + sizeof(struct target_timeval)))
7848                     return -TARGET_EFAULT;
7849                 tvp = tv;
7850             } else {
7851                 tvp = NULL;
7852             }
7853             if (!(p = lock_user_string(arg1)))
7854                 return -TARGET_EFAULT;
7855             ret = get_errno(utimes(p, tvp));
7856             unlock_user(p, arg1, 0);
7857         }
7858         return ret;
7859 #endif
7860 #if defined(TARGET_NR_futimesat)
7861     case TARGET_NR_futimesat:
7862         {
7863             struct timeval *tvp, tv[2];
7864             if (arg3) {
7865                 if (copy_from_user_timeval(&tv[0], arg3)
7866                     || copy_from_user_timeval(&tv[1],
7867                                               arg3 + sizeof(struct target_timeval)))
7868                     return -TARGET_EFAULT;
7869                 tvp = tv;
7870             } else {
7871                 tvp = NULL;
7872             }
7873             if (!(p = lock_user_string(arg2))) {
7874                 return -TARGET_EFAULT;
7875             }
7876             ret = get_errno(futimesat(arg1, path(p), tvp));
7877             unlock_user(p, arg2, 0);
7878         }
7879         return ret;
7880 #endif
7881 #ifdef TARGET_NR_access
7882     case TARGET_NR_access:
7883         if (!(p = lock_user_string(arg1))) {
7884             return -TARGET_EFAULT;
7885         }
7886         ret = get_errno(access(path(p), arg2));
7887         unlock_user(p, arg1, 0);
7888         return ret;
7889 #endif
7890 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7891     case TARGET_NR_faccessat:
7892         if (!(p = lock_user_string(arg2))) {
7893             return -TARGET_EFAULT;
7894         }
7895         ret = get_errno(faccessat(arg1, p, arg3, 0));
7896         unlock_user(p, arg2, 0);
7897         return ret;
7898 #endif
7899 #ifdef TARGET_NR_nice /* not on alpha */
7900     case TARGET_NR_nice:
7901         return get_errno(nice(arg1));
7902 #endif
7903     case TARGET_NR_sync:
7904         sync();
7905         return 0;
7906 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7907     case TARGET_NR_syncfs:
7908         return get_errno(syncfs(arg1));
7909 #endif
7910     case TARGET_NR_kill:
7911         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7912 #ifdef TARGET_NR_rename
7913     case TARGET_NR_rename:
7914         {
7915             void *p2;
7916             p = lock_user_string(arg1);
7917             p2 = lock_user_string(arg2);
7918             if (!p || !p2)
7919                 ret = -TARGET_EFAULT;
7920             else
7921                 ret = get_errno(rename(p, p2));
7922             unlock_user(p2, arg2, 0);
7923             unlock_user(p, arg1, 0);
7924         }
7925         return ret;
7926 #endif
7927 #if defined(TARGET_NR_renameat)
7928     case TARGET_NR_renameat:
7929         {
7930             void *p2;
7931             p  = lock_user_string(arg2);
7932             p2 = lock_user_string(arg4);
7933             if (!p || !p2)
7934                 ret = -TARGET_EFAULT;
7935             else
7936                 ret = get_errno(renameat(arg1, p, arg3, p2));
7937             unlock_user(p2, arg4, 0);
7938             unlock_user(p, arg2, 0);
7939         }
7940         return ret;
7941 #endif
7942 #if defined(TARGET_NR_renameat2)
7943     case TARGET_NR_renameat2:
7944         {
7945             void *p2;
7946             p  = lock_user_string(arg2);
7947             p2 = lock_user_string(arg4);
7948             if (!p || !p2) {
7949                 ret = -TARGET_EFAULT;
7950             } else {
7951                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7952             }
7953             unlock_user(p2, arg4, 0);
7954             unlock_user(p, arg2, 0);
7955         }
7956         return ret;
7957 #endif
7958 #ifdef TARGET_NR_mkdir
7959     case TARGET_NR_mkdir:
7960         if (!(p = lock_user_string(arg1)))
7961             return -TARGET_EFAULT;
7962         ret = get_errno(mkdir(p, arg2));
7963         unlock_user(p, arg1, 0);
7964         return ret;
7965 #endif
7966 #if defined(TARGET_NR_mkdirat)
7967     case TARGET_NR_mkdirat:
7968         if (!(p = lock_user_string(arg2)))
7969             return -TARGET_EFAULT;
7970         ret = get_errno(mkdirat(arg1, p, arg3));
7971         unlock_user(p, arg2, 0);
7972         return ret;
7973 #endif
7974 #ifdef TARGET_NR_rmdir
7975     case TARGET_NR_rmdir:
7976         if (!(p = lock_user_string(arg1)))
7977             return -TARGET_EFAULT;
7978         ret = get_errno(rmdir(p));
7979         unlock_user(p, arg1, 0);
7980         return ret;
7981 #endif
7982     case TARGET_NR_dup:
7983         ret = get_errno(dup(arg1));
7984         if (ret >= 0) {
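                 /* Carry any fd translator state (e.g. for netlink sockets)
                  * over to the new descriptor. */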
7985             fd_trans_dup(arg1, ret);
7986         }
7987         return ret;
7988 #ifdef TARGET_NR_pipe
7989     case TARGET_NR_pipe:
7990         return do_pipe(cpu_env, arg1, 0, 0);
7991 #endif
7992 #ifdef TARGET_NR_pipe2
7993     case TARGET_NR_pipe2:
7994         return do_pipe(cpu_env, arg1,
7995                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7996 #endif
7997     case TARGET_NR_times:
7998         {
7999             struct target_tms *tmsp;
8000             struct tms tms;
8001             ret = get_errno(times(&tms));
8002             if (arg1) {
8003                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8004                 if (!tmsp)
8005                     return -TARGET_EFAULT;
8006                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8007                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8008                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8009                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8010             }
8011             if (!is_error(ret))
8012                 ret = host_to_target_clock_t(ret);
8013         }
8014         return ret;
8015     case TARGET_NR_acct:
8016         if (arg1 == 0) {
8017             ret = get_errno(acct(NULL));
8018         } else {
8019             if (!(p = lock_user_string(arg1))) {
8020                 return -TARGET_EFAULT;
8021             }
8022             ret = get_errno(acct(path(p)));
8023             unlock_user(p, arg1, 0);
8024         }
8025         return ret;
8026 #ifdef TARGET_NR_umount2
8027     case TARGET_NR_umount2:
8028         if (!(p = lock_user_string(arg1)))
8029             return -TARGET_EFAULT;
8030         ret = get_errno(umount2(p, arg2));
8031         unlock_user(p, arg1, 0);
8032         return ret;
8033 #endif
8034     case TARGET_NR_ioctl:
8035         return do_ioctl(arg1, arg2, arg3);
8036 #ifdef TARGET_NR_fcntl
8037     case TARGET_NR_fcntl:
8038         return do_fcntl(arg1, arg2, arg3);
8039 #endif
8040     case TARGET_NR_setpgid:
8041         return get_errno(setpgid(arg1, arg2));
8042     case TARGET_NR_umask:
8043         return get_errno(umask(arg1));
8044     case TARGET_NR_chroot:
8045         if (!(p = lock_user_string(arg1)))
8046             return -TARGET_EFAULT;
8047         ret = get_errno(chroot(p));
8048         unlock_user(p, arg1, 0);
8049         return ret;
8050 #ifdef TARGET_NR_dup2
8051     case TARGET_NR_dup2:
8052         ret = get_errno(dup2(arg1, arg2));
8053         if (ret >= 0) {
8054             fd_trans_dup(arg1, arg2);
8055         }
8056         return ret;
8057 #endif
8058 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8059     case TARGET_NR_dup3:
8060     {
8061         int host_flags;
8062 
8063         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8064             return -TARGET_EINVAL;
8065         }
8066         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8067         ret = get_errno(dup3(arg1, arg2, host_flags));
8068         if (ret >= 0) {
8069             fd_trans_dup(arg1, arg2);
8070         }
8071         return ret;
8072     }
8073 #endif
8074 #ifdef TARGET_NR_getppid /* not on alpha */
8075     case TARGET_NR_getppid:
8076         return get_errno(getppid());
8077 #endif
8078 #ifdef TARGET_NR_getpgrp
8079     case TARGET_NR_getpgrp:
8080         return get_errno(getpgrp());
8081 #endif
8082     case TARGET_NR_setsid:
8083         return get_errno(setsid());
8084 #ifdef TARGET_NR_sigaction
8085     case TARGET_NR_sigaction:
8086         {
8087 #if defined(TARGET_ALPHA)
8088             struct target_sigaction act, oact, *pact = 0;
8089             struct target_old_sigaction *old_act;
8090             if (arg2) {
8091                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8092                     return -TARGET_EFAULT;
8093                 act._sa_handler = old_act->_sa_handler;
8094                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8095                 act.sa_flags = old_act->sa_flags;
8096                 act.sa_restorer = 0;
8097                 unlock_user_struct(old_act, arg2, 0);
8098                 pact = &act;
8099             }
8100             ret = get_errno(do_sigaction(arg1, pact, &oact));
8101             if (!is_error(ret) && arg3) {
8102                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8103                     return -TARGET_EFAULT;
8104                 old_act->_sa_handler = oact._sa_handler;
8105                 old_act->sa_mask = oact.sa_mask.sig[0];
8106                 old_act->sa_flags = oact.sa_flags;
8107                 unlock_user_struct(old_act, arg3, 1);
8108             }
8109 #elif defined(TARGET_MIPS)
8110             struct target_sigaction act, oact, *pact, *old_act;
8111
8112             if (arg2) {
8113                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8114                     return -TARGET_EFAULT;
8115                 act._sa_handler = old_act->_sa_handler;
8116                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8117                 act.sa_flags = old_act->sa_flags;
8118                 unlock_user_struct(old_act, arg2, 0);
8119                 pact = &act;
8120             } else {
8121                 pact = NULL;
8122             }
8123
8124             ret = get_errno(do_sigaction(arg1, pact, &oact));
8125
8126             if (!is_error(ret) && arg3) {
8127                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8128                     return -TARGET_EFAULT;
8129                 old_act->_sa_handler = oact._sa_handler;
8130                 old_act->sa_flags = oact.sa_flags;
8131                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8132                 old_act->sa_mask.sig[1] = 0;
8133                 old_act->sa_mask.sig[2] = 0;
8134                 old_act->sa_mask.sig[3] = 0;
8135                 unlock_user_struct(old_act, arg3, 1);
8136             }
8137 #else
8138             struct target_old_sigaction *old_act;
8139             struct target_sigaction act, oact, *pact;
8140             if (arg2) {
8141                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8142                     return -TARGET_EFAULT;
8143                 act._sa_handler = old_act->_sa_handler;
8144                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8145                 act.sa_flags = old_act->sa_flags;
8146                 act.sa_restorer = old_act->sa_restorer;
8147 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8148                 act.ka_restorer = 0;
8149 #endif
8150                 unlock_user_struct(old_act, arg2, 0);
8151                 pact = &act;
8152             } else {
8153                 pact = NULL;
8154             }
8155             ret = get_errno(do_sigaction(arg1, pact, &oact));
8156             if (!is_error(ret) && arg3) {
8157                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8158                     return -TARGET_EFAULT;
8159                 old_act->_sa_handler = oact._sa_handler;
8160                 old_act->sa_mask = oact.sa_mask.sig[0];
8161                 old_act->sa_flags = oact.sa_flags;
8162                 old_act->sa_restorer = oact.sa_restorer;
8163                 unlock_user_struct(old_act, arg3, 1);
8164             }
8165 #endif
8166         }
8167         return ret;
8168 #endif
8169     case TARGET_NR_rt_sigaction:
8170         {
8171 #if defined(TARGET_ALPHA)
8172             /* For Alpha and SPARC this is a 5 argument syscall, with
8173              * a 'restorer' parameter which must be copied into the
8174              * sa_restorer field of the sigaction struct.
8175              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8176              * and arg5 is the sigsetsize.
8177              * Alpha also has a separate rt_sigaction struct that it uses
8178              * here; SPARC uses the usual sigaction struct.
8179              */
8180             struct target_rt_sigaction *rt_act;
8181             struct target_sigaction act, oact, *pact = 0;
8182 
8183             if (arg4 != sizeof(target_sigset_t)) {
8184                 return -TARGET_EINVAL;
8185             }
8186             if (arg2) {
8187                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8188                     return -TARGET_EFAULT;
8189                 act._sa_handler = rt_act->_sa_handler;
8190                 act.sa_mask = rt_act->sa_mask;
8191                 act.sa_flags = rt_act->sa_flags;
8192                 act.sa_restorer = arg5;
8193                 unlock_user_struct(rt_act, arg2, 0);
8194                 pact = &act;
8195             }
8196             ret = get_errno(do_sigaction(arg1, pact, &oact));
8197             if (!is_error(ret) && arg3) {
8198                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8199                     return -TARGET_EFAULT;
8200                 rt_act->_sa_handler = oact._sa_handler;
8201                 rt_act->sa_mask = oact.sa_mask;
8202                 rt_act->sa_flags = oact.sa_flags;
8203                 unlock_user_struct(rt_act, arg3, 1);
8204             }
8205 #else
8206 #ifdef TARGET_SPARC
8207             target_ulong restorer = arg4;
8208             target_ulong sigsetsize = arg5;
8209 #else
8210             target_ulong sigsetsize = arg4;
8211 #endif
8212             struct target_sigaction *act;
8213             struct target_sigaction *oact;
8214 
8215             if (sigsetsize != sizeof(target_sigset_t)) {
8216                 return -TARGET_EINVAL;
8217             }
8218             if (arg2) {
8219                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8220                     return -TARGET_EFAULT;
8221                 }
8222 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8223                 act->ka_restorer = restorer;
8224 #endif
8225             } else {
8226                 act = NULL;
8227             }
8228             if (arg3) {
8229                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8230                     ret = -TARGET_EFAULT;
8231                     goto rt_sigaction_fail;
8232                 }
8233             } else
8234                 oact = NULL;
8235             ret = get_errno(do_sigaction(arg1, act, oact));
8236         rt_sigaction_fail:
8237             if (act)
8238                 unlock_user_struct(act, arg2, 0);
8239             if (oact)
8240                 unlock_user_struct(oact, arg3, 1);
8241 #endif
8242         }
8243         return ret;
8244 #ifdef TARGET_NR_sgetmask /* not on alpha */
8245     case TARGET_NR_sgetmask:
8246         {
8247             sigset_t cur_set;
8248             abi_ulong target_set;
8249             ret = do_sigprocmask(0, NULL, &cur_set);
8250             if (!ret) {
8251                 host_to_target_old_sigset(&target_set, &cur_set);
8252                 ret = target_set;
8253             }
8254         }
8255         return ret;
8256 #endif
8257 #ifdef TARGET_NR_ssetmask /* not on alpha */
8258     case TARGET_NR_ssetmask:
8259         {
8260             sigset_t set, oset;
8261             abi_ulong target_set = arg1;
8262             target_to_host_old_sigset(&set, &target_set);
8263             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8264             if (!ret) {
8265                 host_to_target_old_sigset(&target_set, &oset);
8266                 ret = target_set;
8267             }
8268         }
8269         return ret;
8270 #endif
8271 #ifdef TARGET_NR_sigprocmask
8272     case TARGET_NR_sigprocmask:
8273         {
8274 #if defined(TARGET_ALPHA)
8275             sigset_t set, oldset;
8276             abi_ulong mask;
8277             int how;
8278 
8279             switch (arg1) {
8280             case TARGET_SIG_BLOCK:
8281                 how = SIG_BLOCK;
8282                 break;
8283             case TARGET_SIG_UNBLOCK:
8284                 how = SIG_UNBLOCK;
8285                 break;
8286             case TARGET_SIG_SETMASK:
8287                 how = SIG_SETMASK;
8288                 break;
8289             default:
8290                 return -TARGET_EINVAL;
8291             }
8292             mask = arg2;
8293             target_to_host_old_sigset(&set, &mask);
8294 
8295             ret = do_sigprocmask(how, &set, &oldset);
8296             if (!is_error(ret)) {
8297                 host_to_target_old_sigset(&mask, &oldset);
8298                 ret = mask;
8299                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8300             }
8301 #else
8302             sigset_t set, oldset, *set_ptr;
8303             int how;
8304 
8305             if (arg2) {
8306                 switch (arg1) {
8307                 case TARGET_SIG_BLOCK:
8308                     how = SIG_BLOCK;
8309                     break;
8310                 case TARGET_SIG_UNBLOCK:
8311                     how = SIG_UNBLOCK;
8312                     break;
8313                 case TARGET_SIG_SETMASK:
8314                     how = SIG_SETMASK;
8315                     break;
8316                 default:
8317                     return -TARGET_EINVAL;
8318                 }
8319                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8320                     return -TARGET_EFAULT;
8321                 target_to_host_old_sigset(&set, p);
8322                 unlock_user(p, arg2, 0);
8323                 set_ptr = &set;
8324             } else {
8325                 how = 0;
8326                 set_ptr = NULL;
8327             }
8328             ret = do_sigprocmask(how, set_ptr, &oldset);
8329             if (!is_error(ret) && arg3) {
8330                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8331                     return -TARGET_EFAULT;
8332                 host_to_target_old_sigset(p, &oldset);
8333                 unlock_user(p, arg3, sizeof(target_sigset_t));
8334             }
8335 #endif
8336         }
8337         return ret;
8338 #endif
8339     case TARGET_NR_rt_sigprocmask:
8340         {
8341             int how = arg1;
8342             sigset_t set, oldset, *set_ptr;
8343 
8344             if (arg4 != sizeof(target_sigset_t)) {
8345                 return -TARGET_EINVAL;
8346             }
8347 
8348             if (arg2) {
8349                 switch(how) {
8350                 case TARGET_SIG_BLOCK:
8351                     how = SIG_BLOCK;
8352                     break;
8353                 case TARGET_SIG_UNBLOCK:
8354                     how = SIG_UNBLOCK;
8355                     break;
8356                 case TARGET_SIG_SETMASK:
8357                     how = SIG_SETMASK;
8358                     break;
8359                 default:
8360                     return -TARGET_EINVAL;
8361                 }
8362                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8363                     return -TARGET_EFAULT;
8364                 target_to_host_sigset(&set, p);
8365                 unlock_user(p, arg2, 0);
8366                 set_ptr = &set;
8367             } else {
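                     /* With a NULL new set the kernel ignores 'how', so pass
                      * a dummy value and skip validating it. */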
8368                 how = 0;
8369                 set_ptr = NULL;
8370             }
8371             ret = do_sigprocmask(how, set_ptr, &oldset);
8372             if (!is_error(ret) && arg3) {
8373                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8374                     return -TARGET_EFAULT;
8375                 host_to_target_sigset(p, &oldset);
8376                 unlock_user(p, arg3, sizeof(target_sigset_t));
8377             }
8378         }
8379         return ret;
8380 #ifdef TARGET_NR_sigpending
8381     case TARGET_NR_sigpending:
8382         {
8383             sigset_t set;
8384             ret = get_errno(sigpending(&set));
8385             if (!is_error(ret)) {
8386                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8387                     return -TARGET_EFAULT;
8388                 host_to_target_old_sigset(p, &set);
8389                 unlock_user(p, arg1, sizeof(target_sigset_t));
8390             }
8391         }
8392         return ret;
8393 #endif
8394     case TARGET_NR_rt_sigpending:
8395         {
8396             sigset_t set;
8397 
8398             /* Yes, this check is >, not != like most. We follow the kernel's
8399              * logic and it does it like this because it implements
8400              * NR_sigpending through the same code path, and in that case
8401              * the old_sigset_t is smaller in size.
8402              */
8403             if (arg2 > sizeof(target_sigset_t)) {
8404                 return -TARGET_EINVAL;
8405             }
8406 
8407             ret = get_errno(sigpending(&set));
8408             if (!is_error(ret)) {
8409                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8410                     return -TARGET_EFAULT;
8411                 host_to_target_sigset(p, &set);
8412                 unlock_user(p, arg1, sizeof(target_sigset_t));
8413             }
8414         }
8415         return ret;
8416 #ifdef TARGET_NR_sigsuspend
8417     case TARGET_NR_sigsuspend:
8418         {
8419             TaskState *ts = cpu->opaque;
8420 #if defined(TARGET_ALPHA)
8421             abi_ulong mask = arg1;
8422             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8423 #else
8424             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8425                 return -TARGET_EFAULT;
8426             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8427             unlock_user(p, arg1, 0);
8428 #endif
8429             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8430                                                SIGSET_T_SIZE));
8431             if (ret != -TARGET_ERESTARTSYS) {
8432                 ts->in_sigsuspend = 1;
8433             }
8434         }
8435         return ret;
8436 #endif
8437     case TARGET_NR_rt_sigsuspend:
8438         {
8439             TaskState *ts = cpu->opaque;
8440 
8441             if (arg2 != sizeof(target_sigset_t)) {
8442                 return -TARGET_EINVAL;
8443             }
8444             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8445                 return -TARGET_EFAULT;
8446             target_to_host_sigset(&ts->sigsuspend_mask, p);
8447             unlock_user(p, arg1, 0);
8448             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8449                                                SIGSET_T_SIZE));
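                 /* Remember that we are inside sigsuspend so that the signal
                  * delivery code restores the pre-suspend mask correctly once
                  * the interrupting signal has been handled; this is not
                  * needed if the syscall is about to be restarted. */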
8450             if (ret != -TARGET_ERESTARTSYS) {
8451                 ts->in_sigsuspend = 1;
8452             }
8453         }
8454         return ret;
8455     case TARGET_NR_rt_sigtimedwait:
8456         {
8457             sigset_t set;
8458             struct timespec uts, *puts;
8459             siginfo_t uinfo;
8460 
8461             if (arg4 != sizeof(target_sigset_t)) {
8462                 return -TARGET_EINVAL;
8463             }
8464 
8465             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8466                 return -TARGET_EFAULT;
8467             target_to_host_sigset(&set, p);
8468             unlock_user(p, arg1, 0);
8469             if (arg3) {
8470                 puts = &uts;
8471                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8472             } else {
8473                 puts = NULL;
8474             }
8475             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8476                                                  SIGSET_T_SIZE));
8477             if (!is_error(ret)) {
8478                 if (arg2) {
8479                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8480                                   0);
8481                     if (!p) {
8482                         return -TARGET_EFAULT;
8483                     }
8484                     host_to_target_siginfo(p, &uinfo);
8485                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8486                 }
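                     /* The host returned a host signal number; convert it to
                      * the guest numbering. */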
8487                 ret = host_to_target_signal(ret);
8488             }
8489         }
8490         return ret;
8491     case TARGET_NR_rt_sigqueueinfo:
8492         {
8493             siginfo_t uinfo;
8494 
8495             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8496             if (!p) {
8497                 return -TARGET_EFAULT;
8498             }
8499             target_to_host_siginfo(&uinfo, p);
8500             unlock_user(p, arg3, 0);
8501             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8502         }
8503         return ret;
8504     case TARGET_NR_rt_tgsigqueueinfo:
8505         {
8506             siginfo_t uinfo;
8507 
8508             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8509             if (!p) {
8510                 return -TARGET_EFAULT;
8511             }
8512             target_to_host_siginfo(&uinfo, p);
8513             unlock_user(p, arg4, 0);
8514             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8515         }
8516         return ret;
8517 #ifdef TARGET_NR_sigreturn
8518     case TARGET_NR_sigreturn:
8519         if (block_signals()) {
8520             return -TARGET_ERESTARTSYS;
8521         }
8522         return do_sigreturn(cpu_env);
8523 #endif
8524     case TARGET_NR_rt_sigreturn:
8525         if (block_signals()) {
8526             return -TARGET_ERESTARTSYS;
8527         }
8528         return do_rt_sigreturn(cpu_env);
8529     case TARGET_NR_sethostname:
8530         if (!(p = lock_user_string(arg1)))
8531             return -TARGET_EFAULT;
8532         ret = get_errno(sethostname(p, arg2));
8533         unlock_user(p, arg1, 0);
8534         return ret;
8535 #ifdef TARGET_NR_setrlimit
8536     case TARGET_NR_setrlimit:
8537         {
8538             int resource = target_to_host_resource(arg1);
8539             struct target_rlimit *target_rlim;
8540             struct rlimit rlim;
8541             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8542                 return -TARGET_EFAULT;
8543             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8544             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8545             unlock_user_struct(target_rlim, arg2, 0);
8546             /*
8547              * If we just passed through resource limit settings for memory then
8548              * they would also apply to QEMU's own allocations, and QEMU will
8549              * crash or hang or die if its allocations fail. Ideally we would
8550              * track the guest allocations in QEMU and apply the limits ourselves.
8551              * For now, just tell the guest the call succeeded but don't actually
8552              * limit anything.
8553              */
8554             if (resource != RLIMIT_AS &&
8555                 resource != RLIMIT_DATA &&
8556                 resource != RLIMIT_STACK) {
8557                 return get_errno(setrlimit(resource, &rlim));
8558             } else {
8559                 return 0;
8560             }
8561         }
8562 #endif
8563 #ifdef TARGET_NR_getrlimit
8564     case TARGET_NR_getrlimit:
8565         {
8566             int resource = target_to_host_resource(arg1);
8567             struct target_rlimit *target_rlim;
8568             struct rlimit rlim;
8569 
8570             ret = get_errno(getrlimit(resource, &rlim));
8571             if (!is_error(ret)) {
8572                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8573                     return -TARGET_EFAULT;
8574                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8575                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8576                 unlock_user_struct(target_rlim, arg2, 1);
8577             }
8578         }
8579         return ret;
8580 #endif
8581     case TARGET_NR_getrusage:
8582         {
8583             struct rusage rusage;
8584             ret = get_errno(getrusage(arg1, &rusage));
8585             if (!is_error(ret)) {
8586                 ret = host_to_target_rusage(arg2, &rusage);
8587             }
8588         }
8589         return ret;
8590     case TARGET_NR_gettimeofday:
8591         {
8592             struct timeval tv;
8593             ret = get_errno(gettimeofday(&tv, NULL));
8594             if (!is_error(ret)) {
8595                 if (copy_to_user_timeval(arg1, &tv))
8596                     return -TARGET_EFAULT;
8597             }
8598         }
8599         return ret;
8600     case TARGET_NR_settimeofday:
8601         {
8602             struct timeval tv, *ptv = NULL;
8603             struct timezone tz, *ptz = NULL;
8604 
8605             if (arg1) {
8606                 if (copy_from_user_timeval(&tv, arg1)) {
8607                     return -TARGET_EFAULT;
8608                 }
8609                 ptv = &tv;
8610             }
8611 
8612             if (arg2) {
8613                 if (copy_from_user_timezone(&tz, arg2)) {
8614                     return -TARGET_EFAULT;
8615                 }
8616                 ptz = &tz;
8617             }
8618 
8619             return get_errno(settimeofday(ptv, ptz));
8620         }
8621 #if defined(TARGET_NR_select)
8622     case TARGET_NR_select:
8623 #if defined(TARGET_WANT_NI_OLD_SELECT)
8624         /* Some architectures used to implement old_select here,
8625          * but they now simply return ENOSYS for it.
8626          */
8627         ret = -TARGET_ENOSYS;
8628 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8629         ret = do_old_select(arg1);
8630 #else
8631         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8632 #endif
8633         return ret;
8634 #endif
8635 #ifdef TARGET_NR_pselect6
8636     case TARGET_NR_pselect6:
8637         {
8638             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8639             fd_set rfds, wfds, efds;
8640             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8641             struct timespec ts, *ts_ptr;
8642 
8643             /*
8644              * The 6th arg actually points to two packed values (the guest
8645              * sigset pointer and the sigset size), so we cannot use the C library.
8646              */
8647             sigset_t set;
8648             struct {
8649                 sigset_t *set;
8650                 size_t size;
8651             } sig, *sig_ptr;
8652 
8653             abi_ulong arg_sigset, arg_sigsize, *arg7;
8654             target_sigset_t *target_sigset;
8655 
8656             n = arg1;
8657             rfd_addr = arg2;
8658             wfd_addr = arg3;
8659             efd_addr = arg4;
8660             ts_addr = arg5;
8661 
8662             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8663             if (ret) {
8664                 return ret;
8665             }
8666             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8667             if (ret) {
8668                 return ret;
8669             }
8670             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8671             if (ret) {
8672                 return ret;
8673             }
8674 
8675             /*
8676              * This takes a timespec, and not a timeval, so we cannot
8677              * use the do_select() helper ...
8678              */
8679             if (ts_addr) {
8680                 if (target_to_host_timespec(&ts, ts_addr)) {
8681                     return -TARGET_EFAULT;
8682                 }
8683                 ts_ptr = &ts;
8684             } else {
8685                 ts_ptr = NULL;
8686             }
8687 
8688             /* Extract the two packed args for the sigset */
8689             if (arg6) {
8690                 sig_ptr = &sig;
8691                 sig.size = SIGSET_T_SIZE;
8692 
8693                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8694                 if (!arg7) {
8695                     return -TARGET_EFAULT;
8696                 }
8697                 arg_sigset = tswapal(arg7[0]);
8698                 arg_sigsize = tswapal(arg7[1]);
8699                 unlock_user(arg7, arg6, 0);
8700 
8701                 if (arg_sigset) {
8702                     sig.set = &set;
8703                     if (arg_sigsize != sizeof(*target_sigset)) {
8704                         /* Like the kernel, we enforce correct size sigsets */
8705                         return -TARGET_EINVAL;
8706                     }
8707                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8708                                               sizeof(*target_sigset), 1);
8709                     if (!target_sigset) {
8710                         return -TARGET_EFAULT;
8711                     }
8712                     target_to_host_sigset(&set, target_sigset);
8713                     unlock_user(target_sigset, arg_sigset, 0);
8714                 } else {
8715                     sig.set = NULL;
8716                 }
8717             } else {
8718                 sig_ptr = NULL;
8719             }
8720 
8721             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8722                                           ts_ptr, sig_ptr));
8723 
8724             if (!is_error(ret)) {
8725                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8726                     return -TARGET_EFAULT;
8727                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8728                     return -TARGET_EFAULT;
8729                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8730                     return -TARGET_EFAULT;
8731 
8732                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8733                     return -TARGET_EFAULT;
8734             }
8735         }
8736         return ret;
8737 #endif
8738 #ifdef TARGET_NR_symlink
8739     case TARGET_NR_symlink:
8740         {
8741             void *p2;
8742             p = lock_user_string(arg1);
8743             p2 = lock_user_string(arg2);
8744             if (!p || !p2)
8745                 ret = -TARGET_EFAULT;
8746             else
8747                 ret = get_errno(symlink(p, p2));
8748             unlock_user(p2, arg2, 0);
8749             unlock_user(p, arg1, 0);
8750         }
8751         return ret;
8752 #endif
8753 #if defined(TARGET_NR_symlinkat)
8754     case TARGET_NR_symlinkat:
8755         {
8756             void *p2;
8757             p  = lock_user_string(arg1);
8758             p2 = lock_user_string(arg3);
8759             if (!p || !p2)
8760                 ret = -TARGET_EFAULT;
8761             else
8762                 ret = get_errno(symlinkat(p, arg2, p2));
8763             unlock_user(p2, arg3, 0);
8764             unlock_user(p, arg1, 0);
8765         }
8766         return ret;
8767 #endif
8768 #ifdef TARGET_NR_readlink
8769     case TARGET_NR_readlink:
8770         {
8771             void *p2;
8772             p = lock_user_string(arg1);
8773             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8774             if (!p || !p2) {
8775                 ret = -TARGET_EFAULT;
8776             } else if (!arg3) {
8777                 /* Short circuit this for the magic exe check. */
8778                 ret = -TARGET_EINVAL;
8779             } else if (is_proc_myself((const char *)p, "exe")) {
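                     /* The guest is reading its own /proc/.../exe link:
                      * report the guest executable's path, not QEMU's. */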
8780                 char real[PATH_MAX], *temp;
8781                 temp = realpath(exec_path, real);
8782                 /* Return value is # of bytes that we wrote to the buffer. */
8783                 if (temp == NULL) {
8784                     ret = get_errno(-1);
8785                 } else {
8786                     /* Don't worry about sign mismatch as earlier mapping
8787                      * logic would have thrown a bad address error. */
8788                     ret = MIN(strlen(real), arg3);
8789                     /* We cannot NUL terminate the string. */
8790                     memcpy(p2, real, ret);
8791                 }
8792             } else {
8793                 ret = get_errno(readlink(path(p), p2, arg3));
8794             }
8795             unlock_user(p2, arg2, ret);
8796             unlock_user(p, arg1, 0);
8797         }
8798         return ret;
8799 #endif
8800 #if defined(TARGET_NR_readlinkat)
8801     case TARGET_NR_readlinkat:
8802         {
8803             void *p2;
8804             p  = lock_user_string(arg2);
8805             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8806             if (!p || !p2) {
8807                 ret = -TARGET_EFAULT;
8808             } else if (is_proc_myself((const char *)p, "exe")) {
8809                 char real[PATH_MAX], *temp;
8810                 temp = realpath(exec_path, real);
8811                 ret = temp == NULL ? get_errno(-1) : strlen(real);
                     if (temp) {
8812                     snprintf((char *)p2, arg4, "%s", real);
                     }
8813             } else {
8814                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8815             }
8816             unlock_user(p2, arg3, ret);
8817             unlock_user(p, arg2, 0);
8818         }
8819         return ret;
8820 #endif
8821 #ifdef TARGET_NR_swapon
8822     case TARGET_NR_swapon:
8823         if (!(p = lock_user_string(arg1)))
8824             return -TARGET_EFAULT;
8825         ret = get_errno(swapon(p, arg2));
8826         unlock_user(p, arg1, 0);
8827         return ret;
8828 #endif
8829     case TARGET_NR_reboot:
8830         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8831             /* arg4 (the command string) is used only by LINUX_REBOOT_CMD_RESTART2. */
8832             p = lock_user_string(arg4);
8833             if (!p) {
8834                 return -TARGET_EFAULT;
8835             }
8836             ret = get_errno(reboot(arg1, arg2, arg3, p));
8837             unlock_user(p, arg4, 0);
8838         } else {
8839             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8840         }
8841         return ret;
8842 #ifdef TARGET_NR_mmap
8843     case TARGET_NR_mmap:
8844 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8845     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8846     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8847     || defined(TARGET_S390X)
8848         {
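                 /* On these targets the guest mmap syscall passes a single
                  * pointer to a block of six arguments (addr, len, prot,
                  * flags, fd, offset) instead of six separate arguments. */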
8849             abi_ulong *v;
8850             abi_ulong v1, v2, v3, v4, v5, v6;
8851             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8852                 return -TARGET_EFAULT;
8853             v1 = tswapal(v[0]);
8854             v2 = tswapal(v[1]);
8855             v3 = tswapal(v[2]);
8856             v4 = tswapal(v[3]);
8857             v5 = tswapal(v[4]);
8858             v6 = tswapal(v[5]);
8859             unlock_user(v, arg1, 0);
8860             ret = get_errno(target_mmap(v1, v2, v3,
8861                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8862                                         v5, v6));
8863         }
8864 #else
8865         ret = get_errno(target_mmap(arg1, arg2, arg3,
8866                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8867                                     arg5,
8868                                     arg6));
8869 #endif
8870         return ret;
8871 #endif
8872 #ifdef TARGET_NR_mmap2
8873     case TARGET_NR_mmap2:
8874 #ifndef MMAP_SHIFT
8875 #define MMAP_SHIFT 12
8876 #endif
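             /* mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes
              * (4096 by default) rather than in bytes, hence the shift. */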
8877         ret = target_mmap(arg1, arg2, arg3,
8878                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8879                           arg5, arg6 << MMAP_SHIFT);
8880         return get_errno(ret);
8881 #endif
8882     case TARGET_NR_munmap:
8883         return get_errno(target_munmap(arg1, arg2));
8884     case TARGET_NR_mprotect:
8885         {
8886             TaskState *ts = cpu->opaque;
8887             /* Special hack to detect libc making the stack executable.  */
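                 /* If so, drop PROT_GROWSDOWN and widen the request to cover
                  * the whole guest stack region that QEMU reserved. */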
8888             if ((arg3 & PROT_GROWSDOWN)
8889                 && arg1 >= ts->info->stack_limit
8890                 && arg1 <= ts->info->start_stack) {
8891                 arg3 &= ~PROT_GROWSDOWN;
8892                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8893                 arg1 = ts->info->stack_limit;
8894             }
8895         }
8896         return get_errno(target_mprotect(arg1, arg2, arg3));
8897 #ifdef TARGET_NR_mremap
8898     case TARGET_NR_mremap:
8899         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8900 #endif
8901         /* ??? msync/mlock/munlock are broken for softmmu.  */
8902 #ifdef TARGET_NR_msync
8903     case TARGET_NR_msync:
8904         return get_errno(msync(g2h(arg1), arg2, arg3));
8905 #endif
8906 #ifdef TARGET_NR_mlock
8907     case TARGET_NR_mlock:
8908         return get_errno(mlock(g2h(arg1), arg2));
8909 #endif
8910 #ifdef TARGET_NR_munlock
8911     case TARGET_NR_munlock:
8912         return get_errno(munlock(g2h(arg1), arg2));
8913 #endif
8914 #ifdef TARGET_NR_mlockall
8915     case TARGET_NR_mlockall:
8916         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8917 #endif
8918 #ifdef TARGET_NR_munlockall
8919     case TARGET_NR_munlockall:
8920         return get_errno(munlockall());
8921 #endif
8922 #ifdef TARGET_NR_truncate
8923     case TARGET_NR_truncate:
8924         if (!(p = lock_user_string(arg1)))
8925             return -TARGET_EFAULT;
8926         ret = get_errno(truncate(p, arg2));
8927         unlock_user(p, arg1, 0);
8928         return ret;
8929 #endif
8930 #ifdef TARGET_NR_ftruncate
8931     case TARGET_NR_ftruncate:
8932         return get_errno(ftruncate(arg1, arg2));
8933 #endif
8934     case TARGET_NR_fchmod:
8935         return get_errno(fchmod(arg1, arg2));
8936 #if defined(TARGET_NR_fchmodat)
8937     case TARGET_NR_fchmodat:
8938         if (!(p = lock_user_string(arg2)))
8939             return -TARGET_EFAULT;
8940         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8941         unlock_user(p, arg2, 0);
8942         return ret;
8943 #endif
8944     case TARGET_NR_getpriority:
8945         /* Note that negative values are valid for getpriority, so we must
8946            differentiate based on errno settings.  */
8947         errno = 0;
8948         ret = getpriority(arg1, arg2);
8949         if (ret == -1 && errno != 0) {
8950             return -host_to_target_errno(errno);
8951         }
8952 #ifdef TARGET_ALPHA
8953         /* Return value is the unbiased priority.  Signal no error.  */
8954         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8955 #else
8956         /* Return value is a biased priority to avoid negative numbers.  */
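             /* (nice -20 is reported as 40 and nice 19 as 1, matching what
              * the kernel returns.) */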
8957         ret = 20 - ret;
8958 #endif
8959         return ret;
8960     case TARGET_NR_setpriority:
8961         return get_errno(setpriority(arg1, arg2, arg3));
8962 #ifdef TARGET_NR_statfs
8963     case TARGET_NR_statfs:
8964         if (!(p = lock_user_string(arg1))) {
8965             return -TARGET_EFAULT;
8966         }
8967         ret = get_errno(statfs(path(p), &stfs));
8968         unlock_user(p, arg1, 0);
8969     convert_statfs:
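             /* Conversion shared with TARGET_NR_fstatfs, which jumps here
              * after calling fstatfs(). */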
8970         if (!is_error(ret)) {
8971             struct target_statfs *target_stfs;
8972 
8973             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8974                 return -TARGET_EFAULT;
8975             __put_user(stfs.f_type, &target_stfs->f_type);
8976             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8977             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8978             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8979             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8980             __put_user(stfs.f_files, &target_stfs->f_files);
8981             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8982             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8983             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8984             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8985             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8986 #ifdef _STATFS_F_FLAGS
8987             __put_user(stfs.f_flags, &target_stfs->f_flags);
8988 #else
8989             __put_user(0, &target_stfs->f_flags);
8990 #endif
8991             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8992             unlock_user_struct(target_stfs, arg2, 1);
8993         }
8994         return ret;
8995 #endif
8996 #ifdef TARGET_NR_fstatfs
8997     case TARGET_NR_fstatfs:
8998         ret = get_errno(fstatfs(arg1, &stfs));
8999         goto convert_statfs;
9000 #endif
9001 #ifdef TARGET_NR_statfs64
9002     case TARGET_NR_statfs64:
9003         if (!(p = lock_user_string(arg1))) {
9004             return -TARGET_EFAULT;
9005         }
9006         ret = get_errno(statfs(path(p), &stfs));
9007         unlock_user(p, arg1, 0);
9008     convert_statfs64:
9009         if (!is_error(ret)) {
9010             struct target_statfs64 *target_stfs;
9011 
9012             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9013                 return -TARGET_EFAULT;
9014             __put_user(stfs.f_type, &target_stfs->f_type);
9015             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9016             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9017             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9018             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9019             __put_user(stfs.f_files, &target_stfs->f_files);
9020             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9021             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9022             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9023             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9024             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9025             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9026             unlock_user_struct(target_stfs, arg3, 1);
9027         }
9028         return ret;
9029     case TARGET_NR_fstatfs64:
9030         ret = get_errno(fstatfs(arg1, &stfs));
9031         goto convert_statfs64;
9032 #endif
9033 #ifdef TARGET_NR_socketcall
9034     case TARGET_NR_socketcall:
9035         return do_socketcall(arg1, arg2);
9036 #endif
9037 #ifdef TARGET_NR_accept
9038     case TARGET_NR_accept:
9039         return do_accept4(arg1, arg2, arg3, 0);
9040 #endif
9041 #ifdef TARGET_NR_accept4
9042     case TARGET_NR_accept4:
9043         return do_accept4(arg1, arg2, arg3, arg4);
9044 #endif
9045 #ifdef TARGET_NR_bind
9046     case TARGET_NR_bind:
9047         return do_bind(arg1, arg2, arg3);
9048 #endif
9049 #ifdef TARGET_NR_connect
9050     case TARGET_NR_connect:
9051         return do_connect(arg1, arg2, arg3);
9052 #endif
9053 #ifdef TARGET_NR_getpeername
9054     case TARGET_NR_getpeername:
9055         return do_getpeername(arg1, arg2, arg3);
9056 #endif
9057 #ifdef TARGET_NR_getsockname
9058     case TARGET_NR_getsockname:
9059         return do_getsockname(arg1, arg2, arg3);
9060 #endif
9061 #ifdef TARGET_NR_getsockopt
9062     case TARGET_NR_getsockopt:
9063         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9064 #endif
9065 #ifdef TARGET_NR_listen
9066     case TARGET_NR_listen:
9067         return get_errno(listen(arg1, arg2));
9068 #endif
9069 #ifdef TARGET_NR_recv
9070     case TARGET_NR_recv:
9071         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9072 #endif
9073 #ifdef TARGET_NR_recvfrom
9074     case TARGET_NR_recvfrom:
9075         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9076 #endif
9077 #ifdef TARGET_NR_recvmsg
9078     case TARGET_NR_recvmsg:
9079         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9080 #endif
9081 #ifdef TARGET_NR_send
9082     case TARGET_NR_send:
9083         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9084 #endif
9085 #ifdef TARGET_NR_sendmsg
9086     case TARGET_NR_sendmsg:
9087         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9088 #endif
9089 #ifdef TARGET_NR_sendmmsg
9090     case TARGET_NR_sendmmsg:
9091         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9092     case TARGET_NR_recvmmsg:
9093         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9094 #endif
9095 #ifdef TARGET_NR_sendto
9096     case TARGET_NR_sendto:
9097         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9098 #endif
9099 #ifdef TARGET_NR_shutdown
9100     case TARGET_NR_shutdown:
9101         return get_errno(shutdown(arg1, arg2));
9102 #endif
9103 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9104     case TARGET_NR_getrandom:
9105         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9106         if (!p) {
9107             return -TARGET_EFAULT;
9108         }
9109         ret = get_errno(getrandom(p, arg2, arg3));
9110         unlock_user(p, arg1, ret);
9111         return ret;
9112 #endif
9113 #ifdef TARGET_NR_socket
9114     case TARGET_NR_socket:
9115         return do_socket(arg1, arg2, arg3);
9116 #endif
9117 #ifdef TARGET_NR_socketpair
9118     case TARGET_NR_socketpair:
9119         return do_socketpair(arg1, arg2, arg3, arg4);
9120 #endif
9121 #ifdef TARGET_NR_setsockopt
9122     case TARGET_NR_setsockopt:
9123         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9124 #endif
9125 #if defined(TARGET_NR_syslog)
9126     case TARGET_NR_syslog:
9127         {
9128             int len = arg3;
9129 
9130             switch (arg1) {
9131             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9132             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9133             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9134             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9135             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9136             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9137             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9138             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9139                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9140             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9141             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9142             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9143                 {
9144                     if (len < 0) {
9145                         return -TARGET_EINVAL;
9146                     }
9147                     if (len == 0) {
9148                         return 0;
9149                     }
9150                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9151                     if (!p) {
9152                         return -TARGET_EFAULT;
9153                     }
9154                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9155                     unlock_user(p, arg2, arg3);
9156                 }
9157                 return ret;
9158             default:
9159                 return -TARGET_EINVAL;
9160             }
9161         }
9162         break;
9163 #endif
9164     case TARGET_NR_setitimer:
9165         {
9166             struct itimerval value, ovalue, *pvalue;
9167 
9168             if (arg2) {
9169                 pvalue = &value;
9170                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9171                     || copy_from_user_timeval(&pvalue->it_value,
9172                                               arg2 + sizeof(struct target_timeval)))
9173                     return -TARGET_EFAULT;
9174             } else {
9175                 pvalue = NULL;
9176             }
9177             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9178             if (!is_error(ret) && arg3) {
9179                 if (copy_to_user_timeval(arg3,
9180                                          &ovalue.it_interval)
9181                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9182                                             &ovalue.it_value))
9183                     return -TARGET_EFAULT;
9184             }
9185         }
9186         return ret;
9187     case TARGET_NR_getitimer:
9188         {
9189             struct itimerval value;
9190 
9191             ret = get_errno(getitimer(arg1, &value));
9192             if (!is_error(ret) && arg2) {
9193                 if (copy_to_user_timeval(arg2,
9194                                          &value.it_interval)
9195                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9196                                             &value.it_value))
9197                     return -TARGET_EFAULT;
9198             }
9199         }
9200         return ret;
9201 #ifdef TARGET_NR_stat
9202     case TARGET_NR_stat:
9203         if (!(p = lock_user_string(arg1))) {
9204             return -TARGET_EFAULT;
9205         }
9206         ret = get_errno(stat(path(p), &st));
9207         unlock_user(p, arg1, 0);
9208         goto do_stat;
9209 #endif
9210 #ifdef TARGET_NR_lstat
9211     case TARGET_NR_lstat:
9212         if (!(p = lock_user_string(arg1))) {
9213             return -TARGET_EFAULT;
9214         }
9215         ret = get_errno(lstat(path(p), &st));
9216         unlock_user(p, arg1, 0);
9217         goto do_stat;
9218 #endif
9219 #ifdef TARGET_NR_fstat
9220     case TARGET_NR_fstat:
9221         {
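                 /*
                  * stat and lstat above jump to the do_stat label so that all
                  * three syscalls share the host-to-target struct stat
                  * conversion below.
                  */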
9222             ret = get_errno(fstat(arg1, &st));
9223 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9224         do_stat:
9225 #endif
9226             if (!is_error(ret)) {
9227                 struct target_stat *target_st;
9228 
9229                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9230                     return -TARGET_EFAULT;
9231                 memset(target_st, 0, sizeof(*target_st));
9232                 __put_user(st.st_dev, &target_st->st_dev);
9233                 __put_user(st.st_ino, &target_st->st_ino);
9234                 __put_user(st.st_mode, &target_st->st_mode);
9235                 __put_user(st.st_uid, &target_st->st_uid);
9236                 __put_user(st.st_gid, &target_st->st_gid);
9237                 __put_user(st.st_nlink, &target_st->st_nlink);
9238                 __put_user(st.st_rdev, &target_st->st_rdev);
9239                 __put_user(st.st_size, &target_st->st_size);
9240                 __put_user(st.st_blksize, &target_st->st_blksize);
9241                 __put_user(st.st_blocks, &target_st->st_blocks);
9242                 __put_user(st.st_atime, &target_st->target_st_atime);
9243                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9244                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9245 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9246     defined(TARGET_STAT_HAVE_NSEC)
9247                 __put_user(st.st_atim.tv_nsec,
9248                            &target_st->target_st_atime_nsec);
9249                 __put_user(st.st_mtim.tv_nsec,
9250                            &target_st->target_st_mtime_nsec);
9251                 __put_user(st.st_ctim.tv_nsec,
9252                            &target_st->target_st_ctime_nsec);
9253 #endif
9254                 unlock_user_struct(target_st, arg2, 1);
9255             }
9256         }
9257         return ret;
9258 #endif
9259     case TARGET_NR_vhangup:
9260         return get_errno(vhangup());
9261 #ifdef TARGET_NR_syscall
9262     case TARGET_NR_syscall:
9263         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9264                           arg6, arg7, arg8, 0);
9265 #endif
9266     case TARGET_NR_wait4:
9267         {
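                 /*
                  * Run the host wait4() and convert the exit status and the
                  * optional struct rusage back into the guest's layout.
                  */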
9268             int status;
9269             abi_long status_ptr = arg2;
9270             struct rusage rusage, *rusage_ptr;
9271             abi_ulong target_rusage = arg4;
9272             abi_long rusage_err;
9273             if (target_rusage)
9274                 rusage_ptr = &rusage;
9275             else
9276                 rusage_ptr = NULL;
9277             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9278             if (!is_error(ret)) {
9279                 if (status_ptr && ret) {
9280                     status = host_to_target_waitstatus(status);
9281                     if (put_user_s32(status, status_ptr))
9282                         return -TARGET_EFAULT;
9283                 }
9284                 if (target_rusage) {
9285                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9286                     if (rusage_err) {
9287                         ret = rusage_err;
9288                     }
9289                 }
9290             }
9291         }
9292         return ret;
9293 #ifdef TARGET_NR_swapoff
9294     case TARGET_NR_swapoff:
9295         if (!(p = lock_user_string(arg1)))
9296             return -TARGET_EFAULT;
9297         ret = get_errno(swapoff(p));
9298         unlock_user(p, arg1, 0);
9299         return ret;
9300 #endif
9301     case TARGET_NR_sysinfo:
9302         {
9303             struct target_sysinfo *target_value;
9304             struct sysinfo value;
9305             ret = get_errno(sysinfo(&value));
9306             if (!is_error(ret) && arg1)
9307             {
9308                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9309                     return -TARGET_EFAULT;
9310                 __put_user(value.uptime, &target_value->uptime);
9311                 __put_user(value.loads[0], &target_value->loads[0]);
9312                 __put_user(value.loads[1], &target_value->loads[1]);
9313                 __put_user(value.loads[2], &target_value->loads[2]);
9314                 __put_user(value.totalram, &target_value->totalram);
9315                 __put_user(value.freeram, &target_value->freeram);
9316                 __put_user(value.sharedram, &target_value->sharedram);
9317                 __put_user(value.bufferram, &target_value->bufferram);
9318                 __put_user(value.totalswap, &target_value->totalswap);
9319                 __put_user(value.freeswap, &target_value->freeswap);
9320                 __put_user(value.procs, &target_value->procs);
9321                 __put_user(value.totalhigh, &target_value->totalhigh);
9322                 __put_user(value.freehigh, &target_value->freehigh);
9323                 __put_user(value.mem_unit, &target_value->mem_unit);
9324                 unlock_user_struct(target_value, arg1, 1);
9325             }
9326         }
9327         return ret;
9328 #ifdef TARGET_NR_ipc
9329     case TARGET_NR_ipc:
9330         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9331 #endif
9332 #ifdef TARGET_NR_semget
9333     case TARGET_NR_semget:
9334         return get_errno(semget(arg1, arg2, arg3));
9335 #endif
9336 #ifdef TARGET_NR_semop
9337     case TARGET_NR_semop:
9338         return do_semop(arg1, arg2, arg3);
9339 #endif
9340 #ifdef TARGET_NR_semctl
9341     case TARGET_NR_semctl:
9342         return do_semctl(arg1, arg2, arg3, arg4);
9343 #endif
9344 #ifdef TARGET_NR_msgctl
9345     case TARGET_NR_msgctl:
9346         return do_msgctl(arg1, arg2, arg3);
9347 #endif
9348 #ifdef TARGET_NR_msgget
9349     case TARGET_NR_msgget:
9350         return get_errno(msgget(arg1, arg2));
9351 #endif
9352 #ifdef TARGET_NR_msgrcv
9353     case TARGET_NR_msgrcv:
9354         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9355 #endif
9356 #ifdef TARGET_NR_msgsnd
9357     case TARGET_NR_msgsnd:
9358         return do_msgsnd(arg1, arg2, arg3, arg4);
9359 #endif
9360 #ifdef TARGET_NR_shmget
9361     case TARGET_NR_shmget:
9362         return get_errno(shmget(arg1, arg2, arg3));
9363 #endif
9364 #ifdef TARGET_NR_shmctl
9365     case TARGET_NR_shmctl:
9366         return do_shmctl(arg1, arg2, arg3);
9367 #endif
9368 #ifdef TARGET_NR_shmat
9369     case TARGET_NR_shmat:
9370         return do_shmat(cpu_env, arg1, arg2, arg3);
9371 #endif
9372 #ifdef TARGET_NR_shmdt
9373     case TARGET_NR_shmdt:
9374         return do_shmdt(arg1);
9375 #endif
9376     case TARGET_NR_fsync:
9377         return get_errno(fsync(arg1));
9378     case TARGET_NR_clone:
9379         /* Linux manages to have three different orderings for its
9380          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9381          * match the kernel's CONFIG_CLONE_* settings.
9382          * Microblaze is further special in that it uses a sixth
9383          * implicit argument to clone for the TLS pointer.
9384          */
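             /*
              * do_fork() takes its arguments in one fixed order; the per-target
              * cases below only reorder the raw syscall arguments to match.
              */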
9385 #if defined(TARGET_MICROBLAZE)
9386         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9387 #elif defined(TARGET_CLONE_BACKWARDS)
9388         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9389 #elif defined(TARGET_CLONE_BACKWARDS2)
9390         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9391 #else
9392         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9393 #endif
9394         return ret;
9395 #ifdef __NR_exit_group
9396         /* new thread calls */
9397     case TARGET_NR_exit_group:
9398         preexit_cleanup(cpu_env, arg1);
9399         return get_errno(exit_group(arg1));
9400 #endif
9401     case TARGET_NR_setdomainname:
9402         if (!(p = lock_user_string(arg1)))
9403             return -TARGET_EFAULT;
9404         ret = get_errno(setdomainname(p, arg2));
9405         unlock_user(p, arg1, 0);
9406         return ret;
9407     case TARGET_NR_uname:
9408         /* No need to transcode because we use the Linux syscall */
9409         {
9410             struct new_utsname * buf;
9411 
9412             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9413                 return -TARGET_EFAULT;
9414             ret = get_errno(sys_uname(buf));
9415             if (!is_error(ret)) {
9416                 /* Overwrite the native machine name with whatever is being
9417                    emulated. */
9418                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9419                           sizeof(buf->machine));
9420                 /* Allow the user to override the reported release.  */
9421                 if (qemu_uname_release && *qemu_uname_release) {
9422                     g_strlcpy(buf->release, qemu_uname_release,
9423                               sizeof(buf->release));
9424                 }
9425             }
9426             unlock_user_struct(buf, arg1, 1);
9427         }
9428         return ret;
9429 #ifdef TARGET_I386
9430     case TARGET_NR_modify_ldt:
9431         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9432 #if !defined(TARGET_X86_64)
9433     case TARGET_NR_vm86:
9434         return do_vm86(cpu_env, arg1, arg2);
9435 #endif
9436 #endif
9437     case TARGET_NR_adjtimex:
9438         {
9439             struct timex host_buf;
9440 
9441             if (target_to_host_timex(&host_buf, arg1) != 0) {
9442                 return -TARGET_EFAULT;
9443             }
9444             ret = get_errno(adjtimex(&host_buf));
9445             if (!is_error(ret)) {
9446                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9447                     return -TARGET_EFAULT;
9448                 }
9449             }
9450         }
9451         return ret;
9452 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9453     case TARGET_NR_clock_adjtime:
9454         {
9455             struct timex htx, *phtx = &htx;
9456 
9457             if (target_to_host_timex(phtx, arg2) != 0) {
9458                 return -TARGET_EFAULT;
9459             }
9460             ret = get_errno(clock_adjtime(arg1, phtx));
9461             if (!is_error(ret) && phtx) {
9462                 if (host_to_target_timex(arg2, phtx) != 0) {
9463                     return -TARGET_EFAULT;
9464                 }
9465             }
9466         }
9467         return ret;
9468 #endif
9469     case TARGET_NR_getpgid:
9470         return get_errno(getpgid(arg1));
9471     case TARGET_NR_fchdir:
9472         return get_errno(fchdir(arg1));
9473     case TARGET_NR_personality:
9474         return get_errno(personality(arg1));
9475 #ifdef TARGET_NR__llseek /* Not on alpha */
9476     case TARGET_NR__llseek:
9477         {
9478             int64_t res;
9479 #if !defined(__NR_llseek)
9480             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9481             if (res == -1) {
9482                 ret = get_errno(res);
9483             } else {
9484                 ret = 0;
9485             }
9486 #else
9487             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9488 #endif
9489             if ((ret == 0) && put_user_s64(res, arg4)) {
9490                 return -TARGET_EFAULT;
9491             }
9492         }
9493         return ret;
9494 #endif
9495 #ifdef TARGET_NR_getdents
9496     case TARGET_NR_getdents:
9497 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9498 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9499         {
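                 /*
                  * TARGET_ABI_BITS == 32 on a 64-bit host: read the host
                  * linux_dirent records into a bounce buffer and repack them
                  * as target_dirent records, since the two layouts differ.
                  */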
9500             struct target_dirent *target_dirp;
9501             struct linux_dirent *dirp;
9502             abi_long count = arg3;
9503 
9504             dirp = g_try_malloc(count);
9505             if (!dirp) {
9506                 return -TARGET_ENOMEM;
9507             }
9508 
9509             ret = get_errno(sys_getdents(arg1, dirp, count));
9510             if (!is_error(ret)) {
9511                 struct linux_dirent *de;
9512                 struct target_dirent *tde;
9513                 int len = ret;
9514                 int reclen, treclen;
9515                 int count1, tnamelen;
9516 
9517                 count1 = 0;
9518                 de = dirp;
9519                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                         g_free(dirp);
9520                     return -TARGET_EFAULT;
                     }
9521                 tde = target_dirp;
9522                 while (len > 0) {
9523                     reclen = de->d_reclen;
9524                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9525                     assert(tnamelen >= 0);
9526                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9527                     assert(count1 + treclen <= count);
9528                     tde->d_reclen = tswap16(treclen);
9529                     tde->d_ino = tswapal(de->d_ino);
9530                     tde->d_off = tswapal(de->d_off);
9531                     memcpy(tde->d_name, de->d_name, tnamelen);
9532                     de = (struct linux_dirent *)((char *)de + reclen);
9533                     len -= reclen;
9534                     tde = (struct target_dirent *)((char *)tde + treclen);
9535                     count1 += treclen;
9536                 }
9537                 ret = count1;
9538                 unlock_user(target_dirp, arg2, ret);
9539             }
9540             g_free(dirp);
9541         }
9542 #else
9543         {
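                 /*
                  * Host and target dirent layouts are compatible here, so the
                  * records are byte-swapped in place in the guest buffer.
                  */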
9544             struct linux_dirent *dirp;
9545             abi_long count = arg3;
9546 
9547             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9548                 return -TARGET_EFAULT;
9549             ret = get_errno(sys_getdents(arg1, dirp, count));
9550             if (!is_error(ret)) {
9551                 struct linux_dirent *de;
9552                 int len = ret;
9553                 int reclen;
9554                 de = dirp;
9555                 while (len > 0) {
9556                     reclen = de->d_reclen;
9557                     if (reclen > len)
9558                         break;
9559                     de->d_reclen = tswap16(reclen);
9560                     tswapls(&de->d_ino);
9561                     tswapls(&de->d_off);
9562                     de = (struct linux_dirent *)((char *)de + reclen);
9563                     len -= reclen;
9564                 }
9565             }
9566             unlock_user(dirp, arg2, ret);
9567         }
9568 #endif
9569 #else
9570         /* Implement getdents in terms of getdents64 */
9571         {
9572             struct linux_dirent64 *dirp;
9573             abi_long count = arg3;
9574 
9575             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9576             if (!dirp) {
9577                 return -TARGET_EFAULT;
9578             }
9579             ret = get_errno(sys_getdents64(arg1, dirp, count));
9580             if (!is_error(ret)) {
9581                 /* Convert the dirent64 structs to target dirent.  We do this
9582                  * in-place, since we can guarantee that a target_dirent is no
9583                  * larger than a dirent64; however this means we have to be
9584                  * careful to read everything before writing in the new format.
9585                  */
9586                 struct linux_dirent64 *de;
9587                 struct target_dirent *tde;
9588                 int len = ret;
9589                 int tlen = 0;
9590 
9591                 de = dirp;
9592                 tde = (struct target_dirent *)dirp;
9593                 while (len > 0) {
9594                     int namelen, treclen;
9595                     int reclen = de->d_reclen;
9596                     uint64_t ino = de->d_ino;
9597                     int64_t off = de->d_off;
9598                     uint8_t type = de->d_type;
9599 
9600                     namelen = strlen(de->d_name);
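                         /* The +2 below covers the trailing NUL and the d_type byte. */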
9601                     treclen = offsetof(struct target_dirent, d_name)
9602                         + namelen + 2;
9603                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9604 
9605                     memmove(tde->d_name, de->d_name, namelen + 1);
9606                     tde->d_ino = tswapal(ino);
9607                     tde->d_off = tswapal(off);
9608                     tde->d_reclen = tswap16(treclen);
9609                     /* The d_type value is stored in what was formerly a
9610                      * padding byte at the end of the structure.
9611                      */
9612                     *(((char *)tde) + treclen - 1) = type;
9613 
9614                     de = (struct linux_dirent64 *)((char *)de + reclen);
9615                     tde = (struct target_dirent *)((char *)tde + treclen);
9616                     len -= reclen;
9617                     tlen += treclen;
9618                 }
9619                 ret = tlen;
9620             }
9621             unlock_user(dirp, arg2, ret);
9622         }
9623 #endif
9624         return ret;
9625 #endif /* TARGET_NR_getdents */
9626 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9627     case TARGET_NR_getdents64:
9628         {
9629             struct linux_dirent64 *dirp;
9630             abi_long count = arg3;
9631             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9632                 return -TARGET_EFAULT;
9633             ret = get_errno(sys_getdents64(arg1, dirp, count));
9634             if (!is_error(ret)) {
9635                 struct linux_dirent64 *de;
9636                 int len = ret;
9637                 int reclen;
9638                 de = dirp;
9639                 while (len > 0) {
9640                     reclen = de->d_reclen;
9641                     if (reclen > len)
9642                         break;
9643                     de->d_reclen = tswap16(reclen);
9644                     tswap64s((uint64_t *)&de->d_ino);
9645                     tswap64s((uint64_t *)&de->d_off);
9646                     de = (struct linux_dirent64 *)((char *)de + reclen);
9647                     len -= reclen;
9648                 }
9649             }
9650             unlock_user(dirp, arg2, ret);
9651         }
9652         return ret;
9653 #endif /* TARGET_NR_getdents64 */
9654 #if defined(TARGET_NR__newselect)
9655     case TARGET_NR__newselect:
9656         return do_select(arg1, arg2, arg3, arg4, arg5);
9657 #endif
9658 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9659 # ifdef TARGET_NR_poll
9660     case TARGET_NR_poll:
9661 # endif
9662 # ifdef TARGET_NR_ppoll
9663     case TARGET_NR_ppoll:
9664 # endif
9665         {
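                 /*
                  * poll and ppoll share the conversion of the guest pollfd
                  * array; the switch on 'num' below only differs in how the
                  * timeout and signal mask are handled.
                  */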
9666             struct target_pollfd *target_pfd;
9667             unsigned int nfds = arg2;
9668             struct pollfd *pfd;
9669             unsigned int i;
9670 
9671             pfd = NULL;
9672             target_pfd = NULL;
9673             if (nfds) {
9674                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9675                     return -TARGET_EINVAL;
9676                 }
9677 
9678                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9679                                        sizeof(struct target_pollfd) * nfds, 1);
9680                 if (!target_pfd) {
9681                     return -TARGET_EFAULT;
9682                 }
9683 
9684                 pfd = alloca(sizeof(struct pollfd) * nfds);
9685                 for (i = 0; i < nfds; i++) {
9686                     pfd[i].fd = tswap32(target_pfd[i].fd);
9687                     pfd[i].events = tswap16(target_pfd[i].events);
9688                 }
9689             }
9690 
9691             switch (num) {
9692 # ifdef TARGET_NR_ppoll
9693             case TARGET_NR_ppoll:
9694             {
9695                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9696                 target_sigset_t *target_set;
9697                 sigset_t _set, *set = &_set;
9698 
9699                 if (arg3) {
9700                     if (target_to_host_timespec(timeout_ts, arg3)) {
9701                         unlock_user(target_pfd, arg1, 0);
9702                         return -TARGET_EFAULT;
9703                     }
9704                 } else {
9705                     timeout_ts = NULL;
9706                 }
9707 
9708                 if (arg4) {
9709                     if (arg5 != sizeof(target_sigset_t)) {
9710                         unlock_user(target_pfd, arg1, 0);
9711                         return -TARGET_EINVAL;
9712                     }
9713 
9714                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9715                     if (!target_set) {
9716                         unlock_user(target_pfd, arg1, 0);
9717                         return -TARGET_EFAULT;
9718                     }
9719                     target_to_host_sigset(set, target_set);
9720                 } else {
9721                     set = NULL;
9722                 }
9723 
9724                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9725                                            set, SIGSET_T_SIZE));
9726 
9727                 if (!is_error(ret) && arg3) {
9728                     host_to_target_timespec(arg3, timeout_ts);
9729                 }
9730                 if (arg4) {
9731                     unlock_user(target_set, arg4, 0);
9732                 }
9733                 break;
9734             }
9735 # endif
9736 # ifdef TARGET_NR_poll
9737             case TARGET_NR_poll:
9738             {
9739                 struct timespec ts, *pts;
9740 
9741                 if (arg3 >= 0) {
9742                     /* Convert ms to secs, ns */
9743                     ts.tv_sec = arg3 / 1000;
9744                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9745                     pts = &ts;
9746                 } else {
9747                     /* A negative poll() timeout means "infinite" */
9748                     pts = NULL;
9749                 }
9750                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9751                 break;
9752             }
9753 # endif
9754             default:
9755                 g_assert_not_reached();
9756             }
9757 
9758             if (!is_error(ret)) {
9759                 for (i = 0; i < nfds; i++) {
9760                     target_pfd[i].revents = tswap16(pfd[i].revents);
9761                 }
9762             }
9763             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9764         }
9765         return ret;
9766 #endif
9767     case TARGET_NR_flock:
9768         /* NOTE: the flock constants are the same for every
9769            Linux platform */
9770         return get_errno(safe_flock(arg1, arg2));
9771     case TARGET_NR_readv:
9772         {
9773             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9774             if (vec != NULL) {
9775                 ret = get_errno(safe_readv(arg1, vec, arg3));
9776                 unlock_iovec(vec, arg2, arg3, 1);
9777             } else {
9778                 ret = -host_to_target_errno(errno);
9779             }
9780         }
9781         return ret;
9782     case TARGET_NR_writev:
9783         {
9784             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9785             if (vec != NULL) {
9786                 ret = get_errno(safe_writev(arg1, vec, arg3));
9787                 unlock_iovec(vec, arg2, arg3, 0);
9788             } else {
9789                 ret = -host_to_target_errno(errno);
9790             }
9791         }
9792         return ret;
9793 #if defined(TARGET_NR_preadv)
9794     case TARGET_NR_preadv:
9795         {
9796             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9797             if (vec != NULL) {
9798                 unsigned long low, high;
9799 
9800                 target_to_host_low_high(arg4, arg5, &low, &high);
9801                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9802                 unlock_iovec(vec, arg2, arg3, 1);
9803             } else {
9804                 ret = -host_to_target_errno(errno);
9805            }
9806         }
9807         return ret;
9808 #endif
9809 #if defined(TARGET_NR_pwritev)
9810     case TARGET_NR_pwritev:
9811         {
9812             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9813             if (vec != NULL) {
9814                 unsigned long low, high;
9815 
9816                 target_to_host_low_high(arg4, arg5, &low, &high);
9817                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9818                 unlock_iovec(vec, arg2, arg3, 0);
9819             } else {
9820                 ret = -host_to_target_errno(errno);
9821            }
9822         }
9823         return ret;
9824 #endif
9825     case TARGET_NR_getsid:
9826         return get_errno(getsid(arg1));
9827 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9828     case TARGET_NR_fdatasync:
9829         return get_errno(fdatasync(arg1));
9830 #endif
9831 #ifdef TARGET_NR__sysctl
9832     case TARGET_NR__sysctl:
9833         /* We don't implement this, but ENOTDIR is always a safe
9834            return value. */
9835         return -TARGET_ENOTDIR;
9836 #endif
9837     case TARGET_NR_sched_getaffinity:
9838         {
9839             unsigned int mask_size;
9840             unsigned long *mask;
9841 
9842             /*
9843              * sched_getaffinity needs multiples of ulong, so we need to take
9844              * care of mismatches between target ulong and host ulong sizes.
9845              */
9846             if (arg2 & (sizeof(abi_ulong) - 1)) {
9847                 return -TARGET_EINVAL;
9848             }
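                 /* Round the guest size up to a whole number of host longs. */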
9849             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9850 
9851             mask = alloca(mask_size);
9852             memset(mask, 0, mask_size);
9853             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9854 
9855             if (!is_error(ret)) {
9856                 if (ret > arg2) {
9857                     /* More data was returned than fits in the caller's buffer.
9858                      * This only happens if sizeof(abi_long) < sizeof(long)
9859                      * and the caller passed us a buffer holding an odd number
9860                      * of abi_longs. If the host kernel is actually using the
9861                      * extra 4 bytes then fail EINVAL; otherwise we can just
9862                      * ignore them and only copy the interesting part.
9863                      */
9864                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9865                     if (numcpus > arg2 * 8) {
9866                         return -TARGET_EINVAL;
9867                     }
9868                     ret = arg2;
9869                 }
9870 
9871                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9872                     return -TARGET_EFAULT;
9873                 }
9874             }
9875         }
9876         return ret;
9877     case TARGET_NR_sched_setaffinity:
9878         {
9879             unsigned int mask_size;
9880             unsigned long *mask;
9881 
9882             /*
9883              * sched_setaffinity needs multiples of ulong, so we need to take
9884              * care of mismatches between target ulong and host ulong sizes.
9885              */
9886             if (arg2 & (sizeof(abi_ulong) - 1)) {
9887                 return -TARGET_EINVAL;
9888             }
9889             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9890             mask = alloca(mask_size);
9891 
9892             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9893             if (ret) {
9894                 return ret;
9895             }
9896 
9897             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9898         }
9899     case TARGET_NR_getcpu:
9900         {
9901             unsigned cpu, node;
9902             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9903                                        arg2 ? &node : NULL,
9904                                        NULL));
9905             if (is_error(ret)) {
9906                 return ret;
9907             }
9908             if (arg1 && put_user_u32(cpu, arg1)) {
9909                 return -TARGET_EFAULT;
9910             }
9911             if (arg2 && put_user_u32(node, arg2)) {
9912                 return -TARGET_EFAULT;
9913             }
9914         }
9915         return ret;
9916     case TARGET_NR_sched_setparam:
9917         {
9918             struct sched_param *target_schp;
9919             struct sched_param schp;
9920 
9921             if (arg2 == 0) {
9922                 return -TARGET_EINVAL;
9923             }
9924             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9925                 return -TARGET_EFAULT;
9926             schp.sched_priority = tswap32(target_schp->sched_priority);
9927             unlock_user_struct(target_schp, arg2, 0);
9928             return get_errno(sched_setparam(arg1, &schp));
9929         }
9930     case TARGET_NR_sched_getparam:
9931         {
9932             struct sched_param *target_schp;
9933             struct sched_param schp;
9934 
9935             if (arg2 == 0) {
9936                 return -TARGET_EINVAL;
9937             }
9938             ret = get_errno(sched_getparam(arg1, &schp));
9939             if (!is_error(ret)) {
9940                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9941                     return -TARGET_EFAULT;
9942                 target_schp->sched_priority = tswap32(schp.sched_priority);
9943                 unlock_user_struct(target_schp, arg2, 1);
9944             }
9945         }
9946         return ret;
9947     case TARGET_NR_sched_setscheduler:
9948         {
9949             struct sched_param *target_schp;
9950             struct sched_param schp;
9951             if (arg3 == 0) {
9952                 return -TARGET_EINVAL;
9953             }
9954             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9955                 return -TARGET_EFAULT;
9956             schp.sched_priority = tswap32(target_schp->sched_priority);
9957             unlock_user_struct(target_schp, arg3, 0);
9958             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9959         }
9960     case TARGET_NR_sched_getscheduler:
9961         return get_errno(sched_getscheduler(arg1));
9962     case TARGET_NR_sched_yield:
9963         return get_errno(sched_yield());
9964     case TARGET_NR_sched_get_priority_max:
9965         return get_errno(sched_get_priority_max(arg1));
9966     case TARGET_NR_sched_get_priority_min:
9967         return get_errno(sched_get_priority_min(arg1));
9968     case TARGET_NR_sched_rr_get_interval:
9969         {
9970             struct timespec ts;
9971             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9972             if (!is_error(ret)) {
9973                 ret = host_to_target_timespec(arg2, &ts);
9974             }
9975         }
9976         return ret;
9977     case TARGET_NR_nanosleep:
9978         {
9979             struct timespec req, rem;
9980             if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
9981             ret = get_errno(safe_nanosleep(&req, &rem));
9982             if (is_error(ret) && arg2) {
9983                 host_to_target_timespec(arg2, &rem);
9984             }
9985         }
9986         return ret;
9987     case TARGET_NR_prctl:
9988         switch (arg1) {
9989         case PR_GET_PDEATHSIG:
9990         {
9991             int deathsig;
9992             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9993             if (!is_error(ret) && arg2
9994                 && put_user_ual(deathsig, arg2)) {
9995                 return -TARGET_EFAULT;
9996             }
9997             return ret;
9998         }
9999 #ifdef PR_GET_NAME
10000         case PR_GET_NAME:
10001         {
10002             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10003             if (!name) {
10004                 return -TARGET_EFAULT;
10005             }
10006             ret = get_errno(prctl(arg1, (unsigned long)name,
10007                                   arg3, arg4, arg5));
10008             unlock_user(name, arg2, 16);
10009             return ret;
10010         }
10011         case PR_SET_NAME:
10012         {
10013             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10014             if (!name) {
10015                 return -TARGET_EFAULT;
10016             }
10017             ret = get_errno(prctl(arg1, (unsigned long)name,
10018                                   arg3, arg4, arg5));
10019             unlock_user(name, arg2, 0);
10020             return ret;
10021         }
10022 #endif
10023 #ifdef TARGET_MIPS
10024         case TARGET_PR_GET_FP_MODE:
10025         {
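                  /* Report the FP mode from the CP0 Status.FR and Config5.FRE bits. */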
10026             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10027             ret = 0;
10028             if (env->CP0_Status & (1 << CP0St_FR)) {
10029                 ret |= TARGET_PR_FP_MODE_FR;
10030             }
10031             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10032                 ret |= TARGET_PR_FP_MODE_FRE;
10033             }
10034             return ret;
10035         }
10036         case TARGET_PR_SET_FP_MODE:
10037         {
10038             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10039             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10040             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10041             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10042             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10043 
10044             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10045                                             TARGET_PR_FP_MODE_FRE;
10046 
10047             /* If nothing to change, return right away, successfully.  */
10048             if (old_fr == new_fr && old_fre == new_fre) {
10049                 return 0;
10050             }
10051             /* Check the value is valid */
10052             if (arg2 & ~known_bits) {
10053                 return -TARGET_EOPNOTSUPP;
10054             }
10055             /* Setting FRE without FR is not supported.  */
10056             if (new_fre && !new_fr) {
10057                 return -TARGET_EOPNOTSUPP;
10058             }
10059             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10060                 /* FR1 is not supported */
10061                 return -TARGET_EOPNOTSUPP;
10062             }
10063             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10064                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10065                 /* cannot set FR=0 */
10066                 return -TARGET_EOPNOTSUPP;
10067             }
10068             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10069                 /* Cannot set FRE=1 */
10070                 return -TARGET_EOPNOTSUPP;
10071             }
10072 
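                  /*
                   * Re-pack the FP registers: with FR=0 the odd-numbered single
                   * registers live in the upper halves of the even-numbered
                   * 64-bit registers, so copy them across when the mode changes.
                   */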
10073             int i;
10074             fpr_t *fpr = env->active_fpu.fpr;
10075             for (i = 0; i < 32 ; i += 2) {
10076                 if (!old_fr && new_fr) {
10077                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10078                 } else if (old_fr && !new_fr) {
10079                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10080                 }
10081             }
10082 
10083             if (new_fr) {
10084                 env->CP0_Status |= (1 << CP0St_FR);
10085                 env->hflags |= MIPS_HFLAG_F64;
10086             } else {
10087                 env->CP0_Status &= ~(1 << CP0St_FR);
10088                 env->hflags &= ~MIPS_HFLAG_F64;
10089             }
10090             if (new_fre) {
10091                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10092                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10093                     env->hflags |= MIPS_HFLAG_FRE;
10094                 }
10095             } else {
10096                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10097                 env->hflags &= ~MIPS_HFLAG_FRE;
10098             }
10099 
10100             return 0;
10101         }
10102 #endif /* MIPS */
10103 #ifdef TARGET_AARCH64
10104         case TARGET_PR_SVE_SET_VL:
10105             /*
10106              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10107              * PR_SVE_VL_INHERIT.  Note the kernel definition
10108              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10109              * even though the current architectural maximum is VQ=16.
10110              */
10111             ret = -TARGET_EINVAL;
10112             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10113                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10114                 CPUARMState *env = cpu_env;
10115                 ARMCPU *cpu = env_archcpu(env);
10116                 uint32_t vq, old_vq;
10117 
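                      /*
                       * The low four bits of ZCR_EL1 hold VQ - 1; convert between
                       * that and the vector length in bytes (VL = VQ * 16) used
                       * by the prctl interface.
                       */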
10118                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10119                 vq = MAX(arg2 / 16, 1);
10120                 vq = MIN(vq, cpu->sve_max_vq);
10121 
10122                 if (vq < old_vq) {
10123                     aarch64_sve_narrow_vq(env, vq);
10124                 }
10125                 env->vfp.zcr_el[1] = vq - 1;
10126                 arm_rebuild_hflags(env);
10127                 ret = vq * 16;
10128             }
10129             return ret;
10130         case TARGET_PR_SVE_GET_VL:
10131             ret = -TARGET_EINVAL;
10132             {
10133                 ARMCPU *cpu = env_archcpu(cpu_env);
10134                 if (cpu_isar_feature(aa64_sve, cpu)) {
10135                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10136                 }
10137             }
10138             return ret;
10139         case TARGET_PR_PAC_RESET_KEYS:
10140             {
10141                 CPUARMState *env = cpu_env;
10142                 ARMCPU *cpu = env_archcpu(env);
10143 
10144                 if (arg3 || arg4 || arg5) {
10145                     return -TARGET_EINVAL;
10146                 }
10147                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10148                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10149                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10150                                TARGET_PR_PAC_APGAKEY);
10151                     int ret = 0;
10152                     Error *err = NULL;
10153 
10154                     if (arg2 == 0) {
10155                         arg2 = all;
10156                     } else if (arg2 & ~all) {
10157                         return -TARGET_EINVAL;
10158                     }
10159                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10160                         ret |= qemu_guest_getrandom(&env->keys.apia,
10161                                                     sizeof(ARMPACKey), &err);
10162                     }
10163                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10164                         ret |= qemu_guest_getrandom(&env->keys.apib,
10165                                                     sizeof(ARMPACKey), &err);
10166                     }
10167                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10168                         ret |= qemu_guest_getrandom(&env->keys.apda,
10169                                                     sizeof(ARMPACKey), &err);
10170                     }
10171                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10172                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10173                                                     sizeof(ARMPACKey), &err);
10174                     }
10175                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10176                         ret |= qemu_guest_getrandom(&env->keys.apga,
10177                                                     sizeof(ARMPACKey), &err);
10178                     }
10179                     if (ret != 0) {
10180                         /*
10181                          * Some unknown failure in the crypto.  The best
10182                          * we can do is log it and fail the syscall.
10183                          * The real syscall cannot fail this way.
10184                          */
10185                         qemu_log_mask(LOG_UNIMP,
10186                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10187                                       error_get_pretty(err));
10188                         error_free(err);
10189                         return -TARGET_EIO;
10190                     }
10191                     return 0;
10192                 }
10193             }
10194             return -TARGET_EINVAL;
10195 #endif /* AARCH64 */
10196         case PR_GET_SECCOMP:
10197         case PR_SET_SECCOMP:
10198             /* Disable seccomp to prevent the target from disabling
10199              * syscalls we need. */
10200             return -TARGET_EINVAL;
10201         default:
10202             /* Most prctl options have no pointer arguments */
10203             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10204         }
10205         break;
10206 #ifdef TARGET_NR_arch_prctl
10207     case TARGET_NR_arch_prctl:
10208 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10209         return do_arch_prctl(cpu_env, arg1, arg2);
10210 #else
10211 #error unreachable
10212 #endif
10213 #endif
10214 #ifdef TARGET_NR_pread64
10215     case TARGET_NR_pread64:
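              /*
               * ABIs that pass 64-bit values in aligned register pairs insert a
               * padding argument, so the offset halves start one slot later.
               */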
10216         if (regpairs_aligned(cpu_env, num)) {
10217             arg4 = arg5;
10218             arg5 = arg6;
10219         }
10220         if (arg2 == 0 && arg3 == 0) {
10221             /* Special-case NULL buffer and zero length, which should succeed */
10222             p = 0;
10223         } else {
10224             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10225             if (!p) {
10226                 return -TARGET_EFAULT;
10227             }
10228         }
10229         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10230         unlock_user(p, arg2, ret);
10231         return ret;
10232     case TARGET_NR_pwrite64:
10233         if (regpairs_aligned(cpu_env, num)) {
10234             arg4 = arg5;
10235             arg5 = arg6;
10236         }
10237         if (arg2 == 0 && arg3 == 0) {
10238             /* Special-case NULL buffer and zero length, which should succeed */
10239             p = 0;
10240         } else {
10241             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10242             if (!p) {
10243                 return -TARGET_EFAULT;
10244             }
10245         }
10246         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10247         unlock_user(p, arg2, 0);
10248         return ret;
10249 #endif
10250     case TARGET_NR_getcwd:
10251         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10252             return -TARGET_EFAULT;
10253         ret = get_errno(sys_getcwd1(p, arg2));
10254         unlock_user(p, arg1, ret);
10255         return ret;
10256     case TARGET_NR_capget:
10257     case TARGET_NR_capset:
10258     {
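              /*
               * capget and capset share the header and data marshalling below;
               * only the direction of the data copy and the final host call
               * differ.
               */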
10259         struct target_user_cap_header *target_header;
10260         struct target_user_cap_data *target_data = NULL;
10261         struct __user_cap_header_struct header;
10262         struct __user_cap_data_struct data[2];
10263         struct __user_cap_data_struct *dataptr = NULL;
10264         int i, target_datalen;
10265         int data_items = 1;
10266 
10267         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10268             return -TARGET_EFAULT;
10269         }
10270         header.version = tswap32(target_header->version);
10271         header.pid = tswap32(target_header->pid);
10272 
10273         if (header.version != _LINUX_CAPABILITY_VERSION) {
10274             /* Versions 2 and up take a pointer to two user_data structs */
10275             data_items = 2;
10276         }
10277 
10278         target_datalen = sizeof(*target_data) * data_items;
10279 
10280         if (arg2) {
10281             if (num == TARGET_NR_capget) {
10282                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10283             } else {
10284                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10285             }
10286             if (!target_data) {
10287                 unlock_user_struct(target_header, arg1, 0);
10288                 return -TARGET_EFAULT;
10289             }
10290 
10291             if (num == TARGET_NR_capset) {
10292                 for (i = 0; i < data_items; i++) {
10293                     data[i].effective = tswap32(target_data[i].effective);
10294                     data[i].permitted = tswap32(target_data[i].permitted);
10295                     data[i].inheritable = tswap32(target_data[i].inheritable);
10296                 }
10297             }
10298 
10299             dataptr = data;
10300         }
10301 
10302         if (num == TARGET_NR_capget) {
10303             ret = get_errno(capget(&header, dataptr));
10304         } else {
10305             ret = get_errno(capset(&header, dataptr));
10306         }
10307 
10308         /* The kernel always updates version for both capget and capset */
10309         target_header->version = tswap32(header.version);
10310         unlock_user_struct(target_header, arg1, 1);
10311 
10312         if (arg2) {
10313             if (num == TARGET_NR_capget) {
10314                 for (i = 0; i < data_items; i++) {
10315                     target_data[i].effective = tswap32(data[i].effective);
10316                     target_data[i].permitted = tswap32(data[i].permitted);
10317                     target_data[i].inheritable = tswap32(data[i].inheritable);
10318                 }
10319                 unlock_user(target_data, arg2, target_datalen);
10320             } else {
10321                 unlock_user(target_data, arg2, 0);
10322             }
10323         }
10324         return ret;
10325     }
10326     case TARGET_NR_sigaltstack:
10327         return do_sigaltstack(arg1, arg2,
10328                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10329 
10330 #ifdef CONFIG_SENDFILE
10331 #ifdef TARGET_NR_sendfile
10332     case TARGET_NR_sendfile:
10333     {
10334         off_t *offp = NULL;
10335         off_t off;
10336         if (arg3) {
10337             ret = get_user_sal(off, arg3);
10338             if (is_error(ret)) {
10339                 return ret;
10340             }
10341             offp = &off;
10342         }
10343         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10344         if (!is_error(ret) && arg3) {
10345             abi_long ret2 = put_user_sal(off, arg3);
10346             if (is_error(ret2)) {
10347                 ret = ret2;
10348             }
10349         }
10350         return ret;
10351     }
10352 #endif
10353 #ifdef TARGET_NR_sendfile64
10354     case TARGET_NR_sendfile64:
10355     {
10356         off_t *offp = NULL;
10357         off_t off;
10358         if (arg3) {
10359             ret = get_user_s64(off, arg3);
10360             if (is_error(ret)) {
10361                 return ret;
10362             }
10363             offp = &off;
10364         }
10365         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10366         if (!is_error(ret) && arg3) {
10367             abi_long ret2 = put_user_s64(off, arg3);
10368             if (is_error(ret2)) {
10369                 ret = ret2;
10370             }
10371         }
10372         return ret;
10373     }
10374 #endif
10375 #endif
10376 #ifdef TARGET_NR_vfork
10377     case TARGET_NR_vfork:
10378         return get_errno(do_fork(cpu_env,
10379                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10380                          0, 0, 0, 0));
10381 #endif
10382 #ifdef TARGET_NR_ugetrlimit
10383     case TARGET_NR_ugetrlimit:
10384     {
10385         struct rlimit rlim;
10386         int resource = target_to_host_resource(arg1);
10387         ret = get_errno(getrlimit(resource, &rlim));
10388         if (!is_error(ret)) {
10389             struct target_rlimit *target_rlim;
10390             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10391                 return -TARGET_EFAULT;
10392             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10393             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10394             unlock_user_struct(target_rlim, arg2, 1);
10395         }
10396         return ret;
10397     }
10398 #endif
10399 #ifdef TARGET_NR_truncate64
10400     case TARGET_NR_truncate64:
10401         if (!(p = lock_user_string(arg1)))
10402             return -TARGET_EFAULT;
10403         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10404         unlock_user(p, arg1, 0);
10405         return ret;
10406 #endif
10407 #ifdef TARGET_NR_ftruncate64
10408     case TARGET_NR_ftruncate64:
10409         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10410 #endif
10411 #ifdef TARGET_NR_stat64
10412     case TARGET_NR_stat64:
10413         if (!(p = lock_user_string(arg1))) {
10414             return -TARGET_EFAULT;
10415         }
10416         ret = get_errno(stat(path(p), &st));
10417         unlock_user(p, arg1, 0);
10418         if (!is_error(ret))
10419             ret = host_to_target_stat64(cpu_env, arg2, &st);
10420         return ret;
10421 #endif
10422 #ifdef TARGET_NR_lstat64
10423     case TARGET_NR_lstat64:
10424         if (!(p = lock_user_string(arg1))) {
10425             return -TARGET_EFAULT;
10426         }
10427         ret = get_errno(lstat(path(p), &st));
10428         unlock_user(p, arg1, 0);
10429         if (!is_error(ret))
10430             ret = host_to_target_stat64(cpu_env, arg2, &st);
10431         return ret;
10432 #endif
10433 #ifdef TARGET_NR_fstat64
10434     case TARGET_NR_fstat64:
10435         ret = get_errno(fstat(arg1, &st));
10436         if (!is_error(ret))
10437             ret = host_to_target_stat64(cpu_env, arg2, &st);
10438         return ret;
10439 #endif
10440 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10441 #ifdef TARGET_NR_fstatat64
10442     case TARGET_NR_fstatat64:
10443 #endif
10444 #ifdef TARGET_NR_newfstatat
10445     case TARGET_NR_newfstatat:
10446 #endif
10447         if (!(p = lock_user_string(arg2))) {
10448             return -TARGET_EFAULT;
10449         }
10450         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10451         unlock_user(p, arg2, 0);
10452         if (!is_error(ret))
10453             ret = host_to_target_stat64(cpu_env, arg3, &st);
10454         return ret;
10455 #endif
10456 #if defined(TARGET_NR_statx)
10457     case TARGET_NR_statx:
10458         {
10459             struct target_statx *target_stx;
10460             int dirfd = arg1;
10461             int flags = arg3;
10462 
10463             p = lock_user_string(arg2);
10464             if (p == NULL) {
10465                 return -TARGET_EFAULT;
10466             }
10467 #if defined(__NR_statx)
10468             {
10469                 /*
10470                  * It is assumed that struct statx is architecture independent.
10471                  */
10472                 struct target_statx host_stx;
10473                 int mask = arg4;
10474 
10475                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10476                 if (!is_error(ret)) {
10477                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10478                         unlock_user(p, arg2, 0);
10479                         return -TARGET_EFAULT;
10480                     }
10481                 }
10482 
10483                 if (ret != -TARGET_ENOSYS) {
10484                     unlock_user(p, arg2, 0);
10485                     return ret;
10486                 }
10487             }
10488 #endif
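                  /*
                   * Fall back to fstatat() and fill in only the fields that
                   * struct stat provides.
                   */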
10489             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10490             unlock_user(p, arg2, 0);
10491 
10492             if (!is_error(ret)) {
10493                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10494                     return -TARGET_EFAULT;
10495                 }
10496                 memset(target_stx, 0, sizeof(*target_stx));
10497                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10498                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10499                 __put_user(st.st_ino, &target_stx->stx_ino);
10500                 __put_user(st.st_mode, &target_stx->stx_mode);
10501                 __put_user(st.st_uid, &target_stx->stx_uid);
10502                 __put_user(st.st_gid, &target_stx->stx_gid);
10503                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10504                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10505                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10506                 __put_user(st.st_size, &target_stx->stx_size);
10507                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10508                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10509                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10510                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10511                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10512                 unlock_user_struct(target_stx, arg5, 1);
10513             }
10514         }
10515         return ret;
10516 #endif
10517 #ifdef TARGET_NR_lchown
10518     case TARGET_NR_lchown:
10519         if (!(p = lock_user_string(arg1)))
10520             return -TARGET_EFAULT;
10521         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10522         unlock_user(p, arg1, 0);
10523         return ret;
10524 #endif
10525 #ifdef TARGET_NR_getuid
10526     case TARGET_NR_getuid:
10527         return get_errno(high2lowuid(getuid()));
10528 #endif
10529 #ifdef TARGET_NR_getgid
10530     case TARGET_NR_getgid:
10531         return get_errno(high2lowgid(getgid()));
10532 #endif
10533 #ifdef TARGET_NR_geteuid
10534     case TARGET_NR_geteuid:
10535         return get_errno(high2lowuid(geteuid()));
10536 #endif
10537 #ifdef TARGET_NR_getegid
10538     case TARGET_NR_getegid:
10539         return get_errno(high2lowgid(getegid()));
10540 #endif
10541     case TARGET_NR_setreuid:
10542         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10543     case TARGET_NR_setregid:
10544         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10545     case TARGET_NR_getgroups:
10546         {
10547             int gidsetsize = arg1;
10548             target_id *target_grouplist;
10549             gid_t *grouplist;
10550             int i;
10551 
10552             grouplist = alloca(gidsetsize * sizeof(gid_t));
10553             ret = get_errno(getgroups(gidsetsize, grouplist));
10554             if (gidsetsize == 0)
10555                 return ret;
10556             if (!is_error(ret)) {
10557                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10558                 if (!target_grouplist)
10559                     return -TARGET_EFAULT;
10560                 for (i = 0; i < ret; i++)
10561                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10562                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10563             }
10564         }
10565         return ret;
10566     case TARGET_NR_setgroups:
10567         {
10568             int gidsetsize = arg1;
10569             target_id *target_grouplist;
10570             gid_t *grouplist = NULL;
10571             int i;
10572             if (gidsetsize) {
10573                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10574                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10575                 if (!target_grouplist) {
10576                     return -TARGET_EFAULT;
10577                 }
10578                 for (i = 0; i < gidsetsize; i++) {
10579                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10580                 }
10581                 unlock_user(target_grouplist, arg2, 0);
10582             }
10583             return get_errno(setgroups(gidsetsize, grouplist));
10584         }
10585     case TARGET_NR_fchown:
10586         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10587 #if defined(TARGET_NR_fchownat)
10588     case TARGET_NR_fchownat:
10589         if (!(p = lock_user_string(arg2)))
10590             return -TARGET_EFAULT;
10591         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10592                                  low2highgid(arg4), arg5));
10593         unlock_user(p, arg2, 0);
10594         return ret;
10595 #endif
10596 #ifdef TARGET_NR_setresuid
10597     case TARGET_NR_setresuid:
10598         return get_errno(sys_setresuid(low2highuid(arg1),
10599                                        low2highuid(arg2),
10600                                        low2highuid(arg3)));
10601 #endif
10602 #ifdef TARGET_NR_getresuid
10603     case TARGET_NR_getresuid:
10604         {
10605             uid_t ruid, euid, suid;
10606             ret = get_errno(getresuid(&ruid, &euid, &suid));
10607             if (!is_error(ret)) {
10608                 if (put_user_id(high2lowuid(ruid), arg1)
10609                     || put_user_id(high2lowuid(euid), arg2)
10610                     || put_user_id(high2lowuid(suid), arg3))
10611                     return -TARGET_EFAULT;
10612             }
10613         }
10614         return ret;
10615 #endif
10616 #ifdef TARGET_NR_setresgid
10617     case TARGET_NR_setresgid:
10618         return get_errno(sys_setresgid(low2highgid(arg1),
10619                                        low2highgid(arg2),
10620                                        low2highgid(arg3)));
10621 #endif
10622 #ifdef TARGET_NR_getresgid
10623     case TARGET_NR_getresgid:
10624         {
10625             gid_t rgid, egid, sgid;
10626             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10627             if (!is_error(ret)) {
10628                 if (put_user_id(high2lowgid(rgid), arg1)
10629                     || put_user_id(high2lowgid(egid), arg2)
10630                     || put_user_id(high2lowgid(sgid), arg3))
10631                     return -TARGET_EFAULT;
10632             }
10633         }
10634         return ret;
10635 #endif
10636 #ifdef TARGET_NR_chown
10637     case TARGET_NR_chown:
10638         if (!(p = lock_user_string(arg1)))
10639             return -TARGET_EFAULT;
10640         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10641         unlock_user(p, arg1, 0);
10642         return ret;
10643 #endif
10644     case TARGET_NR_setuid:
10645         return get_errno(sys_setuid(low2highuid(arg1)));
10646     case TARGET_NR_setgid:
10647         return get_errno(sys_setgid(low2highgid(arg1)));
10648     case TARGET_NR_setfsuid:
10649         return get_errno(setfsuid(arg1));
10650     case TARGET_NR_setfsgid:
10651         return get_errno(setfsgid(arg1));
10652 
10653 #ifdef TARGET_NR_lchown32
10654     case TARGET_NR_lchown32:
10655         if (!(p = lock_user_string(arg1)))
10656             return -TARGET_EFAULT;
10657         ret = get_errno(lchown(p, arg2, arg3));
10658         unlock_user(p, arg1, 0);
10659         return ret;
10660 #endif
10661 #ifdef TARGET_NR_getuid32
10662     case TARGET_NR_getuid32:
10663         return get_errno(getuid());
10664 #endif
10665 
10666 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10667    /* Alpha specific */
10668     case TARGET_NR_getxuid:
10669          {
10670             uid_t euid;
10671             euid = geteuid();
10672             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10673          }
10674         return get_errno(getuid());
10675 #endif
10676 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10677    /* Alpha specific */
10678     case TARGET_NR_getxgid:
10679          {
10680             gid_t egid;
10681             egid = getegid();
10682             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10683          }
10684         return get_errno(getgid());
10685 #endif
10686 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10687     /* Alpha specific */
10688     case TARGET_NR_osf_getsysinfo:
10689         ret = -TARGET_EOPNOTSUPP;
10690         switch (arg1) {
10691           case TARGET_GSI_IEEE_FP_CONTROL:
10692             {
10693                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10694                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10695 
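                      /*
                       * The accrued status bits live only in the hardware
                       * FPCR (see the comment in osf_setsysinfo below), so
                       * merge them into the reported software completion
                       * control word.
                       */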
10696                 swcr &= ~SWCR_STATUS_MASK;
10697                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10698 
10699                 if (put_user_u64(swcr, arg2))
10700                     return -TARGET_EFAULT;
10701                 ret = 0;
10702             }
10703             break;
10704 
10705           /* case GSI_IEEE_STATE_AT_SIGNAL:
10706              -- Not implemented in linux kernel.
10707              case GSI_UACPROC:
10708              -- Retrieves current unaligned access state; not much used.
10709              case GSI_PROC_TYPE:
10710              -- Retrieves implver information; surely not used.
10711              case GSI_GET_HWRPB:
10712              -- Grabs a copy of the HWRPB; surely not used.
10713           */
10714         }
10715         return ret;
10716 #endif
10717 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10718     /* Alpha specific */
10719     case TARGET_NR_osf_setsysinfo:
10720         ret = -TARGET_EOPNOTSUPP;
10721         switch (arg1) {
10722           case TARGET_SSI_IEEE_FP_CONTROL:
10723             {
10724                 uint64_t swcr, fpcr;
10725 
10726                 if (get_user_u64(swcr, arg2)) {
10727                     return -TARGET_EFAULT;
10728                 }
10729 
10730                 /*
10731                  * The kernel calls swcr_update_status to update the
10732                  * status bits from the fpcr at every point that it
10733                  * could be queried.  Therefore, we store the status
10734                  * bits only in FPCR.
10735                  */
10736                 ((CPUAlphaState *)cpu_env)->swcr
10737                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10738 
10739                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10740                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10741                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10742                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10743                 ret = 0;
10744             }
10745             break;
10746 
10747           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10748             {
10749                 uint64_t exc, fpcr, fex;
10750 
10751                 if (get_user_u64(exc, arg2)) {
10752                     return -TARGET_EFAULT;
10753                 }
10754                 exc &= SWCR_STATUS_MASK;
10755                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10756 
10757                 /* Old exceptions are not signaled.  */
10758                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10759                 fex = exc & ~fex;
10760                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10761                 fex &= ((CPUArchState *)cpu_env)->swcr;
10762 
10763                 /* Update the hardware fpcr.  */
10764                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10765                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10766 
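                      /*
                       * Anything left in fex is a newly raised exception for
                       * which the guest has trapping enabled, so a SIGFPE
                       * with the best-matching si_code must be delivered.
                       */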
10767                 if (fex) {
10768                     int si_code = TARGET_FPE_FLTUNK;
10769                     target_siginfo_t info;
10770 
10771                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10772                         si_code = TARGET_FPE_FLTUND;
10773                     }
10774                     if (fex & SWCR_TRAP_ENABLE_INE) {
10775                         si_code = TARGET_FPE_FLTRES;
10776                     }
10777                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10778                         si_code = TARGET_FPE_FLTUND;
10779                     }
10780                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10781                         si_code = TARGET_FPE_FLTOVF;
10782                     }
10783                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10784                         si_code = TARGET_FPE_FLTDIV;
10785                     }
10786                     if (fex & SWCR_TRAP_ENABLE_INV) {
10787                         si_code = TARGET_FPE_FLTINV;
10788                     }
10789 
10790                     info.si_signo = SIGFPE;
10791                     info.si_errno = 0;
10792                     info.si_code = si_code;
10793                     info._sifields._sigfault._addr
10794                         = ((CPUArchState *)cpu_env)->pc;
10795                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10796                                  QEMU_SI_FAULT, &info);
10797                 }
10798                 ret = 0;
10799             }
10800             break;
10801 
10802           /* case SSI_NVPAIRS:
10803              -- Used with SSIN_UACPROC to enable unaligned accesses.
10804              case SSI_IEEE_STATE_AT_SIGNAL:
10805              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10806              -- Not implemented in linux kernel
10807           */
10808         }
10809         return ret;
10810 #endif
10811 #ifdef TARGET_NR_osf_sigprocmask
10812     /* Alpha specific.  */
10813     case TARGET_NR_osf_sigprocmask:
10814         {
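                  /*
                   * The OSF variant passes the new mask by value in arg2 and
                   * returns the old mask as the syscall result rather than
                   * writing it out to guest memory.
                   */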
10815             abi_ulong mask;
10816             int how;
10817             sigset_t set, oldset;
10818 
10819             switch (arg1) {
10820             case TARGET_SIG_BLOCK:
10821                 how = SIG_BLOCK;
10822                 break;
10823             case TARGET_SIG_UNBLOCK:
10824                 how = SIG_UNBLOCK;
10825                 break;
10826             case TARGET_SIG_SETMASK:
10827                 how = SIG_SETMASK;
10828                 break;
10829             default:
10830                 return -TARGET_EINVAL;
10831             }
10832             mask = arg2;
10833             target_to_host_old_sigset(&set, &mask);
10834             ret = do_sigprocmask(how, &set, &oldset);
10835             if (!ret) {
10836                 host_to_target_old_sigset(&mask, &oldset);
10837                 ret = mask;
10838             }
10839         }
10840         return ret;
10841 #endif
10842 
10843 #ifdef TARGET_NR_getgid32
10844     case TARGET_NR_getgid32:
10845         return get_errno(getgid());
10846 #endif
10847 #ifdef TARGET_NR_geteuid32
10848     case TARGET_NR_geteuid32:
10849         return get_errno(geteuid());
10850 #endif
10851 #ifdef TARGET_NR_getegid32
10852     case TARGET_NR_getegid32:
10853         return get_errno(getegid());
10854 #endif
10855 #ifdef TARGET_NR_setreuid32
10856     case TARGET_NR_setreuid32:
10857         return get_errno(setreuid(arg1, arg2));
10858 #endif
10859 #ifdef TARGET_NR_setregid32
10860     case TARGET_NR_setregid32:
10861         return get_errno(setregid(arg1, arg2));
10862 #endif
10863 #ifdef TARGET_NR_getgroups32
10864     case TARGET_NR_getgroups32:
10865         {
10866             int gidsetsize = arg1;
10867             uint32_t *target_grouplist;
10868             gid_t *grouplist;
10869             int i;
10870 
10871             grouplist = alloca(gidsetsize * sizeof(gid_t));
10872             ret = get_errno(getgroups(gidsetsize, grouplist));
10873             if (gidsetsize == 0)
10874                 return ret;
10875             if (!is_error(ret)) {
10876                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10877                 if (!target_grouplist) {
10878                     return -TARGET_EFAULT;
10879                 }
10880                 for (i = 0; i < ret; i++)
10881                     target_grouplist[i] = tswap32(grouplist[i]);
10882                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10883             }
10884         }
10885         return ret;
10886 #endif
10887 #ifdef TARGET_NR_setgroups32
10888     case TARGET_NR_setgroups32:
10889         {
10890             int gidsetsize = arg1;
10891             uint32_t *target_grouplist;
10892             gid_t *grouplist;
10893             int i;
10894 
10895             grouplist = alloca(gidsetsize * sizeof(gid_t));
10896             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10897             if (!target_grouplist) {
10898                 return -TARGET_EFAULT;
10899             }
10900             for (i = 0; i < gidsetsize; i++)
10901                 grouplist[i] = tswap32(target_grouplist[i]);
10902             unlock_user(target_grouplist, arg2, 0);
10903             return get_errno(setgroups(gidsetsize, grouplist));
10904         }
10905 #endif
10906 #ifdef TARGET_NR_fchown32
10907     case TARGET_NR_fchown32:
10908         return get_errno(fchown(arg1, arg2, arg3));
10909 #endif
10910 #ifdef TARGET_NR_setresuid32
10911     case TARGET_NR_setresuid32:
10912         return get_errno(sys_setresuid(arg1, arg2, arg3));
10913 #endif
10914 #ifdef TARGET_NR_getresuid32
10915     case TARGET_NR_getresuid32:
10916         {
10917             uid_t ruid, euid, suid;
10918             ret = get_errno(getresuid(&ruid, &euid, &suid));
10919             if (!is_error(ret)) {
10920                 if (put_user_u32(ruid, arg1)
10921                     || put_user_u32(euid, arg2)
10922                     || put_user_u32(suid, arg3))
10923                     return -TARGET_EFAULT;
10924             }
10925         }
10926         return ret;
10927 #endif
10928 #ifdef TARGET_NR_setresgid32
10929     case TARGET_NR_setresgid32:
10930         return get_errno(sys_setresgid(arg1, arg2, arg3));
10931 #endif
10932 #ifdef TARGET_NR_getresgid32
10933     case TARGET_NR_getresgid32:
10934         {
10935             gid_t rgid, egid, sgid;
10936             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10937             if (!is_error(ret)) {
10938                 if (put_user_u32(rgid, arg1)
10939                     || put_user_u32(egid, arg2)
10940                     || put_user_u32(sgid, arg3))
10941                     return -TARGET_EFAULT;
10942             }
10943         }
10944         return ret;
10945 #endif
10946 #ifdef TARGET_NR_chown32
10947     case TARGET_NR_chown32:
10948         if (!(p = lock_user_string(arg1)))
10949             return -TARGET_EFAULT;
10950         ret = get_errno(chown(p, arg2, arg3));
10951         unlock_user(p, arg1, 0);
10952         return ret;
10953 #endif
10954 #ifdef TARGET_NR_setuid32
10955     case TARGET_NR_setuid32:
10956         return get_errno(sys_setuid(arg1));
10957 #endif
10958 #ifdef TARGET_NR_setgid32
10959     case TARGET_NR_setgid32:
10960         return get_errno(sys_setgid(arg1));
10961 #endif
10962 #ifdef TARGET_NR_setfsuid32
10963     case TARGET_NR_setfsuid32:
10964         return get_errno(setfsuid(arg1));
10965 #endif
10966 #ifdef TARGET_NR_setfsgid32
10967     case TARGET_NR_setfsgid32:
10968         return get_errno(setfsgid(arg1));
10969 #endif
10970 #ifdef TARGET_NR_mincore
10971     case TARGET_NR_mincore:
10972         {
10973             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10974             if (!a) {
10975                 return -TARGET_ENOMEM;
10976             }
10977             p = lock_user_string(arg3);
10978             if (!p) {
10979                 ret = -TARGET_EFAULT;
10980             } else {
10981                 ret = get_errno(mincore(a, arg2, p));
10982                 unlock_user(p, arg3, ret);
10983             }
10984             unlock_user(a, arg1, 0);
10985         }
10986         return ret;
10987 #endif
10988 #ifdef TARGET_NR_arm_fadvise64_64
10989     case TARGET_NR_arm_fadvise64_64:
10990         /* arm_fadvise64_64 looks like fadvise64_64 but
10991          * with different argument order: fd, advice, offset, len
10992          * rather than the usual fd, offset, len, advice.
10993          * Note that offset and len are both 64-bit so appear as
10994          * pairs of 32-bit registers.
10995          */
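              /*
               * target_offset64() reassembles each (high, low) register pair
               * into a single 64-bit value according to the guest ABI's pair
               * ordering.
               */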
10996         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10997                             target_offset64(arg5, arg6), arg2);
10998         return -host_to_target_errno(ret);
10999 #endif
11000 
11001 #if TARGET_ABI_BITS == 32
11002 
11003 #ifdef TARGET_NR_fadvise64_64
11004     case TARGET_NR_fadvise64_64:
11005 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11006         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11007         ret = arg2;
11008         arg2 = arg3;
11009         arg3 = arg4;
11010         arg4 = arg5;
11011         arg5 = arg6;
11012         arg6 = ret;
11013 #else
11014         /* 6 args: fd, offset (high, low), len (high, low), advice */
11015         if (regpairs_aligned(cpu_env, num)) {
11016             /* offset is in (3,4), len in (5,6) and advice in 7 */
11017             arg2 = arg3;
11018             arg3 = arg4;
11019             arg4 = arg5;
11020             arg5 = arg6;
11021             arg6 = arg7;
11022         }
11023 #endif
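              /*
               * After the shuffling above the arguments are in the generic
               * order: fd, offset (high, low), len (high, low), advice.
               */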
11024         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11025                             target_offset64(arg4, arg5), arg6);
11026         return -host_to_target_errno(ret);
11027 #endif
11028 
11029 #ifdef TARGET_NR_fadvise64
11030     case TARGET_NR_fadvise64:
11031         /* 5 args: fd, offset (high, low), len, advice */
11032         if (regpairs_aligned(cpu_env, num)) {
11033             /* offset is in (3,4), len in 5 and advice in 6 */
11034             arg2 = arg3;
11035             arg3 = arg4;
11036             arg4 = arg5;
11037             arg5 = arg6;
11038         }
11039         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11040         return -host_to_target_errno(ret);
11041 #endif
11042 
11043 #else /* not a 32-bit ABI */
11044 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11045 #ifdef TARGET_NR_fadvise64_64
11046     case TARGET_NR_fadvise64_64:
11047 #endif
11048 #ifdef TARGET_NR_fadvise64
11049     case TARGET_NR_fadvise64:
11050 #endif
11051 #ifdef TARGET_S390X
11052         switch (arg4) {
11053         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11054         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11055         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11056         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11057         default: break;
11058         }
11059 #endif
11060         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11061 #endif
11062 #endif /* end of 64-bit ABI fadvise handling */
11063 
11064 #ifdef TARGET_NR_madvise
11065     case TARGET_NR_madvise:
11066         /* A straight passthrough may not be safe because QEMU sometimes
11067            turns private file-backed mappings into anonymous mappings,
11068            which would break MADV_DONTNEED.  Since madvise is only a
11069            hint, ignoring it and returning success is acceptable.  */
11070         return 0;
11071 #endif
11072 #if TARGET_ABI_BITS == 32
11073     case TARGET_NR_fcntl64:
11074     {
11075         int cmd;
11076         struct flock64 fl;
11077         from_flock64_fn *copyfrom = copy_from_user_flock64;
11078         to_flock64_fn *copyto = copy_to_user_flock64;
11079 
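              /*
               * ARM OABI and EABI lay out struct flock64 differently (EABI
               * pads 64-bit members to 8-byte alignment), so old-ABI guests
               * need their own copy helpers.
               */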
11080 #ifdef TARGET_ARM
11081         if (!((CPUARMState *)cpu_env)->eabi) {
11082             copyfrom = copy_from_user_oabi_flock64;
11083             copyto = copy_to_user_oabi_flock64;
11084         }
11085 #endif
11086 
11087         cmd = target_to_host_fcntl_cmd(arg2);
11088         if (cmd == -TARGET_EINVAL) {
11089             return cmd;
11090         }
11091 
11092         switch (arg2) {
11093         case TARGET_F_GETLK64:
11094             ret = copyfrom(&fl, arg3);
11095             if (ret) {
11096                 break;
11097             }
11098             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11099             if (ret == 0) {
11100                 ret = copyto(arg3, &fl);
11101             }
11102             break;
11103 
11104         case TARGET_F_SETLK64:
11105         case TARGET_F_SETLKW64:
11106             ret = copyfrom(&fl, arg3);
11107             if (ret) {
11108                 break;
11109             }
11110             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11111             break;
11112         default:
11113             ret = do_fcntl(arg1, arg2, arg3);
11114             break;
11115         }
11116         return ret;
11117     }
11118 #endif
11119 #ifdef TARGET_NR_cacheflush
11120     case TARGET_NR_cacheflush:
11121         /* self-modifying code is handled automatically, so nothing needed */
11122         return 0;
11123 #endif
11124 #ifdef TARGET_NR_getpagesize
11125     case TARGET_NR_getpagesize:
11126         return TARGET_PAGE_SIZE;
11127 #endif
11128     case TARGET_NR_gettid:
11129         return get_errno(sys_gettid());
11130 #ifdef TARGET_NR_readahead
11131     case TARGET_NR_readahead:
11132 #if TARGET_ABI_BITS == 32
11133         if (regpairs_aligned(cpu_env, num)) {
11134             arg2 = arg3;
11135             arg3 = arg4;
11136             arg4 = arg5;
11137         }
11138         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11139 #else
11140         ret = get_errno(readahead(arg1, arg2, arg3));
11141 #endif
11142         return ret;
11143 #endif
11144 #ifdef CONFIG_ATTR
11145 #ifdef TARGET_NR_setxattr
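          /*
           * The *xattr cases follow a common pattern: lock the guest name
           * and (optional) value buffers, call the matching host xattr
           * function directly, and unlock the buffers afterwards.
           */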
11146     case TARGET_NR_listxattr:
11147     case TARGET_NR_llistxattr:
11148     {
11149         void *p, *b = 0;
11150         if (arg2) {
11151             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11152             if (!b) {
11153                 return -TARGET_EFAULT;
11154             }
11155         }
11156         p = lock_user_string(arg1);
11157         if (p) {
11158             if (num == TARGET_NR_listxattr) {
11159                 ret = get_errno(listxattr(p, b, arg3));
11160             } else {
11161                 ret = get_errno(llistxattr(p, b, arg3));
11162             }
11163         } else {
11164             ret = -TARGET_EFAULT;
11165         }
11166         unlock_user(p, arg1, 0);
11167         unlock_user(b, arg2, arg3);
11168         return ret;
11169     }
11170     case TARGET_NR_flistxattr:
11171     {
11172         void *b = 0;
11173         if (arg2) {
11174             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11175             if (!b) {
11176                 return -TARGET_EFAULT;
11177             }
11178         }
11179         ret = get_errno(flistxattr(arg1, b, arg3));
11180         unlock_user(b, arg2, arg3);
11181         return ret;
11182     }
11183     case TARGET_NR_setxattr:
11184     case TARGET_NR_lsetxattr:
11185         {
11186             void *p, *n, *v = 0;
11187             if (arg3) {
11188                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11189                 if (!v) {
11190                     return -TARGET_EFAULT;
11191                 }
11192             }
11193             p = lock_user_string(arg1);
11194             n = lock_user_string(arg2);
11195             if (p && n) {
11196                 if (num == TARGET_NR_setxattr) {
11197                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11198                 } else {
11199                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11200                 }
11201             } else {
11202                 ret = -TARGET_EFAULT;
11203             }
11204             unlock_user(p, arg1, 0);
11205             unlock_user(n, arg2, 0);
11206             unlock_user(v, arg3, 0);
11207         }
11208         return ret;
11209     case TARGET_NR_fsetxattr:
11210         {
11211             void *n, *v = 0;
11212             if (arg3) {
11213                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11214                 if (!v) {
11215                     return -TARGET_EFAULT;
11216                 }
11217             }
11218             n = lock_user_string(arg2);
11219             if (n) {
11220                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11221             } else {
11222                 ret = -TARGET_EFAULT;
11223             }
11224             unlock_user(n, arg2, 0);
11225             unlock_user(v, arg3, 0);
11226         }
11227         return ret;
11228     case TARGET_NR_getxattr:
11229     case TARGET_NR_lgetxattr:
11230         {
11231             void *p, *n, *v = 0;
11232             if (arg3) {
11233                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11234                 if (!v) {
11235                     return -TARGET_EFAULT;
11236                 }
11237             }
11238             p = lock_user_string(arg1);
11239             n = lock_user_string(arg2);
11240             if (p && n) {
11241                 if (num == TARGET_NR_getxattr) {
11242                     ret = get_errno(getxattr(p, n, v, arg4));
11243                 } else {
11244                     ret = get_errno(lgetxattr(p, n, v, arg4));
11245                 }
11246             } else {
11247                 ret = -TARGET_EFAULT;
11248             }
11249             unlock_user(p, arg1, 0);
11250             unlock_user(n, arg2, 0);
11251             unlock_user(v, arg3, arg4);
11252         }
11253         return ret;
11254     case TARGET_NR_fgetxattr:
11255         {
11256             void *n, *v = 0;
11257             if (arg3) {
11258                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11259                 if (!v) {
11260                     return -TARGET_EFAULT;
11261                 }
11262             }
11263             n = lock_user_string(arg2);
11264             if (n) {
11265                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11266             } else {
11267                 ret = -TARGET_EFAULT;
11268             }
11269             unlock_user(n, arg2, 0);
11270             unlock_user(v, arg3, arg4);
11271         }
11272         return ret;
11273     case TARGET_NR_removexattr:
11274     case TARGET_NR_lremovexattr:
11275         {
11276             void *p, *n;
11277             p = lock_user_string(arg1);
11278             n = lock_user_string(arg2);
11279             if (p && n) {
11280                 if (num == TARGET_NR_removexattr) {
11281                     ret = get_errno(removexattr(p, n));
11282                 } else {
11283                     ret = get_errno(lremovexattr(p, n));
11284                 }
11285             } else {
11286                 ret = -TARGET_EFAULT;
11287             }
11288             unlock_user(p, arg1, 0);
11289             unlock_user(n, arg2, 0);
11290         }
11291         return ret;
11292     case TARGET_NR_fremovexattr:
11293         {
11294             void *n;
11295             n = lock_user_string(arg2);
11296             if (n) {
11297                 ret = get_errno(fremovexattr(arg1, n));
11298             } else {
11299                 ret = -TARGET_EFAULT;
11300             }
11301             unlock_user(n, arg2, 0);
11302         }
11303         return ret;
11304 #endif
11305 #endif /* CONFIG_ATTR */
11306 #ifdef TARGET_NR_set_thread_area
11307     case TARGET_NR_set_thread_area:
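          /*
           * Each architecture keeps its TLS pointer somewhere different:
           * the CP0 UserLocal register on MIPS, PR_PID on CRIS, a TLS
           * descriptor via do_set_thread_area() on 32-bit x86, and the
           * TaskState tp_value field on M68K.
           */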
11308 #if defined(TARGET_MIPS)
11309       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11310       return 0;
11311 #elif defined(TARGET_CRIS)
11312       if (arg1 & 0xff)
11313           ret = -TARGET_EINVAL;
11314       else {
11315           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11316           ret = 0;
11317       }
11318       return ret;
11319 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11320       return do_set_thread_area(cpu_env, arg1);
11321 #elif defined(TARGET_M68K)
11322       {
11323           TaskState *ts = cpu->opaque;
11324           ts->tp_value = arg1;
11325           return 0;
11326       }
11327 #else
11328       return -TARGET_ENOSYS;
11329 #endif
11330 #endif
11331 #ifdef TARGET_NR_get_thread_area
11332     case TARGET_NR_get_thread_area:
11333 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11334         return do_get_thread_area(cpu_env, arg1);
11335 #elif defined(TARGET_M68K)
11336         {
11337             TaskState *ts = cpu->opaque;
11338             return ts->tp_value;
11339         }
11340 #else
11341         return -TARGET_ENOSYS;
11342 #endif
11343 #endif
11344 #ifdef TARGET_NR_getdomainname
11345     case TARGET_NR_getdomainname:
11346         return -TARGET_ENOSYS;
11347 #endif
11348 
11349 #ifdef TARGET_NR_clock_settime
11350     case TARGET_NR_clock_settime:
11351     {
11352         struct timespec ts;
11353 
11354         ret = target_to_host_timespec(&ts, arg2);
11355         if (!is_error(ret)) {
11356             ret = get_errno(clock_settime(arg1, &ts));
11357         }
11358         return ret;
11359     }
11360 #endif
11361 #ifdef TARGET_NR_clock_gettime
11362     case TARGET_NR_clock_gettime:
11363     {
11364         struct timespec ts;
11365         ret = get_errno(clock_gettime(arg1, &ts));
11366         if (!is_error(ret)) {
11367             ret = host_to_target_timespec(arg2, &ts);
11368         }
11369         return ret;
11370     }
11371 #endif
11372 #ifdef TARGET_NR_clock_getres
11373     case TARGET_NR_clock_getres:
11374     {
11375         struct timespec ts;
11376         ret = get_errno(clock_getres(arg1, &ts));
11377         if (!is_error(ret)) {
11378             host_to_target_timespec(arg2, &ts);
11379         }
11380         return ret;
11381     }
11382 #endif
11383 #ifdef TARGET_NR_clock_nanosleep
11384     case TARGET_NR_clock_nanosleep:
11385     {
11386         struct timespec ts;
11387         target_to_host_timespec(&ts, arg3);
11388         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11389                                              &ts, arg4 ? &ts : NULL));
11390         if (arg4)
11391             host_to_target_timespec(arg4, &ts);
11392 
11393 #if defined(TARGET_PPC)
11394         /* clock_nanosleep is odd in that it returns positive errno values.
11395          * On PPC, CR0 bit 3 should be set in such a situation. */
11396         if (ret && ret != -TARGET_ERESTARTSYS) {
11397             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11398         }
11399 #endif
11400         return ret;
11401     }
11402 #endif
11403 
11404 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11405     case TARGET_NR_set_tid_address:
11406         return get_errno(set_tid_address((int *)g2h(arg1)));
11407 #endif
11408 
11409     case TARGET_NR_tkill:
11410         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11411 
11412     case TARGET_NR_tgkill:
11413         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11414                          target_to_host_signal(arg3)));
11415 
11416 #ifdef TARGET_NR_set_robust_list
11417     case TARGET_NR_set_robust_list:
11418     case TARGET_NR_get_robust_list:
11419         /* The ABI for supporting robust futexes has userspace pass
11420          * the kernel a pointer to a linked list which is updated by
11421          * userspace after the syscall; the list is walked by the kernel
11422          * when the thread exits. Since the linked list in QEMU guest
11423          * memory isn't a valid linked list for the host and we have
11424          * no way to reliably intercept the thread-death event, we can't
11425          * support these. Silently return ENOSYS so that guest userspace
11426          * falls back to a non-robust futex implementation (which should
11427          * be OK except in the corner case of the guest crashing while
11428          * holding a mutex that is shared with another process via
11429          * shared memory).
11430          */
11431         return -TARGET_ENOSYS;
11432 #endif
11433 
11434 #if defined(TARGET_NR_utimensat)
11435     case TARGET_NR_utimensat:
11436         {
11437             struct timespec *tsp, ts[2];
11438             if (!arg3) {
11439                 tsp = NULL;
11440             } else {
11441                 target_to_host_timespec(ts, arg3);
11442                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11443                 tsp = ts;
11444             }
11445             if (!arg2)
11446                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11447             else {
11448                 if (!(p = lock_user_string(arg2))) {
11449                     return -TARGET_EFAULT;
11450                 }
11451                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11452                 unlock_user(p, arg2, 0);
11453             }
11454         }
11455         return ret;
11456 #endif
11457     case TARGET_NR_futex:
11458         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11459 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11460     case TARGET_NR_inotify_init:
11461         ret = get_errno(sys_inotify_init());
11462         if (ret >= 0) {
11463             fd_trans_register(ret, &target_inotify_trans);
11464         }
11465         return ret;
11466 #endif
11467 #ifdef CONFIG_INOTIFY1
11468 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11469     case TARGET_NR_inotify_init1:
11470         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11471                                           fcntl_flags_tbl)));
11472         if (ret >= 0) {
11473             fd_trans_register(ret, &target_inotify_trans);
11474         }
11475         return ret;
11476 #endif
11477 #endif
11478 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11479     case TARGET_NR_inotify_add_watch:
11480         p = lock_user_string(arg2);
11481         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11482         unlock_user(p, arg2, 0);
11483         return ret;
11484 #endif
11485 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11486     case TARGET_NR_inotify_rm_watch:
11487         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11488 #endif
11489 
11490 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11491     case TARGET_NR_mq_open:
11492         {
11493             struct mq_attr posix_mq_attr;
11494             struct mq_attr *pposix_mq_attr;
11495             int host_flags;
11496 
11497             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11498             pposix_mq_attr = NULL;
11499             if (arg4) {
11500                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11501                     return -TARGET_EFAULT;
11502                 }
11503                 pposix_mq_attr = &posix_mq_attr;
11504             }
11505             p = lock_user_string(arg1 - 1);
11506             if (!p) {
11507                 return -TARGET_EFAULT;
11508             }
11509             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11510             unlock_user(p, arg1, 0);
11511         }
11512         return ret;
11513 
11514     case TARGET_NR_mq_unlink:
11515         p = lock_user_string(arg1 - 1);
11516         if (!p) {
11517             return -TARGET_EFAULT;
11518         }
11519         ret = get_errno(mq_unlink(p));
11520         unlock_user(p, arg1, 0);
11521         return ret;
11522 
11523     case TARGET_NR_mq_timedsend:
11524         {
11525             struct timespec ts;
11526 
11527             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11528             if (arg5 != 0) {
11529                 target_to_host_timespec(&ts, arg5);
11530                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11531                 host_to_target_timespec(arg5, &ts);
11532             } else {
11533                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11534             }
11535             unlock_user(p, arg2, arg3);
11536         }
11537         return ret;
11538 
11539     case TARGET_NR_mq_timedreceive:
11540         {
11541             struct timespec ts;
11542             unsigned int prio;
11543 
11544             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11545             if (arg5 != 0) {
11546                 target_to_host_timespec(&ts, arg5);
11547                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11548                                                      &prio, &ts));
11549                 host_to_target_timespec(arg5, &ts);
11550             } else {
11551                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11552                                                      &prio, NULL));
11553             }
11554             unlock_user(p, arg2, arg3);
11555             if (arg4 != 0)
11556                 put_user_u32(prio, arg4);
11557         }
11558         return ret;
11559 
11560     /* Not implemented for now... */
11561 /*     case TARGET_NR_mq_notify: */
11562 /*         break; */
11563 
11564     case TARGET_NR_mq_getsetattr:
11565         {
11566             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11567             ret = 0;
11568             if (arg2 != 0) {
11569                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11570                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11571                                            &posix_mq_attr_out));
11572             } else if (arg3 != 0) {
11573                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11574             }
11575             if (ret == 0 && arg3 != 0) {
11576                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11577             }
11578         }
11579         return ret;
11580 #endif
11581 
11582 #ifdef CONFIG_SPLICE
11583 #ifdef TARGET_NR_tee
11584     case TARGET_NR_tee:
11585         {
11586             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11587         }
11588         return ret;
11589 #endif
11590 #ifdef TARGET_NR_splice
11591     case TARGET_NR_splice:
11592         {
11593             loff_t loff_in, loff_out;
11594             loff_t *ploff_in = NULL, *ploff_out = NULL;
11595             if (arg2) {
11596                 if (get_user_u64(loff_in, arg2)) {
11597                     return -TARGET_EFAULT;
11598                 }
11599                 ploff_in = &loff_in;
11600             }
11601             if (arg4) {
11602                 if (get_user_u64(loff_out, arg4)) {
11603                     return -TARGET_EFAULT;
11604                 }
11605                 ploff_out = &loff_out;
11606             }
11607             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11608             if (arg2) {
11609                 if (put_user_u64(loff_in, arg2)) {
11610                     return -TARGET_EFAULT;
11611                 }
11612             }
11613             if (arg4) {
11614                 if (put_user_u64(loff_out, arg4)) {
11615                     return -TARGET_EFAULT;
11616                 }
11617             }
11618         }
11619         return ret;
11620 #endif
11621 #ifdef TARGET_NR_vmsplice
11622     case TARGET_NR_vmsplice:
11623         {
11624             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11625             if (vec != NULL) {
11626                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11627                 unlock_iovec(vec, arg2, arg3, 0);
11628             } else {
11629                 ret = -host_to_target_errno(errno);
11630             }
11631         }
11632         return ret;
11633 #endif
11634 #endif /* CONFIG_SPLICE */
11635 #ifdef CONFIG_EVENTFD
11636 #if defined(TARGET_NR_eventfd)
11637     case TARGET_NR_eventfd:
11638         ret = get_errno(eventfd(arg1, 0));
11639         if (ret >= 0) {
11640             fd_trans_register(ret, &target_eventfd_trans);
11641         }
11642         return ret;
11643 #endif
11644 #if defined(TARGET_NR_eventfd2)
11645     case TARGET_NR_eventfd2:
11646     {
11647         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11648         if (arg2 & TARGET_O_NONBLOCK) {
11649             host_flags |= O_NONBLOCK;
11650         }
11651         if (arg2 & TARGET_O_CLOEXEC) {
11652             host_flags |= O_CLOEXEC;
11653         }
11654         ret = get_errno(eventfd(arg1, host_flags));
11655         if (ret >= 0) {
11656             fd_trans_register(ret, &target_eventfd_trans);
11657         }
11658         return ret;
11659     }
11660 #endif
11661 #endif /* CONFIG_EVENTFD  */
11662 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11663     case TARGET_NR_fallocate:
11664 #if TARGET_ABI_BITS == 32
11665         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11666                                   target_offset64(arg5, arg6)));
11667 #else
11668         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11669 #endif
11670         return ret;
11671 #endif
11672 #if defined(CONFIG_SYNC_FILE_RANGE)
11673 #if defined(TARGET_NR_sync_file_range)
11674     case TARGET_NR_sync_file_range:
11675 #if TARGET_ABI_BITS == 32
11676 #if defined(TARGET_MIPS)
11677         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11678                                         target_offset64(arg5, arg6), arg7));
11679 #else
11680         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11681                                         target_offset64(arg4, arg5), arg6));
11682 #endif /* !TARGET_MIPS */
11683 #else
11684         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11685 #endif
11686         return ret;
11687 #endif
11688 #if defined(TARGET_NR_sync_file_range2)
11689     case TARGET_NR_sync_file_range2:
11690         /* This is like sync_file_range but the arguments are reordered */
11691 #if TARGET_ABI_BITS == 32
11692         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11693                                         target_offset64(arg5, arg6), arg2));
11694 #else
11695         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11696 #endif
11697         return ret;
11698 #endif
11699 #endif
11700 #if defined(TARGET_NR_signalfd4)
11701     case TARGET_NR_signalfd4:
11702         return do_signalfd4(arg1, arg2, arg4);
11703 #endif
11704 #if defined(TARGET_NR_signalfd)
11705     case TARGET_NR_signalfd:
11706         return do_signalfd4(arg1, arg2, 0);
11707 #endif
11708 #if defined(CONFIG_EPOLL)
11709 #if defined(TARGET_NR_epoll_create)
11710     case TARGET_NR_epoll_create:
11711         return get_errno(epoll_create(arg1));
11712 #endif
11713 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11714     case TARGET_NR_epoll_create1:
11715         return get_errno(epoll_create1(arg1));
11716 #endif
11717 #if defined(TARGET_NR_epoll_ctl)
11718     case TARGET_NR_epoll_ctl:
11719     {
11720         struct epoll_event ep;
11721         struct epoll_event *epp = 0;
11722         if (arg4) {
11723             struct target_epoll_event *target_ep;
11724             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11725                 return -TARGET_EFAULT;
11726             }
11727             ep.events = tswap32(target_ep->events);
11728             /* The epoll_data_t union is just opaque data to the kernel,
11729              * so we transfer all 64 bits across and need not worry what
11730              * actual data type it is.
11731              */
11732             ep.data.u64 = tswap64(target_ep->data.u64);
11733             unlock_user_struct(target_ep, arg4, 0);
11734             epp = &ep;
11735         }
11736         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11737     }
11738 #endif
11739 
11740 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11741 #if defined(TARGET_NR_epoll_wait)
11742     case TARGET_NR_epoll_wait:
11743 #endif
11744 #if defined(TARGET_NR_epoll_pwait)
11745     case TARGET_NR_epoll_pwait:
11746 #endif
11747     {
11748         struct target_epoll_event *target_ep;
11749         struct epoll_event *ep;
11750         int epfd = arg1;
11751         int maxevents = arg3;
11752         int timeout = arg4;
11753 
11754         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11755             return -TARGET_EINVAL;
11756         }
11757 
11758         target_ep = lock_user(VERIFY_WRITE, arg2,
11759                               maxevents * sizeof(struct target_epoll_event), 1);
11760         if (!target_ep) {
11761             return -TARGET_EFAULT;
11762         }
11763 
11764         ep = g_try_new(struct epoll_event, maxevents);
11765         if (!ep) {
11766             unlock_user(target_ep, arg2, 0);
11767             return -TARGET_ENOMEM;
11768         }
11769 
11770         switch (num) {
11771 #if defined(TARGET_NR_epoll_pwait)
11772         case TARGET_NR_epoll_pwait:
11773         {
11774             target_sigset_t *target_set;
11775             sigset_t _set, *set = &_set;
11776 
11777             if (arg5) {
11778                 if (arg6 != sizeof(target_sigset_t)) {
11779                     ret = -TARGET_EINVAL;
11780                     break;
11781                 }
11782 
11783                 target_set = lock_user(VERIFY_READ, arg5,
11784                                        sizeof(target_sigset_t), 1);
11785                 if (!target_set) {
11786                     ret = -TARGET_EFAULT;
11787                     break;
11788                 }
11789                 target_to_host_sigset(set, target_set);
11790                 unlock_user(target_set, arg5, 0);
11791             } else {
11792                 set = NULL;
11793             }
11794 
11795             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11796                                              set, SIGSET_T_SIZE));
11797             break;
11798         }
11799 #endif
11800 #if defined(TARGET_NR_epoll_wait)
11801         case TARGET_NR_epoll_wait:
11802             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11803                                              NULL, 0));
11804             break;
11805 #endif
11806         default:
11807             ret = -TARGET_ENOSYS;
11808         }
11809         if (!is_error(ret)) {
11810             int i;
11811             for (i = 0; i < ret; i++) {
11812                 target_ep[i].events = tswap32(ep[i].events);
11813                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11814             }
11815             unlock_user(target_ep, arg2,
11816                         ret * sizeof(struct target_epoll_event));
11817         } else {
11818             unlock_user(target_ep, arg2, 0);
11819         }
11820         g_free(ep);
11821         return ret;
11822     }
11823 #endif
11824 #endif
11825 #ifdef TARGET_NR_prlimit64
11826     case TARGET_NR_prlimit64:
11827     {
11828         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
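              /*
               * Both the new and old limits use the fixed 64-bit
               * struct rlimit64 layout, so plain tswap64() is enough for
               * the conversion.
               */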
11829         struct target_rlimit64 *target_rnew, *target_rold;
11830         struct host_rlimit64 rnew, rold, *rnewp = 0;
11831         int resource = target_to_host_resource(arg2);
11832         if (arg3) {
11833             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11834                 return -TARGET_EFAULT;
11835             }
11836             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11837             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11838             unlock_user_struct(target_rnew, arg3, 0);
11839             rnewp = &rnew;
11840         }
11841 
11842         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11843         if (!is_error(ret) && arg4) {
11844             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11845                 return -TARGET_EFAULT;
11846             }
11847             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11848             target_rold->rlim_max = tswap64(rold.rlim_max);
11849             unlock_user_struct(target_rold, arg4, 1);
11850         }
11851         return ret;
11852     }
11853 #endif
11854 #ifdef TARGET_NR_gethostname
11855     case TARGET_NR_gethostname:
11856     {
11857         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11858         if (name) {
11859             ret = get_errno(gethostname(name, arg2));
11860             unlock_user(name, arg1, arg2);
11861         } else {
11862             ret = -TARGET_EFAULT;
11863         }
11864         return ret;
11865     }
11866 #endif
11867 #ifdef TARGET_NR_atomic_cmpxchg_32
11868     case TARGET_NR_atomic_cmpxchg_32:
11869     {
11870         /* should use start_exclusive from main.c */
11871         abi_ulong mem_value;
11872         if (get_user_u32(mem_value, arg6)) {
11873             target_siginfo_t info;
11874             info.si_signo = SIGSEGV;
11875             info.si_errno = 0;
11876             info.si_code = TARGET_SEGV_MAPERR;
11877             info._sifields._sigfault._addr = arg6;
11878             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11879                          QEMU_SI_FAULT, &info);
11880             ret = 0xdeadbeef;
11881 
11882         }
11883         if (mem_value == arg2)
11884             put_user_u32(arg1, arg6);
11885         return mem_value;
11886     }
11887 #endif
11888 #ifdef TARGET_NR_atomic_barrier
11889     case TARGET_NR_atomic_barrier:
11890         /* As with the kernel implementation and the
11891            QEMU ARM barrier, this can safely be a no-op. */
11892         return 0;
11893 #endif
11894 
11895 #ifdef TARGET_NR_timer_create
11896     case TARGET_NR_timer_create:
11897     {
11898         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11899 
11900         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11901 
11902         int clkid = arg1;
11903         int timer_index = next_free_host_timer();
11904 
11905         if (timer_index < 0) {
11906             ret = -TARGET_EAGAIN;
11907         } else {
11908             timer_t *phtimer = g_posix_timers + timer_index;
11909 
11910             if (arg2) {
11911                 phost_sevp = &host_sevp;
11912                 ret = target_to_host_sigevent(phost_sevp, arg2);
11913                 if (ret != 0) {
11914                     return ret;
11915                 }
11916             }
11917 
11918             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11919             if (ret) {
11920                 phtimer = NULL;
11921             } else {
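                      /*
                       * The guest-visible timer id is the index into
                       * g_posix_timers tagged with TIMER_MAGIC rather than
                       * the raw host timer_t; get_timer_id() undoes this
                       * encoding for the other timer_* syscalls.
                       */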
11922                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11923                     return -TARGET_EFAULT;
11924                 }
11925             }
11926         }
11927         return ret;
11928     }
11929 #endif
11930 
11931 #ifdef TARGET_NR_timer_settime
11932     case TARGET_NR_timer_settime:
11933     {
11934         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11935          * struct itimerspec * old_value */
11936         target_timer_t timerid = get_timer_id(arg1);
11937 
11938         if (timerid < 0) {
11939             ret = timerid;
11940         } else if (arg3 == 0) {
11941             ret = -TARGET_EINVAL;
11942         } else {
11943             timer_t htimer = g_posix_timers[timerid];
11944             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11945 
11946             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11947                 return -TARGET_EFAULT;
11948             }
11949             ret = get_errno(
11950                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11951             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11952                 return -TARGET_EFAULT;
11953             }
11954         }
11955         return ret;
11956     }
11957 #endif
11958 
11959 #ifdef TARGET_NR_timer_gettime
11960     case TARGET_NR_timer_gettime:
11961     {
11962         /* args: timer_t timerid, struct itimerspec *curr_value */
11963         target_timer_t timerid = get_timer_id(arg1);
11964 
11965         if (timerid < 0) {
11966             ret = timerid;
11967         } else if (!arg2) {
11968             ret = -TARGET_EFAULT;
11969         } else {
11970             timer_t htimer = g_posix_timers[timerid];
11971             struct itimerspec hspec;
11972             ret = get_errno(timer_gettime(htimer, &hspec));
11973 
11974             if (host_to_target_itimerspec(arg2, &hspec)) {
11975                 ret = -TARGET_EFAULT;
11976             }
11977         }
11978         return ret;
11979     }
11980 #endif
11981 
11982 #ifdef TARGET_NR_timer_getoverrun
11983     case TARGET_NR_timer_getoverrun:
11984     {
11985         /* args: timer_t timerid */
11986         target_timer_t timerid = get_timer_id(arg1);
11987 
11988         if (timerid < 0) {
11989             ret = timerid;
11990         } else {
11991             timer_t htimer = g_posix_timers[timerid];
11992             ret = get_errno(timer_getoverrun(htimer));
11993         }
11994         return ret;
11995     }
11996 #endif
11997 
11998 #ifdef TARGET_NR_timer_delete
11999     case TARGET_NR_timer_delete:
12000     {
12001         /* args: timer_t timerid */
12002         target_timer_t timerid = get_timer_id(arg1);
12003 
12004         if (timerid < 0) {
12005             ret = timerid;
12006         } else {
12007             timer_t htimer = g_posix_timers[timerid];
12008             ret = get_errno(timer_delete(htimer));
12009             g_posix_timers[timerid] = 0;
12010         }
12011         return ret;
12012     }
12013 #endif
12014 
12015 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12016     case TARGET_NR_timerfd_create:
12017         return get_errno(timerfd_create(arg1,
12018                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12019 #endif
12020 
12021 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12022     case TARGET_NR_timerfd_gettime:
12023         {
12024             struct itimerspec its_curr;
12025 
12026             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12027 
12028             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12029                 return -TARGET_EFAULT;
12030             }
12031         }
12032         return ret;
12033 #endif
12034 
12035 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12036     case TARGET_NR_timerfd_settime:
12037         {
12038             struct itimerspec its_new, its_old, *p_new;
12039 
12040             if (arg3) {
12041                 if (target_to_host_itimerspec(&its_new, arg3)) {
12042                     return -TARGET_EFAULT;
12043                 }
12044                 p_new = &its_new;
12045             } else {
12046                 p_new = NULL;
12047             }
12048 
12049             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12050 
12051             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12052                 return -TARGET_EFAULT;
12053             }
12054         }
12055         return ret;
12056 #endif
12057 
12058 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12059     case TARGET_NR_ioprio_get:
12060         return get_errno(ioprio_get(arg1, arg2));
12061 #endif
12062 
12063 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12064     case TARGET_NR_ioprio_set:
12065         return get_errno(ioprio_set(arg1, arg2, arg3));
12066 #endif
12067 
12068 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12069     case TARGET_NR_setns:
12070         return get_errno(setns(arg1, arg2));
12071 #endif
12072 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12073     case TARGET_NR_unshare:
12074         return get_errno(unshare(arg1));
12075 #endif
12076 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12077     case TARGET_NR_kcmp:
12078         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12079 #endif
12080 #ifdef TARGET_NR_swapcontext
12081     case TARGET_NR_swapcontext:
12082         /* PowerPC specific.  */
12083         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12084 #endif
12085 #ifdef TARGET_NR_memfd_create
12086     case TARGET_NR_memfd_create:
12087         p = lock_user_string(arg1);
12088         if (!p) {
12089             return -TARGET_EFAULT;
12090         }
12091         ret = get_errno(memfd_create(p, arg2));
12092         fd_trans_unregister(ret);
12093         unlock_user(p, arg1, 0);
12094         return ret;
12095 #endif
12096 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12097     case TARGET_NR_membarrier:
12098         return get_errno(membarrier(arg1, arg2));
12099 #endif
12100 
12101     default:
12102         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12103         return -TARGET_ENOSYS;
12104     }
12105     return ret;
12106 }
12107 
12108 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12109                     abi_long arg2, abi_long arg3, abi_long arg4,
12110                     abi_long arg5, abi_long arg6, abi_long arg7,
12111                     abi_long arg8)
12112 {
12113     CPUState *cpu = env_cpu(cpu_env);
12114     abi_long ret;
12115 
12116 #ifdef DEBUG_ERESTARTSYS
12117     /* Debug-only code for exercising the syscall-restart code paths
12118      * in the per-architecture cpu main loops: restart every syscall
12119      * the guest makes once before letting it through.
12120      */
12121     {
12122         static bool flag;
12123         flag = !flag;
12124         if (flag) {
12125             return -TARGET_ERESTARTSYS;
12126         }
12127     }
12128 #endif
12129 
12130     record_syscall_start(cpu, num, arg1,
12131                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12132 
12133     if (unlikely(do_strace)) {
12134         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12135         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12136                           arg5, arg6, arg7, arg8);
12137         print_syscall_ret(num, ret);
12138     } else {
12139         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12140                           arg5, arg6, arg7, arg8);
12141     }
12142 
12143     record_syscall_return(cpu, num, ret);
12144     return ret;
12145 }
12146