xref: /openbmc/qemu/linux-user/syscall.c (revision 4c2169b2)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
83 
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
89 #include <linux/kd.h>
90 #include <linux/mtio.h>
91 #include <linux/fs.h>
92 #include <linux/fd.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include <linux/if_alg.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 #include "qemu/guest-random.h"
115 #include "user/syscall-trace.h"
116 #include "qapi/error.h"
117 #include "fd-trans.h"
118 #include "tcg.h"
119 
120 #ifndef CLONE_IO
121 #define CLONE_IO                0x80000000      /* Clone io context */
122 #endif
123 
124 /* We can't directly call the host clone syscall, because this will
125  * badly confuse libc (breaking mutexes, for example). So we must
126  * divide clone flags into:
127  *  * flag combinations that look like pthread_create()
128  *  * flag combinations that look like fork()
129  *  * flags we can implement within QEMU itself
130  *  * flags we can't support and will return an error for
131  */
132 /* For thread creation, all these flags must be present; for
133  * fork, none must be present.
134  */
135 #define CLONE_THREAD_FLAGS                              \
136     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
137      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
138 
139 /* These flags are ignored:
140  * CLONE_DETACHED is now ignored by the kernel;
141  * CLONE_IO is just an optimisation hint to the I/O scheduler
142  */
143 #define CLONE_IGNORED_FLAGS                     \
144     (CLONE_DETACHED | CLONE_IO)
145 
146 /* Flags for fork which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_FORK_FLAGS               \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
150 
151 /* Flags for thread creation which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
153     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
154      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
155 
156 #define CLONE_INVALID_FORK_FLAGS                                        \
157     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
158 
159 #define CLONE_INVALID_THREAD_FLAGS                                      \
160     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
161        CLONE_IGNORED_FLAGS))
162 
163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
164  * have almost all been allocated. We cannot support any of
165  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
166  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
167  * The checks against the invalid thread masks above will catch these.
168  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
169  */
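
/* Illustrative sketch of how the masks above combine; do_fork() elsewhere
 * in this file applies the same classification (helper name here is
 * hypothetical):
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): reject bits we cannot emulate. */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -EINVAL : 1;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* Looks like fork(): again reject unsupported bits. */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -EINVAL : 0;
    }
    /* A partial set of the thread-creation flags is not supported. */
    return -EINVAL;
}
#endif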
170 
171 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
172  * once. This exercises the codepaths for restart.
173  */
174 //#define DEBUG_ERESTARTSYS
175 
176 //#include <linux/msdos_fs.h>
177 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
178 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
179 
180 #undef _syscall0
181 #undef _syscall1
182 #undef _syscall2
183 #undef _syscall3
184 #undef _syscall4
185 #undef _syscall5
186 #undef _syscall6
187 
188 #define _syscall0(type,name)		\
189 static type name (void)			\
190 {					\
191 	return syscall(__NR_##name);	\
192 }
193 
194 #define _syscall1(type,name,type1,arg1)		\
195 static type name (type1 arg1)			\
196 {						\
197 	return syscall(__NR_##name, arg1);	\
198 }
199 
200 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
201 static type name (type1 arg1,type2 arg2)		\
202 {							\
203 	return syscall(__NR_##name, arg1, arg2);	\
204 }
205 
206 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
207 static type name (type1 arg1,type2 arg2,type3 arg3)		\
208 {								\
209 	return syscall(__NR_##name, arg1, arg2, arg3);		\
210 }
211 
212 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
213 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
214 {										\
215 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
216 }
217 
218 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
219 		  type5,arg5)							\
220 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
221 {										\
222 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
223 }
224 
225 
226 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
227 		  type5,arg5,type6,arg6)					\
228 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
229                   type6 arg6)							\
230 {										\
231 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
232 }
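
/* For reference, a sketch of the expansion these macros produce; the
 * _syscall1() instance used for exit_group() further down becomes roughly:
 */
#if 0
static int exit_group(int error_code)
{
    return syscall(__NR_exit_group, error_code);
}
#endif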
233 
234 
235 #define __NR_sys_uname __NR_uname
236 #define __NR_sys_getcwd1 __NR_getcwd
237 #define __NR_sys_getdents __NR_getdents
238 #define __NR_sys_getdents64 __NR_getdents64
239 #define __NR_sys_getpriority __NR_getpriority
240 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
241 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
242 #define __NR_sys_syslog __NR_syslog
243 #define __NR_sys_futex __NR_futex
244 #define __NR_sys_inotify_init __NR_inotify_init
245 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
246 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
247 #define __NR_sys_statx __NR_statx
248 
249 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
250 #define __NR__llseek __NR_lseek
251 #endif
252 
253 /* Newer kernel ports have llseek() instead of _llseek() */
254 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
255 #define TARGET_NR__llseek TARGET_NR_llseek
256 #endif
257 
258 #define __NR_sys_gettid __NR_gettid
259 _syscall0(int, sys_gettid)
260 
261 /* For the 64-bit guest on 32-bit host case we must emulate
262  * getdents using getdents64, because otherwise the host
263  * might hand us back more dirent records than we can fit
264  * into the guest buffer after structure format conversion.
265  * Otherwise we implement getdents in terms of the host getdents, if the host has it.
266  */
267 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
268 #define EMULATE_GETDENTS_WITH_GETDENTS
269 #endif
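
/* Worked example of the size problem described above (illustrative field
 * sizes): on a 32-bit host, struct linux_dirent starts with a 4-byte d_ino
 * and a 4-byte d_off, while the 64-bit guest's layout uses 8 bytes for each.
 * Every record therefore grows during conversion, so a host buffer that came
 * back completely full may no longer fit in the equally sized guest buffer.
 * getdents64 avoids this because its records already use 64-bit fields on
 * both sides.
 */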
270 
271 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
272 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
273 #endif
274 #if (defined(TARGET_NR_getdents) && \
275       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
276     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
277 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
278 #endif
279 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
280 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
281           loff_t *, res, uint, wh);
282 #endif
283 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
284 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
285           siginfo_t *, uinfo)
286 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
287 #ifdef __NR_exit_group
288 _syscall1(int,exit_group,int,error_code)
289 #endif
290 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
291 _syscall1(int,set_tid_address,int *,tidptr)
292 #endif
293 #if defined(TARGET_NR_futex) && defined(__NR_futex)
294 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
295           const struct timespec *,timeout,int *,uaddr2,int,val3)
296 #endif
297 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
298 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
299           unsigned long *, user_mask_ptr);
300 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
301 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
302           unsigned long *, user_mask_ptr);
303 #define __NR_sys_getcpu __NR_getcpu
304 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
305 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
306           void *, arg);
307 _syscall2(int, capget, struct __user_cap_header_struct *, header,
308           struct __user_cap_data_struct *, data);
309 _syscall2(int, capset, struct __user_cap_header_struct *, header,
310           struct __user_cap_data_struct *, data);
311 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
312 _syscall2(int, ioprio_get, int, which, int, who)
313 #endif
314 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
315 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
316 #endif
317 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
318 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
319 #endif
320 
321 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
322 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
323           unsigned long, idx1, unsigned long, idx2)
324 #endif
325 
326 /*
327  * It is assumed that struct statx is architecture independent.
328  */
329 #if defined(TARGET_NR_statx) && defined(__NR_statx)
330 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
331           unsigned int, mask, struct target_statx *, statxbuf)
332 #endif
333 
334 static bitmask_transtbl fcntl_flags_tbl[] = {
335   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
336   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
337   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
338   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
339   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
340   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
341   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
342   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
343   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
344   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
345   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
346   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
347   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
348 #if defined(O_DIRECT)
349   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
350 #endif
351 #if defined(O_NOATIME)
352   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
353 #endif
354 #if defined(O_CLOEXEC)
355   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
356 #endif
357 #if defined(O_PATH)
358   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
359 #endif
360 #if defined(O_TMPFILE)
361   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
362 #endif
363   /* Don't terminate the list prematurely on 64-bit host+guest.  */
364 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
365   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
366 #endif
367   { 0, 0, 0, 0 }
368 };
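
/* Illustrative sketch of how a bitmask_transtbl is consumed (the field names
 * below are placeholders; the real translation helper lives elsewhere in this
 * file): for each entry, if the guest value matches target_bits under
 * target_mask, the corresponding host bits are set.
 */
#if 0
static int example_translate_open_flags(int target_flags)
{
    int host_flags = 0;
    const bitmask_transtbl *e;

    for (e = fcntl_flags_tbl; e->target_mask || e->host_mask; e++) {
        if ((target_flags & e->target_mask) == e->target_bits) {
            host_flags = (host_flags & ~e->host_mask) | e->host_bits;
        }
    }
    return host_flags;  /* e.g. guest O_WRONLY|O_CREAT -> host O_WRONLY|O_CREAT */
}
#endif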
369 
370 static int sys_getcwd1(char *buf, size_t size)
371 {
372   if (getcwd(buf, size) == NULL) {
373       /* getcwd() sets errno */
374       return (-1);
375   }
376   return strlen(buf)+1;
377 }
378 
379 #ifdef TARGET_NR_utimensat
380 #if defined(__NR_utimensat)
381 #define __NR_sys_utimensat __NR_utimensat
382 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
383           const struct timespec *,tsp,int,flags)
384 #else
385 static int sys_utimensat(int dirfd, const char *pathname,
386                          const struct timespec times[2], int flags)
387 {
388     errno = ENOSYS;
389     return -1;
390 }
391 #endif
392 #endif /* TARGET_NR_utimensat */
393 
394 #ifdef TARGET_NR_renameat2
395 #if defined(__NR_renameat2)
396 #define __NR_sys_renameat2 __NR_renameat2
397 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
398           const char *, new, unsigned int, flags)
399 #else
400 static int sys_renameat2(int oldfd, const char *old,
401                          int newfd, const char *new, int flags)
402 {
403     if (flags == 0) {
404         return renameat(oldfd, old, newfd, new);
405     }
406     errno = ENOSYS;
407     return -1;
408 }
409 #endif
410 #endif /* TARGET_NR_renameat2 */
411 
412 #ifdef CONFIG_INOTIFY
413 #include <sys/inotify.h>
414 
415 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
416 static int sys_inotify_init(void)
417 {
418   return (inotify_init());
419 }
420 #endif
421 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
422 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
423 {
424   return (inotify_add_watch(fd, pathname, mask));
425 }
426 #endif
427 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
428 static int sys_inotify_rm_watch(int fd, int32_t wd)
429 {
430   return (inotify_rm_watch(fd, wd));
431 }
432 #endif
433 #ifdef CONFIG_INOTIFY1
434 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
435 static int sys_inotify_init1(int flags)
436 {
437   return (inotify_init1(flags));
438 }
439 #endif
440 #endif
441 #else
442 /* Userspace can usually survive runtime without inotify */
443 #undef TARGET_NR_inotify_init
444 #undef TARGET_NR_inotify_init1
445 #undef TARGET_NR_inotify_add_watch
446 #undef TARGET_NR_inotify_rm_watch
447 #endif /* CONFIG_INOTIFY  */
448 
449 #if defined(TARGET_NR_prlimit64)
450 #ifndef __NR_prlimit64
451 # define __NR_prlimit64 -1
452 #endif
453 #define __NR_sys_prlimit64 __NR_prlimit64
454 /* The glibc rlimit structure may not be the one used by the underlying syscall */
455 struct host_rlimit64 {
456     uint64_t rlim_cur;
457     uint64_t rlim_max;
458 };
459 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
460           const struct host_rlimit64 *, new_limit,
461           struct host_rlimit64 *, old_limit)
462 #endif
463 
464 
465 #if defined(TARGET_NR_timer_create)
466 /* Maximum of 32 active POSIX timers allowed at any one time. */
467 static timer_t g_posix_timers[32] = { 0, } ;
468 
469 static inline int next_free_host_timer(void)
470 {
471     int k ;
472     /* FIXME: Does finding the next free slot require a lock? */
473     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
474         if (g_posix_timers[k] == 0) {
475             g_posix_timers[k] = (timer_t) 1;
476             return k;
477         }
478     }
479     return -1;
480 }
481 #endif
482 
483 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
484 #ifdef TARGET_ARM
485 static inline int regpairs_aligned(void *cpu_env, int num)
486 {
487     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
488 }
489 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
490 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
491 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
492 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even
493  * register pairs, which translates to the same behaviour as ARM/MIPS because
494  * we start with r3 as arg1 */
495 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
496 #elif defined(TARGET_SH4)
497 /* SH4 doesn't align register pairs, except for p{read,write}64 */
498 static inline int regpairs_aligned(void *cpu_env, int num)
499 {
500     switch (num) {
501     case TARGET_NR_pread64:
502     case TARGET_NR_pwrite64:
503         return 1;
504 
505     default:
506         return 0;
507     }
508 }
509 #elif defined(TARGET_XTENSA)
510 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
511 #else
512 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
513 #endif
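
/* Illustrative sketch of what callers do with regpairs_aligned(): when it
 * returns 1, one argument slot is skipped so the 64-bit value starts on an
 * even register, and the value is then rebuilt from the two 32-bit halves
 * (helper name is hypothetical):
 */
#if 0
static uint64_t example_pair_to_u64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;  /* high half in the first register */
#else
    return ((uint64_t)word1 << 32) | word0;  /* low half in the first register */
#endif
}
#endif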
514 
515 #define ERRNO_TABLE_SIZE 1200
516 
517 /* target_to_host_errno_table[] is initialized from
518  * host_to_target_errno_table[] in syscall_init(). */
519 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
520 };
521 
522 /*
523  * This list is the union of errno values overridden in asm-<arch>/errno.h
524  * minus the errnos that are not actually generic to all archs.
525  */
526 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
527     [EAGAIN]		= TARGET_EAGAIN,
528     [EIDRM]		= TARGET_EIDRM,
529     [ECHRNG]		= TARGET_ECHRNG,
530     [EL2NSYNC]		= TARGET_EL2NSYNC,
531     [EL3HLT]		= TARGET_EL3HLT,
532     [EL3RST]		= TARGET_EL3RST,
533     [ELNRNG]		= TARGET_ELNRNG,
534     [EUNATCH]		= TARGET_EUNATCH,
535     [ENOCSI]		= TARGET_ENOCSI,
536     [EL2HLT]		= TARGET_EL2HLT,
537     [EDEADLK]		= TARGET_EDEADLK,
538     [ENOLCK]		= TARGET_ENOLCK,
539     [EBADE]		= TARGET_EBADE,
540     [EBADR]		= TARGET_EBADR,
541     [EXFULL]		= TARGET_EXFULL,
542     [ENOANO]		= TARGET_ENOANO,
543     [EBADRQC]		= TARGET_EBADRQC,
544     [EBADSLT]		= TARGET_EBADSLT,
545     [EBFONT]		= TARGET_EBFONT,
546     [ENOSTR]		= TARGET_ENOSTR,
547     [ENODATA]		= TARGET_ENODATA,
548     [ETIME]		= TARGET_ETIME,
549     [ENOSR]		= TARGET_ENOSR,
550     [ENONET]		= TARGET_ENONET,
551     [ENOPKG]		= TARGET_ENOPKG,
552     [EREMOTE]		= TARGET_EREMOTE,
553     [ENOLINK]		= TARGET_ENOLINK,
554     [EADV]		= TARGET_EADV,
555     [ESRMNT]		= TARGET_ESRMNT,
556     [ECOMM]		= TARGET_ECOMM,
557     [EPROTO]		= TARGET_EPROTO,
558     [EDOTDOT]		= TARGET_EDOTDOT,
559     [EMULTIHOP]		= TARGET_EMULTIHOP,
560     [EBADMSG]		= TARGET_EBADMSG,
561     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
562     [EOVERFLOW]		= TARGET_EOVERFLOW,
563     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
564     [EBADFD]		= TARGET_EBADFD,
565     [EREMCHG]		= TARGET_EREMCHG,
566     [ELIBACC]		= TARGET_ELIBACC,
567     [ELIBBAD]		= TARGET_ELIBBAD,
568     [ELIBSCN]		= TARGET_ELIBSCN,
569     [ELIBMAX]		= TARGET_ELIBMAX,
570     [ELIBEXEC]		= TARGET_ELIBEXEC,
571     [EILSEQ]		= TARGET_EILSEQ,
572     [ENOSYS]		= TARGET_ENOSYS,
573     [ELOOP]		= TARGET_ELOOP,
574     [ERESTART]		= TARGET_ERESTART,
575     [ESTRPIPE]		= TARGET_ESTRPIPE,
576     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
577     [EUSERS]		= TARGET_EUSERS,
578     [ENOTSOCK]		= TARGET_ENOTSOCK,
579     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
580     [EMSGSIZE]		= TARGET_EMSGSIZE,
581     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
582     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
583     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
584     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
585     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
586     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
587     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
588     [EADDRINUSE]	= TARGET_EADDRINUSE,
589     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
590     [ENETDOWN]		= TARGET_ENETDOWN,
591     [ENETUNREACH]	= TARGET_ENETUNREACH,
592     [ENETRESET]		= TARGET_ENETRESET,
593     [ECONNABORTED]	= TARGET_ECONNABORTED,
594     [ECONNRESET]	= TARGET_ECONNRESET,
595     [ENOBUFS]		= TARGET_ENOBUFS,
596     [EISCONN]		= TARGET_EISCONN,
597     [ENOTCONN]		= TARGET_ENOTCONN,
598     [EUCLEAN]		= TARGET_EUCLEAN,
599     [ENOTNAM]		= TARGET_ENOTNAM,
600     [ENAVAIL]		= TARGET_ENAVAIL,
601     [EISNAM]		= TARGET_EISNAM,
602     [EREMOTEIO]		= TARGET_EREMOTEIO,
603     [EDQUOT]            = TARGET_EDQUOT,
604     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
605     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
606     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
607     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
608     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
609     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
610     [EALREADY]		= TARGET_EALREADY,
611     [EINPROGRESS]	= TARGET_EINPROGRESS,
612     [ESTALE]		= TARGET_ESTALE,
613     [ECANCELED]		= TARGET_ECANCELED,
614     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
615     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
616 #ifdef ENOKEY
617     [ENOKEY]		= TARGET_ENOKEY,
618 #endif
619 #ifdef EKEYEXPIRED
620     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
621 #endif
622 #ifdef EKEYREVOKED
623     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
624 #endif
625 #ifdef EKEYREJECTED
626     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
627 #endif
628 #ifdef EOWNERDEAD
629     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
630 #endif
631 #ifdef ENOTRECOVERABLE
632     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
633 #endif
634 #ifdef ENOMSG
635     [ENOMSG]            = TARGET_ENOMSG,
636 #endif
637 #ifdef ERFKILL
638     [ERFKILL]           = TARGET_ERFKILL,
639 #endif
640 #ifdef EHWPOISON
641     [EHWPOISON]         = TARGET_EHWPOISON,
642 #endif
643 };
644 
645 static inline int host_to_target_errno(int err)
646 {
647     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
648         host_to_target_errno_table[err]) {
649         return host_to_target_errno_table[err];
650     }
651     return err;
652 }
653 
654 static inline int target_to_host_errno(int err)
655 {
656     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
657         target_to_host_errno_table[err]) {
658         return target_to_host_errno_table[err];
659     }
660     return err;
661 }
662 
663 static inline abi_long get_errno(abi_long ret)
664 {
665     if (ret == -1)
666         return -host_to_target_errno(errno);
667     else
668         return ret;
669 }
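
/* Typical usage pattern in the syscall handlers further down (sketch only;
 * buf/size stand in for an already locked guest buffer): run the host call,
 * then let get_errno() turn a -1/errno result into a negative target errno
 * that can be handed straight back to the guest.
 */
#if 0
    abi_long ret = get_errno(sys_getcwd1(buf, size));
    if (is_error(ret)) {
        /* ret already holds -TARGET_Exxx */
    }
#endif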
670 
671 const char *target_strerror(int err)
672 {
673     if (err == TARGET_ERESTARTSYS) {
674         return "To be restarted";
675     }
676     if (err == TARGET_QEMU_ESIGRETURN) {
677         return "Successful exit from sigreturn";
678     }
679 
680     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
681         return NULL;
682     }
683     return strerror(target_to_host_errno(err));
684 }
685 
686 #define safe_syscall0(type, name) \
687 static type safe_##name(void) \
688 { \
689     return safe_syscall(__NR_##name); \
690 }
691 
692 #define safe_syscall1(type, name, type1, arg1) \
693 static type safe_##name(type1 arg1) \
694 { \
695     return safe_syscall(__NR_##name, arg1); \
696 }
697 
698 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
699 static type safe_##name(type1 arg1, type2 arg2) \
700 { \
701     return safe_syscall(__NR_##name, arg1, arg2); \
702 }
703 
704 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
706 { \
707     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
708 }
709 
710 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
711     type4, arg4) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
713 { \
714     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
715 }
716 
717 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
718     type4, arg4, type5, arg5) \
719 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
720     type5 arg5) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
723 }
724 
725 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
726     type4, arg4, type5, arg5, type6, arg6) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
728     type5 arg5, type6 arg6) \
729 { \
730     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
731 }
732 
733 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
734 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
735 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
736               int, flags, mode_t, mode)
737 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
738               struct rusage *, rusage)
739 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
740               int, options, struct rusage *, rusage)
741 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
742 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
743               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
744 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
745               struct timespec *, tsp, const sigset_t *, sigmask,
746               size_t, sigsetsize)
747 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
748               int, maxevents, int, timeout, const sigset_t *, sigmask,
749               size_t, sigsetsize)
750 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
751               const struct timespec *,timeout,int *,uaddr2,int,val3)
752 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
753 safe_syscall2(int, kill, pid_t, pid, int, sig)
754 safe_syscall2(int, tkill, int, tid, int, sig)
755 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
756 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
758 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
759               unsigned long, pos_l, unsigned long, pos_h)
760 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
761               unsigned long, pos_l, unsigned long, pos_h)
762 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
763               socklen_t, addrlen)
764 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
765               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
766 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
767               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
768 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
769 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
770 safe_syscall2(int, flock, int, fd, int, operation)
771 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
772               const struct timespec *, uts, size_t, sigsetsize)
773 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
774               int, flags)
775 safe_syscall2(int, nanosleep, const struct timespec *, req,
776               struct timespec *, rem)
777 #ifdef TARGET_NR_clock_nanosleep
778 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
779               const struct timespec *, req, struct timespec *, rem)
780 #endif
781 #ifdef __NR_ipc
782 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
783               void *, ptr, long, fifth)
784 #endif
785 #ifdef __NR_msgsnd
786 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
787               int, flags)
788 #endif
789 #ifdef __NR_msgrcv
790 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
791               long, msgtype, int, flags)
792 #endif
793 #ifdef __NR_semtimedop
794 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
795               unsigned, nsops, const struct timespec *, timeout)
796 #endif
797 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
798 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
799               size_t, len, unsigned, prio, const struct timespec *, timeout)
800 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
801               size_t, len, unsigned *, prio, const struct timespec *, timeout)
802 #endif
803 /* We do ioctl like this rather than via safe_syscall3 to preserve the
804  * "third argument might be integer or pointer or not present" behaviour of
805  * the libc function.
806  */
807 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
808 /* Similarly for fcntl. Note that callers must always:
809  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
810  *  use the flock64 struct rather than unsuffixed flock
811  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
812  */
813 #ifdef __NR_fcntl64
814 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
815 #else
816 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
817 #endif
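
/* Sketch of a call that follows the rule above: always the 64-bit command
 * constants and struct flock64, whatever the host's word size (fd is a
 * stand-in for an already-open descriptor):
 */
#if 0
    struct flock64 fl64 = {
        .l_type   = F_RDLCK,
        .l_whence = SEEK_SET,
        .l_start  = 0,
        .l_len    = 0,        /* 0 = lock to end of file */
    };
    int r = safe_fcntl(fd, F_GETLK64, &fl64);
#endif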
818 
819 static inline int host_to_target_sock_type(int host_type)
820 {
821     int target_type;
822 
823     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
824     case SOCK_DGRAM:
825         target_type = TARGET_SOCK_DGRAM;
826         break;
827     case SOCK_STREAM:
828         target_type = TARGET_SOCK_STREAM;
829         break;
830     default:
831         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
832         break;
833     }
834 
835 #if defined(SOCK_CLOEXEC)
836     if (host_type & SOCK_CLOEXEC) {
837         target_type |= TARGET_SOCK_CLOEXEC;
838     }
839 #endif
840 
841 #if defined(SOCK_NONBLOCK)
842     if (host_type & SOCK_NONBLOCK) {
843         target_type |= TARGET_SOCK_NONBLOCK;
844     }
845 #endif
846 
847     return target_type;
848 }
849 
850 static abi_ulong target_brk;
851 static abi_ulong target_original_brk;
852 static abi_ulong brk_page;
853 
854 void target_set_brk(abi_ulong new_brk)
855 {
856     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
857     brk_page = HOST_PAGE_ALIGN(target_brk);
858 }
859 
860 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
861 #define DEBUGF_BRK(message, args...)
862 
863 /* do_brk() must return target values and target errnos. */
864 abi_long do_brk(abi_ulong new_brk)
865 {
866     abi_long mapped_addr;
867     abi_ulong new_alloc_size;
868 
869     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
870 
871     if (!new_brk) {
872         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
873         return target_brk;
874     }
875     if (new_brk < target_original_brk) {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
877                    target_brk);
878         return target_brk;
879     }
880 
881     /* If the new brk is less than the highest page reserved to the
882      * target heap allocation, set it and we're almost done...  */
883     if (new_brk <= brk_page) {
884         /* Heap contents are initialized to zero, as for anonymous
885          * mapped pages.  */
886         if (new_brk > target_brk) {
887             memset(g2h(target_brk), 0, new_brk - target_brk);
888         }
889         target_brk = new_brk;
890         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
891         return target_brk;
892     }
893 
894     /* We need to allocate more memory after the brk... Note that
895      * we don't use MAP_FIXED because that will map over the top of
896      * any existing mapping (like the one with the host libc or qemu
897      * itself); instead we treat "mapped but at wrong address" as
898      * a failure and unmap again.
899      */
900     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
901     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
902                                         PROT_READ|PROT_WRITE,
903                                         MAP_ANON|MAP_PRIVATE, 0, 0));
904 
905     if (mapped_addr == brk_page) {
906         /* Heap contents are initialized to zero, as for anonymous
907          * mapped pages.  Technically the new pages are already
908          * initialized to zero since they *are* anonymous mapped
909  * pages; however, we have to take care with the contents that
910  * come from the remaining part of the previous page: it may
911  * contain garbage data from a previous heap usage (grown
912  * then shrunk).  */
913         memset(g2h(target_brk), 0, brk_page - target_brk);
914 
915         target_brk = new_brk;
916         brk_page = HOST_PAGE_ALIGN(target_brk);
917         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
918             target_brk);
919         return target_brk;
920     } else if (mapped_addr != -1) {
921         /* Mapped but at wrong address, meaning there wasn't actually
922          * enough space for this brk.
923          */
924         target_munmap(mapped_addr, new_alloc_size);
925         mapped_addr = -1;
926         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
927     }
928     else {
929         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
930     }
931 
932 #if defined(TARGET_ALPHA)
933     /* We (partially) emulate OSF/1 on Alpha, which requires we
934        return a proper errno, not an unchanged brk value.  */
935     return -TARGET_ENOMEM;
936 #endif
937     /* For everything else, return the previous break. */
938     return target_brk;
939 }
940 
941 static inline abi_long copy_from_user_fdset(fd_set *fds,
942                                             abi_ulong target_fds_addr,
943                                             int n)
944 {
945     int i, nw, j, k;
946     abi_ulong b, *target_fds;
947 
948     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
949     if (!(target_fds = lock_user(VERIFY_READ,
950                                  target_fds_addr,
951                                  sizeof(abi_ulong) * nw,
952                                  1)))
953         return -TARGET_EFAULT;
954 
955     FD_ZERO(fds);
956     k = 0;
957     for (i = 0; i < nw; i++) {
958         /* grab the abi_ulong */
959         __get_user(b, &target_fds[i]);
960         for (j = 0; j < TARGET_ABI_BITS; j++) {
961             /* check the bit inside the abi_ulong */
962             if ((b >> j) & 1)
963                 FD_SET(k, fds);
964             k++;
965         }
966     }
967 
968     unlock_user(target_fds, target_fds_addr, 0);
969 
970     return 0;
971 }
972 
973 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
974                                                  abi_ulong target_fds_addr,
975                                                  int n)
976 {
977     if (target_fds_addr) {
978         if (copy_from_user_fdset(fds, target_fds_addr, n))
979             return -TARGET_EFAULT;
980         *fds_ptr = fds;
981     } else {
982         *fds_ptr = NULL;
983     }
984     return 0;
985 }
986 
987 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
988                                           const fd_set *fds,
989                                           int n)
990 {
991     int i, nw, j, k;
992     abi_long v;
993     abi_ulong *target_fds;
994 
995     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
996     if (!(target_fds = lock_user(VERIFY_WRITE,
997                                  target_fds_addr,
998                                  sizeof(abi_ulong) * nw,
999                                  0)))
1000         return -TARGET_EFAULT;
1001 
1002     k = 0;
1003     for (i = 0; i < nw; i++) {
1004         v = 0;
1005         for (j = 0; j < TARGET_ABI_BITS; j++) {
1006             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1007             k++;
1008         }
1009         __put_user(v, &target_fds[i]);
1010     }
1011 
1012     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1013 
1014     return 0;
1015 }
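
/* Worked example of the packing above, assuming TARGET_ABI_BITS == 32:
 * for n == 70, nw = DIV_ROUND_UP(70, 32) = 3 guest words are written.
 * An fd_set containing only fd 33 lands in word 33 / 32 = 1, bit
 * 33 % 32 = 1, i.e. the guest sees { 0x0, 0x2, 0x0 } (each word stored
 * in guest byte order by __put_user()).
 */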
1016 
1017 #if defined(__alpha__)
1018 #define HOST_HZ 1024
1019 #else
1020 #define HOST_HZ 100
1021 #endif
1022 
1023 static inline abi_long host_to_target_clock_t(long ticks)
1024 {
1025 #if HOST_HZ == TARGET_HZ
1026     return ticks;
1027 #else
1028     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1029 #endif
1030 }
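
/* Worked example of the scaling above: an Alpha host (HOST_HZ == 1024)
 * reporting 2048 ticks to a guest with TARGET_HZ == 100 yields
 * (2048 * 100) / 1024 = 200 guest ticks; both values describe the same
 * two seconds of CPU time.
 */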
1031 
1032 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1033                                              const struct rusage *rusage)
1034 {
1035     struct target_rusage *target_rusage;
1036 
1037     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1038         return -TARGET_EFAULT;
1039     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1040     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1041     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1042     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1043     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1044     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1045     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1046     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1047     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1048     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1049     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1050     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1051     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1052     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1053     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1054     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1055     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1056     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1057     unlock_user_struct(target_rusage, target_addr, 1);
1058 
1059     return 0;
1060 }
1061 
1062 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1063 {
1064     abi_ulong target_rlim_swap;
1065     rlim_t result;
1066 
1067     target_rlim_swap = tswapal(target_rlim);
1068     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1069         return RLIM_INFINITY;
1070 
1071     result = target_rlim_swap;
1072     if (target_rlim_swap != (rlim_t)result)
1073         return RLIM_INFINITY;
1074 
1075     return result;
1076 }
1077 
1078 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1079 {
1080     abi_ulong target_rlim_swap;
1081     abi_ulong result;
1082 
1083     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1084         target_rlim_swap = TARGET_RLIM_INFINITY;
1085     else
1086         target_rlim_swap = rlim;
1087     result = tswapal(target_rlim_swap);
1088 
1089     return result;
1090 }
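
/* Worked example for the two conversions above: a 64-bit host limit of
 * 8 GiB (0x200000000) does not fit in a 32-bit guest's abi_ulong, so
 * host_to_target_rlim() reports TARGET_RLIM_INFINITY rather than a
 * silently truncated value; in the other direction, a guest
 * TARGET_RLIM_INFINITY (or a value too big for the host rlim_t) widens
 * to RLIM_INFINITY.
 */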
1091 
1092 static inline int target_to_host_resource(int code)
1093 {
1094     switch (code) {
1095     case TARGET_RLIMIT_AS:
1096         return RLIMIT_AS;
1097     case TARGET_RLIMIT_CORE:
1098         return RLIMIT_CORE;
1099     case TARGET_RLIMIT_CPU:
1100         return RLIMIT_CPU;
1101     case TARGET_RLIMIT_DATA:
1102         return RLIMIT_DATA;
1103     case TARGET_RLIMIT_FSIZE:
1104         return RLIMIT_FSIZE;
1105     case TARGET_RLIMIT_LOCKS:
1106         return RLIMIT_LOCKS;
1107     case TARGET_RLIMIT_MEMLOCK:
1108         return RLIMIT_MEMLOCK;
1109     case TARGET_RLIMIT_MSGQUEUE:
1110         return RLIMIT_MSGQUEUE;
1111     case TARGET_RLIMIT_NICE:
1112         return RLIMIT_NICE;
1113     case TARGET_RLIMIT_NOFILE:
1114         return RLIMIT_NOFILE;
1115     case TARGET_RLIMIT_NPROC:
1116         return RLIMIT_NPROC;
1117     case TARGET_RLIMIT_RSS:
1118         return RLIMIT_RSS;
1119     case TARGET_RLIMIT_RTPRIO:
1120         return RLIMIT_RTPRIO;
1121     case TARGET_RLIMIT_SIGPENDING:
1122         return RLIMIT_SIGPENDING;
1123     case TARGET_RLIMIT_STACK:
1124         return RLIMIT_STACK;
1125     default:
1126         return code;
1127     }
1128 }
1129 
1130 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1131                                               abi_ulong target_tv_addr)
1132 {
1133     struct target_timeval *target_tv;
1134 
1135     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1136         return -TARGET_EFAULT;
1137     }
1138 
1139     __get_user(tv->tv_sec, &target_tv->tv_sec);
1140     __get_user(tv->tv_usec, &target_tv->tv_usec);
1141 
1142     unlock_user_struct(target_tv, target_tv_addr, 0);
1143 
1144     return 0;
1145 }
1146 
1147 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1148                                             const struct timeval *tv)
1149 {
1150     struct target_timeval *target_tv;
1151 
1152     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1153         return -TARGET_EFAULT;
1154     }
1155 
1156     __put_user(tv->tv_sec, &target_tv->tv_sec);
1157     __put_user(tv->tv_usec, &target_tv->tv_usec);
1158 
1159     unlock_user_struct(target_tv, target_tv_addr, 1);
1160 
1161     return 0;
1162 }
1163 
1164 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1165                                              const struct timeval *tv)
1166 {
1167     struct target__kernel_sock_timeval *target_tv;
1168 
1169     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1170         return -TARGET_EFAULT;
1171     }
1172 
1173     __put_user(tv->tv_sec, &target_tv->tv_sec);
1174     __put_user(tv->tv_usec, &target_tv->tv_usec);
1175 
1176     unlock_user_struct(target_tv, target_tv_addr, 1);
1177 
1178     return 0;
1179 }
1180 
1181 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1182                                                abi_ulong target_addr)
1183 {
1184     struct target_timespec *target_ts;
1185 
1186     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1187         return -TARGET_EFAULT;
1188     }
1189     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1190     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1191     unlock_user_struct(target_ts, target_addr, 0);
1192     return 0;
1193 }
1194 
1195 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1196                                                struct timespec *host_ts)
1197 {
1198     struct target_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     unlock_user_struct(target_ts, target_addr, 1);
1206     return 0;
1207 }
1208 
1209 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1210                                                  struct timespec *host_ts)
1211 {
1212     struct target__kernel_timespec *target_ts;
1213 
1214     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1215         return -TARGET_EFAULT;
1216     }
1217     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1218     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1219     unlock_user_struct(target_ts, target_addr, 1);
1220     return 0;
1221 }
1222 
1223 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1224                                                abi_ulong target_tz_addr)
1225 {
1226     struct target_timezone *target_tz;
1227 
1228     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1229         return -TARGET_EFAULT;
1230     }
1231 
1232     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1233     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1234 
1235     unlock_user_struct(target_tz, target_tz_addr, 0);
1236 
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1241 #include <mqueue.h>
1242 
1243 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1244                                               abi_ulong target_mq_attr_addr)
1245 {
1246     struct target_mq_attr *target_mq_attr;
1247 
1248     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1249                           target_mq_attr_addr, 1))
1250         return -TARGET_EFAULT;
1251 
1252     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1253     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1254     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1255     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1256 
1257     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1258 
1259     return 0;
1260 }
1261 
1262 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1263                                             const struct mq_attr *attr)
1264 {
1265     struct target_mq_attr *target_mq_attr;
1266 
1267     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1268                           target_mq_attr_addr, 0))
1269         return -TARGET_EFAULT;
1270 
1271     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1272     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1273     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1274     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1275 
1276     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1277 
1278     return 0;
1279 }
1280 #endif
1281 
1282 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1283 /* do_select() must return target values and target errnos. */
1284 static abi_long do_select(int n,
1285                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1286                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1287 {
1288     fd_set rfds, wfds, efds;
1289     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1290     struct timeval tv;
1291     struct timespec ts, *ts_ptr;
1292     abi_long ret;
1293 
1294     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1295     if (ret) {
1296         return ret;
1297     }
1298     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1299     if (ret) {
1300         return ret;
1301     }
1302     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1303     if (ret) {
1304         return ret;
1305     }
1306 
1307     if (target_tv_addr) {
1308         if (copy_from_user_timeval(&tv, target_tv_addr))
1309             return -TARGET_EFAULT;
1310         ts.tv_sec = tv.tv_sec;
1311         ts.tv_nsec = tv.tv_usec * 1000;
1312         ts_ptr = &ts;
1313     } else {
1314         ts_ptr = NULL;
1315     }
1316 
1317     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1318                                   ts_ptr, NULL));
1319 
1320     if (!is_error(ret)) {
1321         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1322             return -TARGET_EFAULT;
1323         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1324             return -TARGET_EFAULT;
1325         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1326             return -TARGET_EFAULT;
1327 
1328         if (target_tv_addr) {
1329             tv.tv_sec = ts.tv_sec;
1330             tv.tv_usec = ts.tv_nsec / 1000;
1331             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1332                 return -TARGET_EFAULT;
1333             }
1334         }
1335     }
1336 
1337     return ret;
1338 }
1339 
1340 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1341 static abi_long do_old_select(abi_ulong arg1)
1342 {
1343     struct target_sel_arg_struct *sel;
1344     abi_ulong inp, outp, exp, tvp;
1345     long nsel;
1346 
1347     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1348         return -TARGET_EFAULT;
1349     }
1350 
1351     nsel = tswapal(sel->n);
1352     inp = tswapal(sel->inp);
1353     outp = tswapal(sel->outp);
1354     exp = tswapal(sel->exp);
1355     tvp = tswapal(sel->tvp);
1356 
1357     unlock_user_struct(sel, arg1, 0);
1358 
1359     return do_select(nsel, inp, outp, exp, tvp);
1360 }
1361 #endif
1362 #endif
1363 
1364 static abi_long do_pipe2(int host_pipe[], int flags)
1365 {
1366 #ifdef CONFIG_PIPE2
1367     return pipe2(host_pipe, flags);
1368 #else
1369     return -ENOSYS;
1370 #endif
1371 }
1372 
1373 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1374                         int flags, int is_pipe2)
1375 {
1376     int host_pipe[2];
1377     abi_long ret;
1378     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1379 
1380     if (is_error(ret))
1381         return get_errno(ret);
1382 
1383     /* Several targets have special calling conventions for the original
1384        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1385     if (!is_pipe2) {
1386 #if defined(TARGET_ALPHA)
1387         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1388         return host_pipe[0];
1389 #elif defined(TARGET_MIPS)
1390         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1391         return host_pipe[0];
1392 #elif defined(TARGET_SH4)
1393         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1394         return host_pipe[0];
1395 #elif defined(TARGET_SPARC)
1396         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1397         return host_pipe[0];
1398 #endif
1399     }
1400 
1401     if (put_user_s32(host_pipe[0], pipedes)
1402         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1403         return -TARGET_EFAULT;
1404     return get_errno(ret);
1405 }
1406 
1407 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1408                                               abi_ulong target_addr,
1409                                               socklen_t len)
1410 {
1411     struct target_ip_mreqn *target_smreqn;
1412 
1413     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1414     if (!target_smreqn)
1415         return -TARGET_EFAULT;
1416     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1417     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1418     if (len == sizeof(struct target_ip_mreqn))
1419         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1420     unlock_user(target_smreqn, target_addr, 0);
1421 
1422     return 0;
1423 }
1424 
1425 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1426                                                abi_ulong target_addr,
1427                                                socklen_t len)
1428 {
1429     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1430     sa_family_t sa_family;
1431     struct target_sockaddr *target_saddr;
1432 
1433     if (fd_trans_target_to_host_addr(fd)) {
1434         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1435     }
1436 
1437     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1438     if (!target_saddr)
1439         return -TARGET_EFAULT;
1440 
1441     sa_family = tswap16(target_saddr->sa_family);
1442 
1443     /* Oops. The caller might send an incomplete sun_path; sun_path
1444      * must be terminated by \0 (see the manual page), but
1445      * unfortunately it is quite common to specify sockaddr_un
1446      * length as "strlen(x->sun_path)" while it should be
1447      * "strlen(...) + 1". We'll fix that here if needed.
1448      * The Linux kernel has a similar feature.
1449      */
1450 
1451     if (sa_family == AF_UNIX) {
1452         if (len < unix_maxlen && len > 0) {
1453             char *cp = (char*)target_saddr;
1454 
1455             if ( cp[len-1] && !cp[len] )
1456                 len++;
1457         }
1458         if (len > unix_maxlen)
1459             len = unix_maxlen;
1460     }
1461 
1462     memcpy(addr, target_saddr, len);
1463     addr->sa_family = sa_family;
1464     if (sa_family == AF_NETLINK) {
1465         struct sockaddr_nl *nladdr;
1466 
1467         nladdr = (struct sockaddr_nl *)addr;
1468         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1469         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1470     } else if (sa_family == AF_PACKET) {
1471         struct target_sockaddr_ll *lladdr;
1472 
1473         lladdr = (struct target_sockaddr_ll *)addr;
1474         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1475         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1476     }
1477     unlock_user(target_saddr, target_addr, 0);
1478 
1479     return 0;
1480 }
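
/* Worked example of the AF_UNIX fixup above: a guest that connects to
 * "/tmp/sock" but passes len = offsetof(sockaddr_un, sun_path) + 9
 * (strlen() without the trailing '\0') has len bumped by one here,
 * provided the byte following the path really is a NUL, so the host
 * kernel sees a properly terminated sun_path; lengths larger than
 * sizeof(struct sockaddr_un) are clamped instead.
 */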
1481 
1482 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1483                                                struct sockaddr *addr,
1484                                                socklen_t len)
1485 {
1486     struct target_sockaddr *target_saddr;
1487 
1488     if (len == 0) {
1489         return 0;
1490     }
1491     assert(addr);
1492 
1493     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1494     if (!target_saddr)
1495         return -TARGET_EFAULT;
1496     memcpy(target_saddr, addr, len);
1497     if (len >= offsetof(struct target_sockaddr, sa_family) +
1498         sizeof(target_saddr->sa_family)) {
1499         target_saddr->sa_family = tswap16(addr->sa_family);
1500     }
1501     if (addr->sa_family == AF_NETLINK &&
1502         len >= sizeof(struct target_sockaddr_nl)) {
1503         struct target_sockaddr_nl *target_nl =
1504                (struct target_sockaddr_nl *)target_saddr;
1505         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1506         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1507     } else if (addr->sa_family == AF_PACKET) {
1508         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1509         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1510         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1511     } else if (addr->sa_family == AF_INET6 &&
1512                len >= sizeof(struct target_sockaddr_in6)) {
1513         struct target_sockaddr_in6 *target_in6 =
1514                (struct target_sockaddr_in6 *)target_saddr;
1515         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1516     }
1517     unlock_user(target_saddr, target_addr, len);
1518 
1519     return 0;
1520 }
1521 
1522 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1523                                            struct target_msghdr *target_msgh)
1524 {
1525     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1526     abi_long msg_controllen;
1527     abi_ulong target_cmsg_addr;
1528     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1529     socklen_t space = 0;
1530 
1531     msg_controllen = tswapal(target_msgh->msg_controllen);
1532     if (msg_controllen < sizeof (struct target_cmsghdr))
1533         goto the_end;
1534     target_cmsg_addr = tswapal(target_msgh->msg_control);
1535     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1536     target_cmsg_start = target_cmsg;
1537     if (!target_cmsg)
1538         return -TARGET_EFAULT;
1539 
1540     while (cmsg && target_cmsg) {
1541         void *data = CMSG_DATA(cmsg);
1542         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1543 
1544         int len = tswapal(target_cmsg->cmsg_len)
1545             - sizeof(struct target_cmsghdr);
1546 
1547         space += CMSG_SPACE(len);
1548         if (space > msgh->msg_controllen) {
1549             space -= CMSG_SPACE(len);
1550             /* This is a QEMU bug, since we allocated the payload
1551              * area ourselves (unlike overflow in host-to-target
1552              * conversion, which is just the guest giving us a buffer
1553              * that's too small). It can't happen for the payload types
1554              * we currently support; if it becomes an issue in future
1555              * we would need to improve our allocation strategy to
1556              * something more intelligent than "twice the size of the
1557              * target buffer we're reading from".
1558              */
1559             gemu_log("Host cmsg overflow\n");
1560             break;
1561         }
1562 
1563         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1564             cmsg->cmsg_level = SOL_SOCKET;
1565         } else {
1566             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1567         }
1568         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1569         cmsg->cmsg_len = CMSG_LEN(len);
1570 
1571         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1572             int *fd = (int *)data;
1573             int *target_fd = (int *)target_data;
1574             int i, numfds = len / sizeof(int);
1575 
1576             for (i = 0; i < numfds; i++) {
1577                 __get_user(fd[i], target_fd + i);
1578             }
1579         } else if (cmsg->cmsg_level == SOL_SOCKET
1580                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1581             struct ucred *cred = (struct ucred *)data;
1582             struct target_ucred *target_cred =
1583                 (struct target_ucred *)target_data;
1584 
1585             __get_user(cred->pid, &target_cred->pid);
1586             __get_user(cred->uid, &target_cred->uid);
1587             __get_user(cred->gid, &target_cred->gid);
1588         } else {
1589             gemu_log("Unsupported ancillary data: %d/%d\n",
1590                                         cmsg->cmsg_level, cmsg->cmsg_type);
1591             memcpy(data, target_data, len);
1592         }
1593 
1594         cmsg = CMSG_NXTHDR(msgh, cmsg);
1595         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1596                                          target_cmsg_start);
1597     }
1598     unlock_user(target_cmsg, target_cmsg_addr, 0);
1599  the_end:
1600     msgh->msg_controllen = space;
1601     return 0;
1602 }
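/*
 * Worked example (hypothetical values): a guest sendmsg() that passes three
 * file descriptors as SCM_RIGHTS fills its control buffer with
 *     cmsg_len = TARGET_CMSG_LEN(3 * sizeof(int))
 * The loop above derives the 12-byte payload, copies the three ints with
 * __get_user() so their byte order is corrected, and rebuilds the header
 * using the host's CMSG_LEN(), which may differ from the target's value
 * when header sizes or alignment differ between the two ABIs.
 */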
1603 
1604 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1605                                            struct msghdr *msgh)
1606 {
1607     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1608     abi_long msg_controllen;
1609     abi_ulong target_cmsg_addr;
1610     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1611     socklen_t space = 0;
1612 
1613     msg_controllen = tswapal(target_msgh->msg_controllen);
1614     if (msg_controllen < sizeof (struct target_cmsghdr))
1615         goto the_end;
1616     target_cmsg_addr = tswapal(target_msgh->msg_control);
1617     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1618     target_cmsg_start = target_cmsg;
1619     if (!target_cmsg)
1620         return -TARGET_EFAULT;
1621 
1622     while (cmsg && target_cmsg) {
1623         void *data = CMSG_DATA(cmsg);
1624         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1625 
1626         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1627         int tgt_len, tgt_space;
1628 
1629         /* We never copy a half-header but may copy half-data;
1630          * this is Linux's behaviour in put_cmsg(). Note that
1631          * truncation here is a guest problem (which we report
1632          * to the guest via the CTRUNC bit), unlike truncation
1633          * in target_to_host_cmsg, which is a QEMU bug.
1634          */
1635         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1636             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1637             break;
1638         }
1639 
1640         if (cmsg->cmsg_level == SOL_SOCKET) {
1641             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1642         } else {
1643             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1644         }
1645         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1646 
1647         /* Payload types which need a different size of payload on
1648          * the target must adjust tgt_len here.
1649          */
1650         tgt_len = len;
1651         switch (cmsg->cmsg_level) {
1652         case SOL_SOCKET:
1653             switch (cmsg->cmsg_type) {
1654             case SO_TIMESTAMP:
1655                 tgt_len = sizeof(struct target_timeval);
1656                 break;
1657             default:
1658                 break;
1659             }
1660             break;
1661         default:
1662             break;
1663         }
1664 
1665         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1666             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1667             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1668         }
1669 
1670         /* We must now copy-and-convert len bytes of payload
1671          * into tgt_len bytes of destination space. Bear in mind
1672          * that in both source and destination we may be dealing
1673          * with a truncated value!
1674          */
1675         switch (cmsg->cmsg_level) {
1676         case SOL_SOCKET:
1677             switch (cmsg->cmsg_type) {
1678             case SCM_RIGHTS:
1679             {
1680                 int *fd = (int *)data;
1681                 int *target_fd = (int *)target_data;
1682                 int i, numfds = tgt_len / sizeof(int);
1683 
1684                 for (i = 0; i < numfds; i++) {
1685                     __put_user(fd[i], target_fd + i);
1686                 }
1687                 break;
1688             }
1689             case SO_TIMESTAMP:
1690             {
1691                 struct timeval *tv = (struct timeval *)data;
1692                 struct target_timeval *target_tv =
1693                     (struct target_timeval *)target_data;
1694 
1695                 if (len != sizeof(struct timeval) ||
1696                     tgt_len != sizeof(struct target_timeval)) {
1697                     goto unimplemented;
1698                 }
1699 
1700                 /* copy struct timeval to target */
1701                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1702                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1703                 break;
1704             }
1705             case SCM_CREDENTIALS:
1706             {
1707                 struct ucred *cred = (struct ucred *)data;
1708                 struct target_ucred *target_cred =
1709                     (struct target_ucred *)target_data;
1710 
1711                 __put_user(cred->pid, &target_cred->pid);
1712                 __put_user(cred->uid, &target_cred->uid);
1713                 __put_user(cred->gid, &target_cred->gid);
1714                 break;
1715             }
1716             default:
1717                 goto unimplemented;
1718             }
1719             break;
1720 
1721         case SOL_IP:
1722             switch (cmsg->cmsg_type) {
1723             case IP_TTL:
1724             {
1725                 uint32_t *v = (uint32_t *)data;
1726                 uint32_t *t_int = (uint32_t *)target_data;
1727 
1728                 if (len != sizeof(uint32_t) ||
1729                     tgt_len != sizeof(uint32_t)) {
1730                     goto unimplemented;
1731                 }
1732                 __put_user(*v, t_int);
1733                 break;
1734             }
1735             case IP_RECVERR:
1736             {
1737                 struct errhdr_t {
1738                    struct sock_extended_err ee;
1739                    struct sockaddr_in offender;
1740                 };
1741                 struct errhdr_t *errh = (struct errhdr_t *)data;
1742                 struct errhdr_t *target_errh =
1743                     (struct errhdr_t *)target_data;
1744 
1745                 if (len != sizeof(struct errhdr_t) ||
1746                     tgt_len != sizeof(struct errhdr_t)) {
1747                     goto unimplemented;
1748                 }
1749                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1750                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1751                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1752                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1753                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1754                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1755                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1756                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1757                     (void *) &errh->offender, sizeof(errh->offender));
1758                 break;
1759             }
1760             default:
1761                 goto unimplemented;
1762             }
1763             break;
1764 
1765         case SOL_IPV6:
1766             switch (cmsg->cmsg_type) {
1767             case IPV6_HOPLIMIT:
1768             {
1769                 uint32_t *v = (uint32_t *)data;
1770                 uint32_t *t_int = (uint32_t *)target_data;
1771 
1772                 if (len != sizeof(uint32_t) ||
1773                     tgt_len != sizeof(uint32_t)) {
1774                     goto unimplemented;
1775                 }
1776                 __put_user(*v, t_int);
1777                 break;
1778             }
1779             case IPV6_RECVERR:
1780             {
1781                 struct errhdr6_t {
1782                    struct sock_extended_err ee;
1783                    struct sockaddr_in6 offender;
1784                 };
1785                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1786                 struct errhdr6_t *target_errh =
1787                     (struct errhdr6_t *)target_data;
1788 
1789                 if (len != sizeof(struct errhdr6_t) ||
1790                     tgt_len != sizeof(struct errhdr6_t)) {
1791                     goto unimplemented;
1792                 }
1793                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1794                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1795                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1796                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1797                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1798                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1799                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1800                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1801                     (void *) &errh->offender, sizeof(errh->offender));
1802                 break;
1803             }
1804             default:
1805                 goto unimplemented;
1806             }
1807             break;
1808 
1809         default:
1810         unimplemented:
1811             gemu_log("Unsupported ancillary data: %d/%d\n",
1812                                         cmsg->cmsg_level, cmsg->cmsg_type);
1813             memcpy(target_data, data, MIN(len, tgt_len));
1814             if (tgt_len > len) {
1815                 memset(target_data + len, 0, tgt_len - len);
1816             }
1817         }
1818 
1819         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1820         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1821         if (msg_controllen < tgt_space) {
1822             tgt_space = msg_controllen;
1823         }
1824         msg_controllen -= tgt_space;
1825         space += tgt_space;
1826         cmsg = CMSG_NXTHDR(msgh, cmsg);
1827         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1828                                          target_cmsg_start);
1829     }
1830     unlock_user(target_cmsg, target_cmsg_addr, space);
1831  the_end:
1832     target_msgh->msg_controllen = tswapal(space);
1833     return 0;
1834 }
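/*
 * Illustrative example (assuming a 64-bit host and a 32-bit target): for a
 * SOL_SOCKET/SO_TIMESTAMP message the host payload is a 16-byte
 * struct timeval while the target expects an 8-byte struct target_timeval,
 * so tgt_len is shrunk above and tv_sec/tv_usec are converted field by
 * field instead of being memcpy'd.  When even the resized payload does not
 * fit in the guest's control buffer, MSG_CTRUNC is reported, mirroring
 * Linux's put_cmsg() behaviour.
 */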
1835 
1836 /* do_setsockopt() Must return target values and target errnos. */
1837 static abi_long do_setsockopt(int sockfd, int level, int optname,
1838                               abi_ulong optval_addr, socklen_t optlen)
1839 {
1840     abi_long ret;
1841     int val;
1842     struct ip_mreqn *ip_mreq;
1843     struct ip_mreq_source *ip_mreq_source;
1844 
1845     switch(level) {
1846     case SOL_TCP:
1847         /* TCP options all take an 'int' value.  */
1848         if (optlen < sizeof(uint32_t))
1849             return -TARGET_EINVAL;
1850 
1851         if (get_user_u32(val, optval_addr))
1852             return -TARGET_EFAULT;
1853         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1854         break;
1855     case SOL_IP:
1856         switch(optname) {
1857         case IP_TOS:
1858         case IP_TTL:
1859         case IP_HDRINCL:
1860         case IP_ROUTER_ALERT:
1861         case IP_RECVOPTS:
1862         case IP_RETOPTS:
1863         case IP_PKTINFO:
1864         case IP_MTU_DISCOVER:
1865         case IP_RECVERR:
1866         case IP_RECVTTL:
1867         case IP_RECVTOS:
1868 #ifdef IP_FREEBIND
1869         case IP_FREEBIND:
1870 #endif
1871         case IP_MULTICAST_TTL:
1872         case IP_MULTICAST_LOOP:
1873             val = 0;
1874             if (optlen >= sizeof(uint32_t)) {
1875                 if (get_user_u32(val, optval_addr))
1876                     return -TARGET_EFAULT;
1877             } else if (optlen >= 1) {
1878                 if (get_user_u8(val, optval_addr))
1879                     return -TARGET_EFAULT;
1880             }
1881             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1882             break;
1883         case IP_ADD_MEMBERSHIP:
1884         case IP_DROP_MEMBERSHIP:
1885             if (optlen < sizeof (struct target_ip_mreq) ||
1886                 optlen > sizeof (struct target_ip_mreqn))
1887                 return -TARGET_EINVAL;
1888 
1889             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1890             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1891             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1892             break;
1893 
1894         case IP_BLOCK_SOURCE:
1895         case IP_UNBLOCK_SOURCE:
1896         case IP_ADD_SOURCE_MEMBERSHIP:
1897         case IP_DROP_SOURCE_MEMBERSHIP:
1898             if (optlen != sizeof (struct target_ip_mreq_source))
1899                 return -TARGET_EINVAL;
1900 
1901             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1902             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1903             unlock_user (ip_mreq_source, optval_addr, 0);
1904             break;
1905 
1906         default:
1907             goto unimplemented;
1908         }
1909         break;
1910     case SOL_IPV6:
1911         switch (optname) {
1912         case IPV6_MTU_DISCOVER:
1913         case IPV6_MTU:
1914         case IPV6_V6ONLY:
1915         case IPV6_RECVPKTINFO:
1916         case IPV6_UNICAST_HOPS:
1917         case IPV6_MULTICAST_HOPS:
1918         case IPV6_MULTICAST_LOOP:
1919         case IPV6_RECVERR:
1920         case IPV6_RECVHOPLIMIT:
1921         case IPV6_2292HOPLIMIT:
1922         case IPV6_CHECKSUM:
1923         case IPV6_ADDRFORM:
1924         case IPV6_2292PKTINFO:
1925         case IPV6_RECVTCLASS:
1926         case IPV6_RECVRTHDR:
1927         case IPV6_2292RTHDR:
1928         case IPV6_RECVHOPOPTS:
1929         case IPV6_2292HOPOPTS:
1930         case IPV6_RECVDSTOPTS:
1931         case IPV6_2292DSTOPTS:
1932         case IPV6_TCLASS:
1933 #ifdef IPV6_RECVPATHMTU
1934         case IPV6_RECVPATHMTU:
1935 #endif
1936 #ifdef IPV6_TRANSPARENT
1937         case IPV6_TRANSPARENT:
1938 #endif
1939 #ifdef IPV6_FREEBIND
1940         case IPV6_FREEBIND:
1941 #endif
1942 #ifdef IPV6_RECVORIGDSTADDR
1943         case IPV6_RECVORIGDSTADDR:
1944 #endif
1945             val = 0;
1946             if (optlen < sizeof(uint32_t)) {
1947                 return -TARGET_EINVAL;
1948             }
1949             if (get_user_u32(val, optval_addr)) {
1950                 return -TARGET_EFAULT;
1951             }
1952             ret = get_errno(setsockopt(sockfd, level, optname,
1953                                        &val, sizeof(val)));
1954             break;
1955         case IPV6_PKTINFO:
1956         {
1957             struct in6_pktinfo pki;
1958 
1959             if (optlen < sizeof(pki)) {
1960                 return -TARGET_EINVAL;
1961             }
1962 
1963             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1964                 return -TARGET_EFAULT;
1965             }
1966 
1967             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1968 
1969             ret = get_errno(setsockopt(sockfd, level, optname,
1970                                        &pki, sizeof(pki)));
1971             break;
1972         }
1973         case IPV6_ADD_MEMBERSHIP:
1974         case IPV6_DROP_MEMBERSHIP:
1975         {
1976             struct ipv6_mreq ipv6mreq;
1977 
1978             if (optlen < sizeof(ipv6mreq)) {
1979                 return -TARGET_EINVAL;
1980             }
1981 
1982             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1983                 return -TARGET_EFAULT;
1984             }
1985 
1986             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1987 
1988             ret = get_errno(setsockopt(sockfd, level, optname,
1989                                        &ipv6mreq, sizeof(ipv6mreq)));
1990             break;
1991         }
1992         default:
1993             goto unimplemented;
1994         }
1995         break;
1996     case SOL_ICMPV6:
1997         switch (optname) {
1998         case ICMPV6_FILTER:
1999         {
2000             struct icmp6_filter icmp6f;
2001 
2002             if (optlen > sizeof(icmp6f)) {
2003                 optlen = sizeof(icmp6f);
2004             }
2005 
2006             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2007                 return -TARGET_EFAULT;
2008             }
2009 
2010             for (val = 0; val < 8; val++) {
2011                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2012             }
2013 
2014             ret = get_errno(setsockopt(sockfd, level, optname,
2015                                        &icmp6f, optlen));
2016             break;
2017         }
2018         default:
2019             goto unimplemented;
2020         }
2021         break;
2022     case SOL_RAW:
2023         switch (optname) {
2024         case ICMP_FILTER:
2025         case IPV6_CHECKSUM:
2026             /* these take a u32 value */
2027             if (optlen < sizeof(uint32_t)) {
2028                 return -TARGET_EINVAL;
2029             }
2030 
2031             if (get_user_u32(val, optval_addr)) {
2032                 return -TARGET_EFAULT;
2033             }
2034             ret = get_errno(setsockopt(sockfd, level, optname,
2035                                        &val, sizeof(val)));
2036             break;
2037 
2038         default:
2039             goto unimplemented;
2040         }
2041         break;
2042 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2043     case SOL_ALG:
2044         switch (optname) {
2045         case ALG_SET_KEY:
2046         {
2047             char *alg_key = g_malloc(optlen);
2048 
2049             if (!alg_key) {
2050                 return -TARGET_ENOMEM;
2051             }
2052             if (copy_from_user(alg_key, optval_addr, optlen)) {
2053                 g_free(alg_key);
2054                 return -TARGET_EFAULT;
2055             }
2056             ret = get_errno(setsockopt(sockfd, level, optname,
2057                                        alg_key, optlen));
2058             g_free(alg_key);
2059             break;
2060         }
2061         case ALG_SET_AEAD_AUTHSIZE:
2062         {
2063             ret = get_errno(setsockopt(sockfd, level, optname,
2064                                        NULL, optlen));
2065             break;
2066         }
2067         default:
2068             goto unimplemented;
2069         }
2070         break;
2071 #endif
2072     case TARGET_SOL_SOCKET:
2073         switch (optname) {
2074         case TARGET_SO_RCVTIMEO:
2075         {
2076                 struct timeval tv;
2077 
2078                 optname = SO_RCVTIMEO;
2079 
2080 set_timeout:
2081                 if (optlen != sizeof(struct target_timeval)) {
2082                     return -TARGET_EINVAL;
2083                 }
2084 
2085                 if (copy_from_user_timeval(&tv, optval_addr)) {
2086                     return -TARGET_EFAULT;
2087                 }
2088 
2089                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2090                                 &tv, sizeof(tv)));
2091                 return ret;
2092         }
2093         case TARGET_SO_SNDTIMEO:
2094                 optname = SO_SNDTIMEO;
2095                 goto set_timeout;
2096         case TARGET_SO_ATTACH_FILTER:
2097         {
2098                 struct target_sock_fprog *tfprog;
2099                 struct target_sock_filter *tfilter;
2100                 struct sock_fprog fprog;
2101                 struct sock_filter *filter;
2102                 int i;
2103 
2104                 if (optlen != sizeof(*tfprog)) {
2105                     return -TARGET_EINVAL;
2106                 }
2107                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2108                     return -TARGET_EFAULT;
2109                 }
2110                 if (!lock_user_struct(VERIFY_READ, tfilter,
2111                                       tswapal(tfprog->filter), 0)) {
2112                     unlock_user_struct(tfprog, optval_addr, 1);
2113                     return -TARGET_EFAULT;
2114                 }
2115 
2116                 fprog.len = tswap16(tfprog->len);
2117                 filter = g_try_new(struct sock_filter, fprog.len);
2118                 if (filter == NULL) {
2119                     unlock_user_struct(tfilter, tfprog->filter, 1);
2120                     unlock_user_struct(tfprog, optval_addr, 1);
2121                     return -TARGET_ENOMEM;
2122                 }
2123                 for (i = 0; i < fprog.len; i++) {
2124                     filter[i].code = tswap16(tfilter[i].code);
2125                     filter[i].jt = tfilter[i].jt;
2126                     filter[i].jf = tfilter[i].jf;
2127                     filter[i].k = tswap32(tfilter[i].k);
2128                 }
2129                 fprog.filter = filter;
2130 
2131                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2132                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2133                 g_free(filter);
2134 
2135                 unlock_user_struct(tfilter, tfprog->filter, 1);
2136                 unlock_user_struct(tfprog, optval_addr, 1);
2137                 return ret;
2138         }
2139         case TARGET_SO_BINDTODEVICE:
2140         {
2141                 char *dev_ifname, *addr_ifname;
2142 
2143                 if (optlen > IFNAMSIZ - 1) {
2144                     optlen = IFNAMSIZ - 1;
2145                 }
2146                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2147                 if (!dev_ifname) {
2148                     return -TARGET_EFAULT;
2149                 }
2150                 optname = SO_BINDTODEVICE;
2151                 addr_ifname = alloca(IFNAMSIZ);
2152                 memcpy(addr_ifname, dev_ifname, optlen);
2153                 addr_ifname[optlen] = 0;
2154                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2155                                            addr_ifname, optlen));
2156                 unlock_user(dev_ifname, optval_addr, 0);
2157                 return ret;
2158         }
2159         case TARGET_SO_LINGER:
2160         {
2161                 struct linger lg;
2162                 struct target_linger *tlg;
2163 
2164                 if (optlen != sizeof(struct target_linger)) {
2165                     return -TARGET_EINVAL;
2166                 }
2167                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2168                     return -TARGET_EFAULT;
2169                 }
2170                 __get_user(lg.l_onoff, &tlg->l_onoff);
2171                 __get_user(lg.l_linger, &tlg->l_linger);
2172                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2173                                 &lg, sizeof(lg)));
2174                 unlock_user_struct(tlg, optval_addr, 0);
2175                 return ret;
2176         }
2177             /* Options with 'int' argument.  */
2178         case TARGET_SO_DEBUG:
2179                 optname = SO_DEBUG;
2180                 break;
2181         case TARGET_SO_REUSEADDR:
2182                 optname = SO_REUSEADDR;
2183                 break;
2184 #ifdef SO_REUSEPORT
2185         case TARGET_SO_REUSEPORT:
2186                 optname = SO_REUSEPORT;
2187                 break;
2188 #endif
2189         case TARGET_SO_TYPE:
2190                 optname = SO_TYPE;
2191                 break;
2192         case TARGET_SO_ERROR:
2193                 optname = SO_ERROR;
2194                 break;
2195         case TARGET_SO_DONTROUTE:
2196                 optname = SO_DONTROUTE;
2197                 break;
2198         case TARGET_SO_BROADCAST:
2199                 optname = SO_BROADCAST;
2200                 break;
2201         case TARGET_SO_SNDBUF:
2202                 optname = SO_SNDBUF;
2203                 break;
2204         case TARGET_SO_SNDBUFFORCE:
2205                 optname = SO_SNDBUFFORCE;
2206                 break;
2207         case TARGET_SO_RCVBUF:
2208                 optname = SO_RCVBUF;
2209                 break;
2210         case TARGET_SO_RCVBUFFORCE:
2211                 optname = SO_RCVBUFFORCE;
2212                 break;
2213         case TARGET_SO_KEEPALIVE:
2214                 optname = SO_KEEPALIVE;
2215                 break;
2216         case TARGET_SO_OOBINLINE:
2217                 optname = SO_OOBINLINE;
2218                 break;
2219         case TARGET_SO_NO_CHECK:
2220                 optname = SO_NO_CHECK;
2221                 break;
2222         case TARGET_SO_PRIORITY:
2223                 optname = SO_PRIORITY;
2224                 break;
2225 #ifdef SO_BSDCOMPAT
2226         case TARGET_SO_BSDCOMPAT:
2227                 optname = SO_BSDCOMPAT;
2228                 break;
2229 #endif
2230         case TARGET_SO_PASSCRED:
2231                 optname = SO_PASSCRED;
2232                 break;
2233         case TARGET_SO_PASSSEC:
2234                 optname = SO_PASSSEC;
2235                 break;
2236         case TARGET_SO_TIMESTAMP:
2237                 optname = SO_TIMESTAMP;
2238                 break;
2239         case TARGET_SO_RCVLOWAT:
2240                 optname = SO_RCVLOWAT;
2241                 break;
2242         default:
2243             goto unimplemented;
2244         }
2245         if (optlen < sizeof(uint32_t))
2246             return -TARGET_EINVAL;
2247 
2248         if (get_user_u32(val, optval_addr))
2249             return -TARGET_EFAULT;
2250         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2251         break;
2252 #ifdef SOL_NETLINK
2253     case SOL_NETLINK:
2254         switch (optname) {
2255         case NETLINK_PKTINFO:
2256         case NETLINK_ADD_MEMBERSHIP:
2257         case NETLINK_DROP_MEMBERSHIP:
2258         case NETLINK_BROADCAST_ERROR:
2259         case NETLINK_NO_ENOBUFS:
2260 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2261         case NETLINK_LISTEN_ALL_NSID:
2262         case NETLINK_CAP_ACK:
2263 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2264 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2265         case NETLINK_EXT_ACK:
2266 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2267 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2268         case NETLINK_GET_STRICT_CHK:
2269 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2270             break;
2271         default:
2272             goto unimplemented;
2273         }
2274         val = 0;
2275         if (optlen < sizeof(uint32_t)) {
2276             return -TARGET_EINVAL;
2277         }
2278         if (get_user_u32(val, optval_addr)) {
2279             return -TARGET_EFAULT;
2280         }
2281         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2282                                    sizeof(val)));
2283         break;
2284 #endif /* SOL_NETLINK */
2285     default:
2286     unimplemented:
2287         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2288         ret = -TARGET_ENOPROTOOPT;
2289     }
2290     return ret;
2291 }
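/*
 * Usage sketch (hypothetical guest call): a guest issuing
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))
 * reaches this function with level == TARGET_SOL_SOCKET, which is compared
 * against the target's constant because some targets (SPARC, for instance)
 * define SOL_SOCKET as 0xffff rather than 1.  The timeout is rebuilt as a
 * host struct timeval by copy_from_user_timeval() before the real
 * setsockopt() is issued with the host's SOL_SOCKET and SO_RCVTIMEO values.
 */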
2292 
2293 /* do_getsockopt() Must return target values and target errnos. */
2294 static abi_long do_getsockopt(int sockfd, int level, int optname,
2295                               abi_ulong optval_addr, abi_ulong optlen)
2296 {
2297     abi_long ret;
2298     int len, val;
2299     socklen_t lv;
2300 
2301     switch(level) {
2302     case TARGET_SOL_SOCKET:
2303         level = SOL_SOCKET;
2304         switch (optname) {
2305         /* These don't just return a single integer */
2306         case TARGET_SO_RCVTIMEO:
2307         case TARGET_SO_SNDTIMEO:
2308         case TARGET_SO_PEERNAME:
2309             goto unimplemented;
2310         case TARGET_SO_PEERCRED: {
2311             struct ucred cr;
2312             socklen_t crlen;
2313             struct target_ucred *tcr;
2314 
2315             if (get_user_u32(len, optlen)) {
2316                 return -TARGET_EFAULT;
2317             }
2318             if (len < 0) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             crlen = sizeof(cr);
2323             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2324                                        &cr, &crlen));
2325             if (ret < 0) {
2326                 return ret;
2327             }
2328             if (len > crlen) {
2329                 len = crlen;
2330             }
2331             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2332                 return -TARGET_EFAULT;
2333             }
2334             __put_user(cr.pid, &tcr->pid);
2335             __put_user(cr.uid, &tcr->uid);
2336             __put_user(cr.gid, &tcr->gid);
2337             unlock_user_struct(tcr, optval_addr, 1);
2338             if (put_user_u32(len, optlen)) {
2339                 return -TARGET_EFAULT;
2340             }
2341             break;
2342         }
2343         case TARGET_SO_LINGER:
2344         {
2345             struct linger lg;
2346             socklen_t lglen;
2347             struct target_linger *tlg;
2348 
2349             if (get_user_u32(len, optlen)) {
2350                 return -TARGET_EFAULT;
2351             }
2352             if (len < 0) {
2353                 return -TARGET_EINVAL;
2354             }
2355 
2356             lglen = sizeof(lg);
2357             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2358                                        &lg, &lglen));
2359             if (ret < 0) {
2360                 return ret;
2361             }
2362             if (len > lglen) {
2363                 len = lglen;
2364             }
2365             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2366                 return -TARGET_EFAULT;
2367             }
2368             __put_user(lg.l_onoff, &tlg->l_onoff);
2369             __put_user(lg.l_linger, &tlg->l_linger);
2370             unlock_user_struct(tlg, optval_addr, 1);
2371             if (put_user_u32(len, optlen)) {
2372                 return -TARGET_EFAULT;
2373             }
2374             break;
2375         }
2376         /* Options with 'int' argument.  */
2377         case TARGET_SO_DEBUG:
2378             optname = SO_DEBUG;
2379             goto int_case;
2380         case TARGET_SO_REUSEADDR:
2381             optname = SO_REUSEADDR;
2382             goto int_case;
2383 #ifdef SO_REUSEPORT
2384         case TARGET_SO_REUSEPORT:
2385             optname = SO_REUSEPORT;
2386             goto int_case;
2387 #endif
2388         case TARGET_SO_TYPE:
2389             optname = SO_TYPE;
2390             goto int_case;
2391         case TARGET_SO_ERROR:
2392             optname = SO_ERROR;
2393             goto int_case;
2394         case TARGET_SO_DONTROUTE:
2395             optname = SO_DONTROUTE;
2396             goto int_case;
2397         case TARGET_SO_BROADCAST:
2398             optname = SO_BROADCAST;
2399             goto int_case;
2400         case TARGET_SO_SNDBUF:
2401             optname = SO_SNDBUF;
2402             goto int_case;
2403         case TARGET_SO_RCVBUF:
2404             optname = SO_RCVBUF;
2405             goto int_case;
2406         case TARGET_SO_KEEPALIVE:
2407             optname = SO_KEEPALIVE;
2408             goto int_case;
2409         case TARGET_SO_OOBINLINE:
2410             optname = SO_OOBINLINE;
2411             goto int_case;
2412         case TARGET_SO_NO_CHECK:
2413             optname = SO_NO_CHECK;
2414             goto int_case;
2415         case TARGET_SO_PRIORITY:
2416             optname = SO_PRIORITY;
2417             goto int_case;
2418 #ifdef SO_BSDCOMPAT
2419         case TARGET_SO_BSDCOMPAT:
2420             optname = SO_BSDCOMPAT;
2421             goto int_case;
2422 #endif
2423         case TARGET_SO_PASSCRED:
2424             optname = SO_PASSCRED;
2425             goto int_case;
2426         case TARGET_SO_TIMESTAMP:
2427             optname = SO_TIMESTAMP;
2428             goto int_case;
2429         case TARGET_SO_RCVLOWAT:
2430             optname = SO_RCVLOWAT;
2431             goto int_case;
2432         case TARGET_SO_ACCEPTCONN:
2433             optname = SO_ACCEPTCONN;
2434             goto int_case;
2435         default:
2436             goto int_case;
2437         }
2438         break;
2439     case SOL_TCP:
2440         /* TCP options all take an 'int' value.  */
2441     int_case:
2442         if (get_user_u32(len, optlen))
2443             return -TARGET_EFAULT;
2444         if (len < 0)
2445             return -TARGET_EINVAL;
2446         lv = sizeof(lv);
2447         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2448         if (ret < 0)
2449             return ret;
2450         if (optname == SO_TYPE) {
2451             val = host_to_target_sock_type(val);
2452         }
2453         if (len > lv)
2454             len = lv;
2455         if (len == 4) {
2456             if (put_user_u32(val, optval_addr))
2457                 return -TARGET_EFAULT;
2458         } else {
2459             if (put_user_u8(val, optval_addr))
2460                 return -TARGET_EFAULT;
2461         }
2462         if (put_user_u32(len, optlen))
2463             return -TARGET_EFAULT;
2464         break;
2465     case SOL_IP:
2466         switch(optname) {
2467         case IP_TOS:
2468         case IP_TTL:
2469         case IP_HDRINCL:
2470         case IP_ROUTER_ALERT:
2471         case IP_RECVOPTS:
2472         case IP_RETOPTS:
2473         case IP_PKTINFO:
2474         case IP_MTU_DISCOVER:
2475         case IP_RECVERR:
2476         case IP_RECVTOS:
2477 #ifdef IP_FREEBIND
2478         case IP_FREEBIND:
2479 #endif
2480         case IP_MULTICAST_TTL:
2481         case IP_MULTICAST_LOOP:
2482             if (get_user_u32(len, optlen))
2483                 return -TARGET_EFAULT;
2484             if (len < 0)
2485                 return -TARGET_EINVAL;
2486             lv = sizeof(lv);
2487             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2488             if (ret < 0)
2489                 return ret;
2490             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2491                 len = 1;
2492                 if (put_user_u32(len, optlen)
2493                     || put_user_u8(val, optval_addr))
2494                     return -TARGET_EFAULT;
2495             } else {
2496                 if (len > sizeof(int))
2497                     len = sizeof(int);
2498                 if (put_user_u32(len, optlen)
2499                     || put_user_u32(val, optval_addr))
2500                     return -TARGET_EFAULT;
2501             }
2502             break;
2503         default:
2504             ret = -TARGET_ENOPROTOOPT;
2505             break;
2506         }
2507         break;
2508     case SOL_IPV6:
2509         switch (optname) {
2510         case IPV6_MTU_DISCOVER:
2511         case IPV6_MTU:
2512         case IPV6_V6ONLY:
2513         case IPV6_RECVPKTINFO:
2514         case IPV6_UNICAST_HOPS:
2515         case IPV6_MULTICAST_HOPS:
2516         case IPV6_MULTICAST_LOOP:
2517         case IPV6_RECVERR:
2518         case IPV6_RECVHOPLIMIT:
2519         case IPV6_2292HOPLIMIT:
2520         case IPV6_CHECKSUM:
2521         case IPV6_ADDRFORM:
2522         case IPV6_2292PKTINFO:
2523         case IPV6_RECVTCLASS:
2524         case IPV6_RECVRTHDR:
2525         case IPV6_2292RTHDR:
2526         case IPV6_RECVHOPOPTS:
2527         case IPV6_2292HOPOPTS:
2528         case IPV6_RECVDSTOPTS:
2529         case IPV6_2292DSTOPTS:
2530         case IPV6_TCLASS:
2531 #ifdef IPV6_RECVPATHMTU
2532         case IPV6_RECVPATHMTU:
2533 #endif
2534 #ifdef IPV6_TRANSPARENT
2535         case IPV6_TRANSPARENT:
2536 #endif
2537 #ifdef IPV6_FREEBIND
2538         case IPV6_FREEBIND:
2539 #endif
2540 #ifdef IPV6_RECVORIGDSTADDR
2541         case IPV6_RECVORIGDSTADDR:
2542 #endif
2543             if (get_user_u32(len, optlen))
2544                 return -TARGET_EFAULT;
2545             if (len < 0)
2546                 return -TARGET_EINVAL;
2547             lv = sizeof(lv);
2548             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2549             if (ret < 0)
2550                 return ret;
2551             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2552                 len = 1;
2553                 if (put_user_u32(len, optlen)
2554                     || put_user_u8(val, optval_addr))
2555                     return -TARGET_EFAULT;
2556             } else {
2557                 if (len > sizeof(int))
2558                     len = sizeof(int);
2559                 if (put_user_u32(len, optlen)
2560                     || put_user_u32(val, optval_addr))
2561                     return -TARGET_EFAULT;
2562             }
2563             break;
2564         default:
2565             ret = -TARGET_ENOPROTOOPT;
2566             break;
2567         }
2568         break;
2569 #ifdef SOL_NETLINK
2570     case SOL_NETLINK:
2571         switch (optname) {
2572         case NETLINK_PKTINFO:
2573         case NETLINK_BROADCAST_ERROR:
2574         case NETLINK_NO_ENOBUFS:
2575 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2576         case NETLINK_LISTEN_ALL_NSID:
2577         case NETLINK_CAP_ACK:
2578 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2579 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2580         case NETLINK_EXT_ACK:
2581 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2582 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2583         case NETLINK_GET_STRICT_CHK:
2584 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2585             if (get_user_u32(len, optlen)) {
2586                 return -TARGET_EFAULT;
2587             }
2588             if (len != sizeof(val)) {
2589                 return -TARGET_EINVAL;
2590             }
2591             lv = len;
2592             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2593             if (ret < 0) {
2594                 return ret;
2595             }
2596             if (put_user_u32(lv, optlen)
2597                 || put_user_u32(val, optval_addr)) {
2598                 return -TARGET_EFAULT;
2599             }
2600             break;
2601 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2602         case NETLINK_LIST_MEMBERSHIPS:
2603         {
2604             uint32_t *results;
2605             int i;
2606             if (get_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             if (len < 0) {
2610                 return -TARGET_EINVAL;
2611             }
2612             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2613             if (!results) {
2614                 return -TARGET_EFAULT;
2615             }
2616             lv = len;
2617             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2618             if (ret < 0) {
2619                 unlock_user(results, optval_addr, 0);
2620                 return ret;
2621             }
2622             /* swap host endianness to target endianness. */
2623             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2624                 results[i] = tswap32(results[i]);
2625             }
2626             if (put_user_u32(lv, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             unlock_user(results, optval_addr, 0);
2630             break;
2631         }
2632 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2633         default:
2634             goto unimplemented;
2635         }
2636         break;
2637 #endif /* SOL_NETLINK */
2638     default:
2639     unimplemented:
2640         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2641                  level, optname);
2642         ret = -TARGET_EOPNOTSUPP;
2643         break;
2644     }
2645     return ret;
2646 }
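/*
 * Illustrative example (hypothetical guest call): for
 *     getsockopt(fd, IPPROTO_IP, IP_TTL, buf, &optlen)   with optlen == 1
 * the host value is always fetched into a full int, but because the guest
 * asked for a single byte and the value fits in 0..254, only one byte is
 * written back and *optlen stays 1, following the kernel's special case
 * for byte-sized IP options.
 */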
2647 
2648 /* Convert target low/high pair representing file offset into the host
2649  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2650  * as the kernel doesn't handle them either.
2651  */
2652 static void target_to_host_low_high(abi_ulong tlow,
2653                                     abi_ulong thigh,
2654                                     unsigned long *hlow,
2655                                     unsigned long *hhigh)
2656 {
2657     uint64_t off = tlow |
2658         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2659         TARGET_LONG_BITS / 2;
2660 
2661     *hlow = off;
2662     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2663 }
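/*
 * Worked example (assuming a 32-bit target on a 64-bit host): tlow =
 * 0x89abcdef and thigh = 0x01234567 combine into off = 0x0123456789abcdef,
 * giving *hlow = 0x0123456789abcdef and *hhigh = 0.  A 32-bit host would
 * instead get the original low/high split back.  The shift is done in two
 * halves because shifting a value by its full type width is undefined
 * behaviour in C.
 */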
2664 
2665 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2666                                 abi_ulong count, int copy)
2667 {
2668     struct target_iovec *target_vec;
2669     struct iovec *vec;
2670     abi_ulong total_len, max_len;
2671     int i;
2672     int err = 0;
2673     bool bad_address = false;
2674 
2675     if (count == 0) {
2676         errno = 0;
2677         return NULL;
2678     }
2679     if (count > IOV_MAX) {
2680         errno = EINVAL;
2681         return NULL;
2682     }
2683 
2684     vec = g_try_new0(struct iovec, count);
2685     if (vec == NULL) {
2686         errno = ENOMEM;
2687         return NULL;
2688     }
2689 
2690     target_vec = lock_user(VERIFY_READ, target_addr,
2691                            count * sizeof(struct target_iovec), 1);
2692     if (target_vec == NULL) {
2693         err = EFAULT;
2694         goto fail2;
2695     }
2696 
2697     /* ??? If host page size > target page size, this will result in a
2698        value larger than what we can actually support.  */
2699     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2700     total_len = 0;
2701 
2702     for (i = 0; i < count; i++) {
2703         abi_ulong base = tswapal(target_vec[i].iov_base);
2704         abi_long len = tswapal(target_vec[i].iov_len);
2705 
2706         if (len < 0) {
2707             err = EINVAL;
2708             goto fail;
2709         } else if (len == 0) {
2710             /* Zero length pointer is ignored.  */
2711             vec[i].iov_base = 0;
2712         } else {
2713             vec[i].iov_base = lock_user(type, base, len, copy);
2714             /* If the first buffer pointer is bad, this is a fault.  But
2715              * subsequent bad buffers will result in a partial write; this
2716              * is realized by filling the vector with null pointers and
2717              * zero lengths. */
2718             if (!vec[i].iov_base) {
2719                 if (i == 0) {
2720                     err = EFAULT;
2721                     goto fail;
2722                 } else {
2723                     bad_address = true;
2724                 }
2725             }
2726             if (bad_address) {
2727                 len = 0;
2728             }
2729             if (len > max_len - total_len) {
2730                 len = max_len - total_len;
2731             }
2732         }
2733         vec[i].iov_len = len;
2734         total_len += len;
2735     }
2736 
2737     unlock_user(target_vec, target_addr, 0);
2738     return vec;
2739 
2740  fail:
2741     while (--i >= 0) {
2742         if (tswapal(target_vec[i].iov_len) > 0) {
2743             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2744         }
2745     }
2746     unlock_user(target_vec, target_addr, 0);
2747  fail2:
2748     g_free(vec);
2749     errno = err;
2750     return NULL;
2751 }
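/*
 * Illustrative example (hypothetical guest buffers): a guest writev() with
 * three iovecs whose second entry points at unmapped memory still proceeds:
 * entry 0 is locked normally, entry 1 gets a NULL base, and every entry
 * from there on is clamped to zero length, so the host syscall performs a
 * short transfer rather than failing outright.  Only a bad *first* buffer
 * is turned into EFAULT here.
 */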
2752 
2753 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2754                          abi_ulong count, int copy)
2755 {
2756     struct target_iovec *target_vec;
2757     int i;
2758 
2759     target_vec = lock_user(VERIFY_READ, target_addr,
2760                            count * sizeof(struct target_iovec), 1);
2761     if (target_vec) {
2762         for (i = 0; i < count; i++) {
2763             abi_ulong base = tswapal(target_vec[i].iov_base);
2764             abi_long len = tswapal(target_vec[i].iov_len);
2765             if (len < 0) {
2766                 break;
2767             }
2768             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2769         }
2770         unlock_user(target_vec, target_addr, 0);
2771     }
2772 
2773     g_free(vec);
2774 }
2775 
2776 static inline int target_to_host_sock_type(int *type)
2777 {
2778     int host_type = 0;
2779     int target_type = *type;
2780 
2781     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2782     case TARGET_SOCK_DGRAM:
2783         host_type = SOCK_DGRAM;
2784         break;
2785     case TARGET_SOCK_STREAM:
2786         host_type = SOCK_STREAM;
2787         break;
2788     default:
2789         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2790         break;
2791     }
2792     if (target_type & TARGET_SOCK_CLOEXEC) {
2793 #if defined(SOCK_CLOEXEC)
2794         host_type |= SOCK_CLOEXEC;
2795 #else
2796         return -TARGET_EINVAL;
2797 #endif
2798     }
2799     if (target_type & TARGET_SOCK_NONBLOCK) {
2800 #if defined(SOCK_NONBLOCK)
2801         host_type |= SOCK_NONBLOCK;
2802 #elif !defined(O_NONBLOCK)
2803         return -TARGET_EINVAL;
2804 #endif
2805     }
2806     *type = host_type;
2807     return 0;
2808 }
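/*
 * Usage sketch (hypothetical guest call): for
 *     socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0)
 * the base type is mapped explicitly and the two flag bits are passed
 * through only if the host headers define them.  A missing SOCK_NONBLOCK is
 * not an error as long as O_NONBLOCK exists, because sock_flags_fixup()
 * below can emulate it with fcntl() once the socket has been created.
 */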
2809 
2810 /* Try to emulate socket type flags after socket creation.  */
2811 static int sock_flags_fixup(int fd, int target_type)
2812 {
2813 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2814     if (target_type & TARGET_SOCK_NONBLOCK) {
2815         int flags = fcntl(fd, F_GETFL);
2816         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2817             close(fd);
2818             return -TARGET_EINVAL;
2819         }
2820     }
2821 #endif
2822     return fd;
2823 }
2824 
2825 /* do_socket() Must return target values and target errnos. */
2826 static abi_long do_socket(int domain, int type, int protocol)
2827 {
2828     int target_type = type;
2829     int ret;
2830 
2831     ret = target_to_host_sock_type(&type);
2832     if (ret) {
2833         return ret;
2834     }
2835 
2836     if (domain == PF_NETLINK && !(
2837 #ifdef CONFIG_RTNETLINK
2838          protocol == NETLINK_ROUTE ||
2839 #endif
2840          protocol == NETLINK_KOBJECT_UEVENT ||
2841          protocol == NETLINK_AUDIT)) {
2842         return -EPFNOSUPPORT;
2843     }
2844 
2845     if (domain == AF_PACKET ||
2846         (domain == AF_INET && type == SOCK_PACKET)) {
2847         protocol = tswap16(protocol);
2848     }
2849 
2850     ret = get_errno(socket(domain, type, protocol));
2851     if (ret >= 0) {
2852         ret = sock_flags_fixup(ret, target_type);
2853         if (type == SOCK_PACKET) {
2854             /* Handle an obsolete case:
2855              * if the socket type is SOCK_PACKET, bind by name.
2856              */
2857             fd_trans_register(ret, &target_packet_trans);
2858         } else if (domain == PF_NETLINK) {
2859             switch (protocol) {
2860 #ifdef CONFIG_RTNETLINK
2861             case NETLINK_ROUTE:
2862                 fd_trans_register(ret, &target_netlink_route_trans);
2863                 break;
2864 #endif
2865             case NETLINK_KOBJECT_UEVENT:
2866                 /* nothing to do: messages are strings */
2867                 break;
2868             case NETLINK_AUDIT:
2869                 fd_trans_register(ret, &target_netlink_audit_trans);
2870                 break;
2871             default:
2872                 g_assert_not_reached();
2873             }
2874         }
2875     }
2876     return ret;
2877 }
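/*
 * Illustrative note (hypothetical guest call): socket(AF_NETLINK, SOCK_RAW,
 * NETLINK_ROUTE) is accepted only when QEMU is built with CONFIG_RTNETLINK;
 * netlink protocols other than NETLINK_ROUTE, NETLINK_KOBJECT_UEVENT and
 * NETLINK_AUDIT are rejected with EPFNOSUPPORT above, since their message
 * payloads have no fd_trans converter.  NETLINK_ROUTE and NETLINK_AUDIT
 * register a translator on the new fd so later traffic can be converted;
 * NETLINK_KOBJECT_UEVENT needs none because its messages are plain strings.
 */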
2878 
2879 /* do_bind() Must return target values and target errnos. */
2880 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2881                         socklen_t addrlen)
2882 {
2883     void *addr;
2884     abi_long ret;
2885 
2886     if ((int)addrlen < 0) {
2887         return -TARGET_EINVAL;
2888     }
2889 
2890     addr = alloca(addrlen+1);
2891 
2892     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2893     if (ret)
2894         return ret;
2895 
2896     return get_errno(bind(sockfd, addr, addrlen));
2897 }
2898 
2899 /* do_connect() Must return target values and target errnos. */
2900 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2901                            socklen_t addrlen)
2902 {
2903     void *addr;
2904     abi_long ret;
2905 
2906     if ((int)addrlen < 0) {
2907         return -TARGET_EINVAL;
2908     }
2909 
2910     addr = alloca(addrlen+1);
2911 
2912     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2913     if (ret)
2914         return ret;
2915 
2916     return get_errno(safe_connect(sockfd, addr, addrlen));
2917 }
2918 
2919 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2920 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2921                                       int flags, int send)
2922 {
2923     abi_long ret, len;
2924     struct msghdr msg;
2925     abi_ulong count;
2926     struct iovec *vec;
2927     abi_ulong target_vec;
2928 
2929     if (msgp->msg_name) {
2930         msg.msg_namelen = tswap32(msgp->msg_namelen);
2931         msg.msg_name = alloca(msg.msg_namelen+1);
2932         ret = target_to_host_sockaddr(fd, msg.msg_name,
2933                                       tswapal(msgp->msg_name),
2934                                       msg.msg_namelen);
2935         if (ret == -TARGET_EFAULT) {
2936             /* For connected sockets msg_name and msg_namelen must
2937              * be ignored, so returning EFAULT immediately is wrong.
2938              * Instead, pass a bad msg_name to the host kernel, and
2939              * let it decide whether to return EFAULT or not.
2940              */
2941             msg.msg_name = (void *)-1;
2942         } else if (ret) {
2943             goto out2;
2944         }
2945     } else {
2946         msg.msg_name = NULL;
2947         msg.msg_namelen = 0;
2948     }
2949     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2950     msg.msg_control = alloca(msg.msg_controllen);
2951     memset(msg.msg_control, 0, msg.msg_controllen);
2952 
2953     msg.msg_flags = tswap32(msgp->msg_flags);
2954 
2955     count = tswapal(msgp->msg_iovlen);
2956     target_vec = tswapal(msgp->msg_iov);
2957 
2958     if (count > IOV_MAX) {
2959         /* sendmsg/recvmsg return a different errno for this condition than
2960          * readv/writev, so we must catch it here before lock_iovec() does.
2961          */
2962         ret = -TARGET_EMSGSIZE;
2963         goto out2;
2964     }
2965 
2966     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2967                      target_vec, count, send);
2968     if (vec == NULL) {
2969         ret = -host_to_target_errno(errno);
2970         goto out2;
2971     }
2972     msg.msg_iovlen = count;
2973     msg.msg_iov = vec;
2974 
2975     if (send) {
2976         if (fd_trans_target_to_host_data(fd)) {
2977             void *host_msg;
2978 
2979             host_msg = g_malloc(msg.msg_iov->iov_len);
2980             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2981             ret = fd_trans_target_to_host_data(fd)(host_msg,
2982                                                    msg.msg_iov->iov_len);
2983             if (ret >= 0) {
2984                 msg.msg_iov->iov_base = host_msg;
2985                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2986             }
2987             g_free(host_msg);
2988         } else {
2989             ret = target_to_host_cmsg(&msg, msgp);
2990             if (ret == 0) {
2991                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2992             }
2993         }
2994     } else {
2995         ret = get_errno(safe_recvmsg(fd, &msg, flags));
2996         if (!is_error(ret)) {
2997             len = ret;
2998             if (fd_trans_host_to_target_data(fd)) {
2999                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3000                                                MIN(msg.msg_iov->iov_len, len));
3001             } else {
3002                 ret = host_to_target_cmsg(msgp, &msg);
3003             }
3004             if (!is_error(ret)) {
3005                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3006                 msgp->msg_flags = tswap32(msg.msg_flags);
3007                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3008                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3009                                     msg.msg_name, msg.msg_namelen);
3010                     if (ret) {
3011                         goto out;
3012                     }
3013                 }
3014 
3015                 ret = len;
3016             }
3017         }
3018     }
3019 
3020 out:
3021     unlock_iovec(vec, target_vec, count, !send);
3022 out2:
3023     return ret;
3024 }
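/*
 * Illustrative note (assuming a 64-bit host and a 32-bit target): the
 * msg_controllen doubling above exists because each host struct cmsghdr can
 * be larger than the target's (8-byte length and alignment versus 4-byte),
 * so a control area sized exactly like the guest's could overflow while
 * target_to_host_cmsg() converts it.  This is the "twice the size of the
 * target buffer" allocation strategy mentioned in that function's overflow
 * comment.
 */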
3025 
3026 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3027                                int flags, int send)
3028 {
3029     abi_long ret;
3030     struct target_msghdr *msgp;
3031 
3032     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3033                           msgp,
3034                           target_msg,
3035                           send ? 1 : 0)) {
3036         return -TARGET_EFAULT;
3037     }
3038     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3039     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3040     return ret;
3041 }
3042 
3043 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3044  * so it might not have this *mmsg-specific flag either.
3045  */
3046 #ifndef MSG_WAITFORONE
3047 #define MSG_WAITFORONE 0x10000
3048 #endif
3049 
3050 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3051                                 unsigned int vlen, unsigned int flags,
3052                                 int send)
3053 {
3054     struct target_mmsghdr *mmsgp;
3055     abi_long ret = 0;
3056     int i;
3057 
3058     if (vlen > UIO_MAXIOV) {
3059         vlen = UIO_MAXIOV;
3060     }
3061 
3062     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3063     if (!mmsgp) {
3064         return -TARGET_EFAULT;
3065     }
3066 
3067     for (i = 0; i < vlen; i++) {
3068         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3069         if (is_error(ret)) {
3070             break;
3071         }
3072         mmsgp[i].msg_len = tswap32(ret);
3073         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3074         if (flags & MSG_WAITFORONE) {
3075             flags |= MSG_DONTWAIT;
3076         }
3077     }
3078 
3079     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3080 
3081     /* Return the number of datagrams sent or received if we transferred
3082      * any at all; otherwise return the error.
3083      */
3084     if (i) {
3085         return i;
3086     }
3087     return ret;
3088 }
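
/*
 * A guest-side sketch of recvmmsg(2) with MSG_WAITFORONE, the flag whose
 * "turn on MSG_DONTWAIT after the first datagram" behaviour is reproduced
 * in the loop above.  Assumes glibc's recvmmsg() wrapper; the helper name
 * drain_socket() is made up for illustration.
 */
#if 0 /* illustration only, not built */
#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>

static int drain_socket(int sock)
{
    struct mmsghdr msgs[8];
    struct iovec iovs[8];
    static char bufs[8][1500];
    int i;

    for (i = 0; i < 8; i++) {
        iovs[i].iov_base = bufs[i];
        iovs[i].iov_len = sizeof(bufs[i]);
        memset(&msgs[i].msg_hdr, 0, sizeof(msgs[i].msg_hdr));
        msgs[i].msg_hdr.msg_iov = &iovs[i];
        msgs[i].msg_hdr.msg_iovlen = 1;
    }
    /* Blocks for the first datagram only; the rest are taken if ready. */
    return recvmmsg(sock, msgs, 8, MSG_WAITFORONE, NULL);
}
#endif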
3089 
3090 /* do_accept4() must return target values and target errnos. */
3091 static abi_long do_accept4(int fd, abi_ulong target_addr,
3092                            abi_ulong target_addrlen_addr, int flags)
3093 {
3094     socklen_t addrlen, ret_addrlen;
3095     void *addr;
3096     abi_long ret;
3097     int host_flags;
3098 
3099     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3100 
3101     if (target_addr == 0) {
3102         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3103     }
3104 
3105     /* Linux returns EINVAL if the addrlen pointer is invalid */
3106     if (get_user_u32(addrlen, target_addrlen_addr))
3107         return -TARGET_EINVAL;
3108 
3109     if ((int)addrlen < 0) {
3110         return -TARGET_EINVAL;
3111     }
3112 
3113     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3114         return -TARGET_EINVAL;
3115 
3116     addr = alloca(addrlen);
3117 
3118     ret_addrlen = addrlen;
3119     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3120     if (!is_error(ret)) {
3121         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3122         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3123             ret = -TARGET_EFAULT;
3124         }
3125     }
3126     return ret;
3127 }
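
/*
 * Guest-side sketch of accept4(2) with flags; SOCK_NONBLOCK and
 * SOCK_CLOEXEC are the bits that target_to_host_bitmask() remaps via
 * fcntl_flags_tbl above.  The helper name accept_client() is made up
 * for illustration.
 */
#if 0 /* illustration only, not built */
#define _GNU_SOURCE
#include <sys/socket.h>

static int accept_client(int listen_fd, struct sockaddr_storage *ss)
{
    socklen_t len = sizeof(*ss);

    return accept4(listen_fd, (struct sockaddr *)ss, &len,
                   SOCK_NONBLOCK | SOCK_CLOEXEC);
}
#endif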
3128 
3129 /* do_getpeername() must return target values and target errnos. */
3130 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3131                                abi_ulong target_addrlen_addr)
3132 {
3133     socklen_t addrlen, ret_addrlen;
3134     void *addr;
3135     abi_long ret;
3136 
3137     if (get_user_u32(addrlen, target_addrlen_addr))
3138         return -TARGET_EFAULT;
3139 
3140     if ((int)addrlen < 0) {
3141         return -TARGET_EINVAL;
3142     }
3143 
3144     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3145         return -TARGET_EFAULT;
3146 
3147     addr = alloca(addrlen);
3148 
3149     ret_addrlen = addrlen;
3150     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3151     if (!is_error(ret)) {
3152         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3153         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3154             ret = -TARGET_EFAULT;
3155         }
3156     }
3157     return ret;
3158 }
3159 
3160 /* do_getsockname() must return target values and target errnos. */
3161 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3162                                abi_ulong target_addrlen_addr)
3163 {
3164     socklen_t addrlen, ret_addrlen;
3165     void *addr;
3166     abi_long ret;
3167 
3168     if (get_user_u32(addrlen, target_addrlen_addr))
3169         return -TARGET_EFAULT;
3170 
3171     if ((int)addrlen < 0) {
3172         return -TARGET_EINVAL;
3173     }
3174 
3175     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3176         return -TARGET_EFAULT;
3177 
3178     addr = alloca(addrlen);
3179 
3180     ret_addrlen = addrlen;
3181     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3182     if (!is_error(ret)) {
3183         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3184         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3185             ret = -TARGET_EFAULT;
3186         }
3187     }
3188     return ret;
3189 }
3190 
3191 /* do_socketpair() must return target values and target errnos. */
3192 static abi_long do_socketpair(int domain, int type, int protocol,
3193                               abi_ulong target_tab_addr)
3194 {
3195     int tab[2];
3196     abi_long ret;
3197 
3198     target_to_host_sock_type(&type);
3199 
3200     ret = get_errno(socketpair(domain, type, protocol, tab));
3201     if (!is_error(ret)) {
3202         if (put_user_s32(tab[0], target_tab_addr)
3203             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3204             ret = -TARGET_EFAULT;
3205     }
3206     return ret;
3207 }
3208 
3209 /* do_sendto() must return target values and target errnos. */
3210 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3211                           abi_ulong target_addr, socklen_t addrlen)
3212 {
3213     void *addr;
3214     void *host_msg;
3215     void *copy_msg = NULL;
3216     abi_long ret;
3217 
3218     if ((int)addrlen < 0) {
3219         return -TARGET_EINVAL;
3220     }
3221 
3222     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3223     if (!host_msg)
3224         return -TARGET_EFAULT;
3225     if (fd_trans_target_to_host_data(fd)) {
3226         copy_msg = host_msg;
3227         host_msg = g_malloc(len);
3228         memcpy(host_msg, copy_msg, len);
3229         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3230         if (ret < 0) {
3231             goto fail;
3232         }
3233     }
3234     if (target_addr) {
3235         addr = alloca(addrlen+1);
3236         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3237         if (ret) {
3238             goto fail;
3239         }
3240         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3241     } else {
3242         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3243     }
3244 fail:
3245     if (copy_msg) {
3246         g_free(host_msg);
3247         host_msg = copy_msg;
3248     }
3249     unlock_user(host_msg, msg, 0);
3250     return ret;
3251 }
3252 
3253 /* do_recvfrom() must return target values and target errnos. */
3254 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3255                             abi_ulong target_addr,
3256                             abi_ulong target_addrlen)
3257 {
3258     socklen_t addrlen, ret_addrlen;
3259     void *addr;
3260     void *host_msg;
3261     abi_long ret;
3262 
3263     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3264     if (!host_msg)
3265         return -TARGET_EFAULT;
3266     if (target_addr) {
3267         if (get_user_u32(addrlen, target_addrlen)) {
3268             ret = -TARGET_EFAULT;
3269             goto fail;
3270         }
3271         if ((int)addrlen < 0) {
3272             ret = -TARGET_EINVAL;
3273             goto fail;
3274         }
3275         addr = alloca(addrlen);
3276         ret_addrlen = addrlen;
3277         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3278                                       addr, &ret_addrlen));
3279     } else {
3280         addr = NULL; /* To keep compiler quiet.  */
3281         addrlen = 0; /* To keep compiler quiet.  */
3282         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3283     }
3284     if (!is_error(ret)) {
3285         if (fd_trans_host_to_target_data(fd)) {
3286             abi_long trans;
3287             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3288             if (is_error(trans)) {
3289                 ret = trans;
3290                 goto fail;
3291             }
3292         }
3293         if (target_addr) {
3294             host_to_target_sockaddr(target_addr, addr,
3295                                     MIN(addrlen, ret_addrlen));
3296             if (put_user_u32(ret_addrlen, target_addrlen)) {
3297                 ret = -TARGET_EFAULT;
3298                 goto fail;
3299             }
3300         }
3301         unlock_user(host_msg, msg, len);
3302     } else {
3303 fail:
3304         unlock_user(host_msg, msg, 0);
3305     }
3306     return ret;
3307 }
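
/*
 * Guest-side sketch of one UDP round trip; the sockaddr conversion on
 * send and the addrlen read-back on receive are the paths exercised in
 * do_sendto() and do_recvfrom() above.  udp_echo_once() is made up for
 * illustration.
 */
#if 0 /* illustration only, not built */
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t udp_echo_once(int sock, const struct sockaddr_in *dst)
{
    char buf[512] = "ping";
    struct sockaddr_in from;
    socklen_t fromlen = sizeof(from);

    if (sendto(sock, buf, 4, 0,
               (const struct sockaddr *)dst, sizeof(*dst)) != 4) {
        return -1;
    }
    return recvfrom(sock, buf, sizeof(buf), 0,
                    (struct sockaddr *)&from, &fromlen);
}
#endif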
3308 
3309 #ifdef TARGET_NR_socketcall
3310 /* do_socketcall() must return target values and target errnos. */
3311 static abi_long do_socketcall(int num, abi_ulong vptr)
3312 {
3313     static const unsigned nargs[] = { /* number of arguments per operation */
3314         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3315         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3316         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3317         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3318         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3319         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3320         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3321         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3322         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3323         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3324         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3325         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3326         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3327         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3328         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3329         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3330         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3331         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3332         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3333         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3334     };
3335     abi_long a[6]; /* max 6 args */
3336     unsigned i;
3337 
3338     /* check the range of the first argument num */
3339     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3340     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3341         return -TARGET_EINVAL;
3342     }
3343     /* ensure we have space for args */
3344     if (nargs[num] > ARRAY_SIZE(a)) {
3345         return -TARGET_EINVAL;
3346     }
3347     /* collect the arguments in a[] according to nargs[] */
3348     for (i = 0; i < nargs[num]; ++i) {
3349         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3350             return -TARGET_EFAULT;
3351         }
3352     }
3353     /* now that we have the args, invoke the appropriate underlying function */
3354     switch (num) {
3355     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3356         return do_socket(a[0], a[1], a[2]);
3357     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3358         return do_bind(a[0], a[1], a[2]);
3359     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3360         return do_connect(a[0], a[1], a[2]);
3361     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3362         return get_errno(listen(a[0], a[1]));
3363     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3364         return do_accept4(a[0], a[1], a[2], 0);
3365     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3366         return do_getsockname(a[0], a[1], a[2]);
3367     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3368         return do_getpeername(a[0], a[1], a[2]);
3369     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3370         return do_socketpair(a[0], a[1], a[2], a[3]);
3371     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3372         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3373     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3374         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3375     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3376         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3377     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3378         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3379     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3380         return get_errno(shutdown(a[0], a[1]));
3381     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3382         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3383     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3384         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3385     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3386         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3387     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3388         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3389     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3390         return do_accept4(a[0], a[1], a[2], a[3]);
3391     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3392         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3393     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3394         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3395     default:
3396         gemu_log("Unsupported socketcall: %d\n", num);
3397         return -TARGET_EINVAL;
3398     }
3399 }
3400 #endif
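
/*
 * Guest-side sketch of the socketcall(2) calling convention that
 * do_socketcall() unpacks: the operation number goes in the first
 * argument and the real arguments sit in a guest array.  Assumes a
 * target that defines __NR_socketcall (e.g. 32-bit x86); raw_socket()
 * is made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/net.h>      /* SYS_SOCKET and friends */

static int raw_socket(int domain, int type, int protocol)
{
#ifdef __NR_socketcall
    unsigned long args[3] = { domain, type, protocol };

    return syscall(__NR_socketcall, SYS_SOCKET, args);
#else
    return syscall(__NR_socket, domain, type, protocol);
#endif
}
#endif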
3401 
3402 #define N_SHM_REGIONS	32
3403 
3404 static struct shm_region {
3405     abi_ulong start;
3406     abi_ulong size;
3407     bool in_use;
3408 } shm_regions[N_SHM_REGIONS];
3409 
3410 #ifndef TARGET_SEMID64_DS
3411 /* asm-generic version of this struct */
3412 struct target_semid64_ds
3413 {
3414   struct target_ipc_perm sem_perm;
3415   abi_ulong sem_otime;
3416 #if TARGET_ABI_BITS == 32
3417   abi_ulong __unused1;
3418 #endif
3419   abi_ulong sem_ctime;
3420 #if TARGET_ABI_BITS == 32
3421   abi_ulong __unused2;
3422 #endif
3423   abi_ulong sem_nsems;
3424   abi_ulong __unused3;
3425   abi_ulong __unused4;
3426 };
3427 #endif
3428 
3429 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3430                                                abi_ulong target_addr)
3431 {
3432     struct target_ipc_perm *target_ip;
3433     struct target_semid64_ds *target_sd;
3434 
3435     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3436         return -TARGET_EFAULT;
3437     target_ip = &(target_sd->sem_perm);
3438     host_ip->__key = tswap32(target_ip->__key);
3439     host_ip->uid = tswap32(target_ip->uid);
3440     host_ip->gid = tswap32(target_ip->gid);
3441     host_ip->cuid = tswap32(target_ip->cuid);
3442     host_ip->cgid = tswap32(target_ip->cgid);
3443 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3444     host_ip->mode = tswap32(target_ip->mode);
3445 #else
3446     host_ip->mode = tswap16(target_ip->mode);
3447 #endif
3448 #if defined(TARGET_PPC)
3449     host_ip->__seq = tswap32(target_ip->__seq);
3450 #else
3451     host_ip->__seq = tswap16(target_ip->__seq);
3452 #endif
3453     unlock_user_struct(target_sd, target_addr, 0);
3454     return 0;
3455 }
3456 
3457 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3458                                                struct ipc_perm *host_ip)
3459 {
3460     struct target_ipc_perm *target_ip;
3461     struct target_semid64_ds *target_sd;
3462 
3463     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3464         return -TARGET_EFAULT;
3465     target_ip = &(target_sd->sem_perm);
3466     target_ip->__key = tswap32(host_ip->__key);
3467     target_ip->uid = tswap32(host_ip->uid);
3468     target_ip->gid = tswap32(host_ip->gid);
3469     target_ip->cuid = tswap32(host_ip->cuid);
3470     target_ip->cgid = tswap32(host_ip->cgid);
3471 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3472     target_ip->mode = tswap32(host_ip->mode);
3473 #else
3474     target_ip->mode = tswap16(host_ip->mode);
3475 #endif
3476 #if defined(TARGET_PPC)
3477     target_ip->__seq = tswap32(host_ip->__seq);
3478 #else
3479     target_ip->__seq = tswap16(host_ip->__seq);
3480 #endif
3481     unlock_user_struct(target_sd, target_addr, 1);
3482     return 0;
3483 }
3484 
3485 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3486                                                abi_ulong target_addr)
3487 {
3488     struct target_semid64_ds *target_sd;
3489 
3490     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3491         return -TARGET_EFAULT;
3492     if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3493         return -TARGET_EFAULT;
3494     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3495     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3496     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3497     unlock_user_struct(target_sd, target_addr, 0);
3498     return 0;
3499 }
3500 
3501 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3502                                                struct semid_ds *host_sd)
3503 {
3504     struct target_semid64_ds *target_sd;
3505 
3506     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3507         return -TARGET_EFAULT;
3508     if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3509         return -TARGET_EFAULT;
3510     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3511     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3512     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3513     unlock_user_struct(target_sd, target_addr, 1);
3514     return 0;
3515 }
3516 
3517 struct target_seminfo {
3518     int semmap;
3519     int semmni;
3520     int semmns;
3521     int semmnu;
3522     int semmsl;
3523     int semopm;
3524     int semume;
3525     int semusz;
3526     int semvmx;
3527     int semaem;
3528 };
3529 
3530 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3531                                               struct seminfo *host_seminfo)
3532 {
3533     struct target_seminfo *target_seminfo;
3534     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3535         return -TARGET_EFAULT;
3536     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3537     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3538     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3539     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3540     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3541     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3542     __put_user(host_seminfo->semume, &target_seminfo->semume);
3543     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3544     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3545     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3546     unlock_user_struct(target_seminfo, target_addr, 1);
3547     return 0;
3548 }
3549 
3550 union semun {
3551     int val;
3552     struct semid_ds *buf;
3553     unsigned short *array;
3554     struct seminfo *__buf;
3555 };
3556 
3557 union target_semun {
3558     int val;
3559     abi_ulong buf;
3560     abi_ulong array;
3561     abi_ulong __buf;
3562 };
3563 
3564 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3565                                                abi_ulong target_addr)
3566 {
3567     int nsems;
3568     unsigned short *array;
3569     union semun semun;
3570     struct semid_ds semid_ds;
3571     int i, ret;
3572 
3573     semun.buf = &semid_ds;
3574 
3575     ret = semctl(semid, 0, IPC_STAT, semun);
3576     if (ret == -1)
3577         return get_errno(ret);
3578 
3579     nsems = semid_ds.sem_nsems;
3580 
3581     *host_array = g_try_new(unsigned short, nsems);
3582     if (!*host_array) {
3583         return -TARGET_ENOMEM;
3584     }
3585     array = lock_user(VERIFY_READ, target_addr,
3586                       nsems*sizeof(unsigned short), 1);
3587     if (!array) {
3588         g_free(*host_array);
3589         return -TARGET_EFAULT;
3590     }
3591 
3592     for (i = 0; i < nsems; i++) {
3593         __get_user((*host_array)[i], &array[i]);
3594     }
3595     unlock_user(array, target_addr, 0);
3596 
3597     return 0;
3598 }
3599 
3600 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3601                                                unsigned short **host_array)
3602 {
3603     int nsems;
3604     unsigned short *array;
3605     union semun semun;
3606     struct semid_ds semid_ds;
3607     int i, ret;
3608 
3609     semun.buf = &semid_ds;
3610 
3611     ret = semctl(semid, 0, IPC_STAT, semun);
3612     if (ret == -1)
3613         return get_errno(ret);
3614 
3615     nsems = semid_ds.sem_nsems;
3616 
3617     array = lock_user(VERIFY_WRITE, target_addr,
3618                       nsems*sizeof(unsigned short), 0);
3619     if (!array)
3620         return -TARGET_EFAULT;
3621 
3622     for (i = 0; i < nsems; i++) {
3623         __put_user((*host_array)[i], &array[i]);
3624     }
3625     g_free(*host_array);
3626     unlock_user(array, target_addr, 1);
3627 
3628     return 0;
3629 }
3630 
3631 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3632                                  abi_ulong target_arg)
3633 {
3634     union target_semun target_su = { .buf = target_arg };
3635     union semun arg;
3636     struct semid_ds dsarg;
3637     unsigned short *array = NULL;
3638     struct seminfo seminfo;
3639     abi_long ret = -TARGET_EINVAL;
3640     abi_long err;
3641     cmd &= 0xff;
3642 
3643     switch (cmd) {
3644     case GETVAL:
3645     case SETVAL:
3646         /* In 64-bit cross-endian situations, we will erroneously pick up
3647          * the wrong half of the union for the "val" element.  To rectify
3648          * this, the entire 8-byte structure is byteswapped, followed by
3649          * a swap of the 4-byte val field. In other cases, the data is
3650          * already in proper host byte order. */
3651         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3652             target_su.buf = tswapal(target_su.buf);
3653             arg.val = tswap32(target_su.val);
3654         } else {
3655             arg.val = target_su.val;
3656         }
3657         ret = get_errno(semctl(semid, semnum, cmd, arg));
3658         break;
3659     case GETALL:
3660     case SETALL:
3661         err = target_to_host_semarray(semid, &array, target_su.array);
3662         if (err)
3663             return err;
3664         arg.array = array;
3665         ret = get_errno(semctl(semid, semnum, cmd, arg));
3666         err = host_to_target_semarray(semid, target_su.array, &array);
3667         if (err)
3668             return err;
3669         break;
3670     case IPC_STAT:
3671     case IPC_SET:
3672     case SEM_STAT:
3673         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3674         if (err)
3675             return err;
3676         arg.buf = &dsarg;
3677         ret = get_errno(semctl(semid, semnum, cmd, arg));
3678         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3679         if (err)
3680             return err;
3681         break;
3682     case IPC_INFO:
3683     case SEM_INFO:
3684         arg.__buf = &seminfo;
3685         ret = get_errno(semctl(semid, semnum, cmd, arg));
3686         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3687         if (err)
3688             return err;
3689         break;
3690     case IPC_RMID:
3691     case GETPID:
3692     case GETNCNT:
3693     case GETZCNT:
3694         ret = get_errno(semctl(semid, semnum, cmd, NULL));
3695         break;
3696     }
3697 
3698     return ret;
3699 }
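
/*
 * Guest-side sketch of the SETVAL path that the cross-endian comment in
 * do_semctl() is concerned with: only the "val" half of the union is
 * meaningful.  semctl(2) requires the caller to define the union itself;
 * it is named semun_arg here only to avoid clashing with the definition
 * above, and set_sem_value() is made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <sys/ipc.h>
#include <sys/sem.h>

union semun_arg {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
};

static int set_sem_value(int semid, int semnum, int value)
{
    union semun_arg arg;

    arg.val = value;
    return semctl(semid, semnum, SETVAL, arg);
}
#endif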
3700 
3701 struct target_sembuf {
3702     unsigned short sem_num;
3703     short sem_op;
3704     short sem_flg;
3705 };
3706 
3707 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3708                                              abi_ulong target_addr,
3709                                              unsigned nsops)
3710 {
3711     struct target_sembuf *target_sembuf;
3712     int i;
3713 
3714     target_sembuf = lock_user(VERIFY_READ, target_addr,
3715                               nsops*sizeof(struct target_sembuf), 1);
3716     if (!target_sembuf)
3717         return -TARGET_EFAULT;
3718 
3719     for (i = 0; i < nsops; i++) {
3720         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3721         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3722         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3723     }
3724 
3725     unlock_user(target_sembuf, target_addr, 0);
3726 
3727     return 0;
3728 }
3729 
3730 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3731 {
3732     struct sembuf sops[nsops];
3733     abi_long ret;
3734 
3735     if (target_to_host_sembuf(sops, ptr, nsops))
3736         return -TARGET_EFAULT;
3737 
3738     ret = -TARGET_ENOSYS;
3739 #ifdef __NR_semtimedop
3740     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3741 #endif
3742 #ifdef __NR_ipc
3743     if (ret == -TARGET_ENOSYS) {
3744         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3745     }
3746 #endif
3747     return ret;
3748 }
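
/*
 * Guest-side sketch of a single "P" operation; the struct sembuf laid
 * out here is what target_to_host_sembuf() copies field by field before
 * do_semop() invokes the host semtimedop/ipc syscall.  sem_wait_undo()
 * is made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <sys/ipc.h>
#include <sys/sem.h>

static int sem_wait_undo(int semid)
{
    struct sembuf op = {
        .sem_num = 0,
        .sem_op = -1,       /* wait for / decrement semaphore 0 */
        .sem_flg = SEM_UNDO,
    };

    return semop(semid, &op, 1);
}
#endif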
3749 
3750 struct target_msqid_ds
3751 {
3752     struct target_ipc_perm msg_perm;
3753     abi_ulong msg_stime;
3754 #if TARGET_ABI_BITS == 32
3755     abi_ulong __unused1;
3756 #endif
3757     abi_ulong msg_rtime;
3758 #if TARGET_ABI_BITS == 32
3759     abi_ulong __unused2;
3760 #endif
3761     abi_ulong msg_ctime;
3762 #if TARGET_ABI_BITS == 32
3763     abi_ulong __unused3;
3764 #endif
3765     abi_ulong __msg_cbytes;
3766     abi_ulong msg_qnum;
3767     abi_ulong msg_qbytes;
3768     abi_ulong msg_lspid;
3769     abi_ulong msg_lrpid;
3770     abi_ulong __unused4;
3771     abi_ulong __unused5;
3772 };
3773 
3774 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3775                                                abi_ulong target_addr)
3776 {
3777     struct target_msqid_ds *target_md;
3778 
3779     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3780         return -TARGET_EFAULT;
3781     if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
3782         return -TARGET_EFAULT;
3783     host_md->msg_stime = tswapal(target_md->msg_stime);
3784     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3785     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3786     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3787     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3788     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3789     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3790     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3791     unlock_user_struct(target_md, target_addr, 0);
3792     return 0;
3793 }
3794 
3795 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3796                                                struct msqid_ds *host_md)
3797 {
3798     struct target_msqid_ds *target_md;
3799 
3800     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3801         return -TARGET_EFAULT;
3802     if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
3803         return -TARGET_EFAULT;
3804     target_md->msg_stime = tswapal(host_md->msg_stime);
3805     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3806     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3807     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3808     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3809     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3810     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3811     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3812     unlock_user_struct(target_md, target_addr, 1);
3813     return 0;
3814 }
3815 
3816 struct target_msginfo {
3817     int msgpool;
3818     int msgmap;
3819     int msgmax;
3820     int msgmnb;
3821     int msgmni;
3822     int msgssz;
3823     int msgtql;
3824     unsigned short int msgseg;
3825 };
3826 
3827 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3828                                               struct msginfo *host_msginfo)
3829 {
3830     struct target_msginfo *target_msginfo;
3831     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3832         return -TARGET_EFAULT;
3833     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3834     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3835     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3836     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3837     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3838     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3839     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3840     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3841     unlock_user_struct(target_msginfo, target_addr, 1);
3842     return 0;
3843 }
3844 
3845 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3846 {
3847     struct msqid_ds dsarg;
3848     struct msginfo msginfo;
3849     abi_long ret = -TARGET_EINVAL;
3850 
3851     cmd &= 0xff;
3852 
3853     switch (cmd) {
3854     case IPC_STAT:
3855     case IPC_SET:
3856     case MSG_STAT:
3857         if (target_to_host_msqid_ds(&dsarg, ptr))
3858             return -TARGET_EFAULT;
3859         ret = get_errno(msgctl(msgid, cmd, &dsarg));
3860         if (host_to_target_msqid_ds(ptr, &dsarg))
3861             return -TARGET_EFAULT;
3862         break;
3863     case IPC_RMID:
3864         ret = get_errno(msgctl(msgid, cmd, NULL));
3865         break;
3866     case IPC_INFO:
3867     case MSG_INFO:
3868         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3869         if (host_to_target_msginfo(ptr, &msginfo))
3870             return -TARGET_EFAULT;
3871         break;
3872     }
3873 
3874     return ret;
3875 }
3876 
3877 struct target_msgbuf {
3878     abi_long mtype;
3879     char mtext[1];
3880 };
3881 
3882 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3883                                  ssize_t msgsz, int msgflg)
3884 {
3885     struct target_msgbuf *target_mb;
3886     struct msgbuf *host_mb;
3887     abi_long ret = 0;
3888 
3889     if (msgsz < 0) {
3890         return -TARGET_EINVAL;
3891     }
3892 
3893     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3894         return -TARGET_EFAULT;
3895     host_mb = g_try_malloc(msgsz + sizeof(long));
3896     if (!host_mb) {
3897         unlock_user_struct(target_mb, msgp, 0);
3898         return -TARGET_ENOMEM;
3899     }
3900     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3901     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3902     ret = -TARGET_ENOSYS;
3903 #ifdef __NR_msgsnd
3904     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3905 #endif
3906 #ifdef __NR_ipc
3907     if (ret == -TARGET_ENOSYS) {
3908         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3909                                  host_mb, 0));
3910     }
3911 #endif
3912     g_free(host_mb);
3913     unlock_user_struct(target_mb, msgp, 0);
3914 
3915     return ret;
3916 }
3917 
3918 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3919                                  ssize_t msgsz, abi_long msgtyp,
3920                                  int msgflg)
3921 {
3922     struct target_msgbuf *target_mb;
3923     char *target_mtext;
3924     struct msgbuf *host_mb;
3925     abi_long ret = 0;
3926 
3927     if (msgsz < 0) {
3928         return -TARGET_EINVAL;
3929     }
3930 
3931     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3932         return -TARGET_EFAULT;
3933 
3934     host_mb = g_try_malloc(msgsz + sizeof(long));
3935     if (!host_mb) {
3936         ret = -TARGET_ENOMEM;
3937         goto end;
3938     }
3939     ret = -TARGET_ENOSYS;
3940 #ifdef __NR_msgrcv
3941     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3942 #endif
3943 #ifdef __NR_ipc
3944     if (ret == -TARGET_ENOSYS) {
3945         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
3946                         msgflg, host_mb, msgtyp));
3947     }
3948 #endif
3949 
3950     if (ret > 0) {
3951         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3952         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3953         if (!target_mtext) {
3954             ret = -TARGET_EFAULT;
3955             goto end;
3956         }
3957         memcpy(target_mb->mtext, host_mb->mtext, ret);
3958         unlock_user(target_mtext, target_mtext_addr, ret);
3959     }
3960 
3961     target_mb->mtype = tswapal(host_mb->mtype);
3962 
3963 end:
3964     if (target_mb)
3965         unlock_user_struct(target_mb, msgp, 1);
3966     g_free(host_mb);
3967     return ret;
3968 }
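
/*
 * Guest-side sketch of msgsnd(2)/msgrcv(2): the buffer starts with a
 * long mtype and msgsz counts only the text, which is why do_msgsnd()
 * and do_msgrcv() allocate msgsz + sizeof(long) for the host copy.
 * struct text_msg and the helpers are made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct text_msg {
    long mtype;
    char mtext[128];
};

static int send_text(int msqid, const char *text)
{
    struct text_msg m = { .mtype = 1 };

    strncpy(m.mtext, text, sizeof(m.mtext) - 1);
    return msgsnd(msqid, &m, strlen(m.mtext) + 1, 0);
}

static ssize_t recv_text(int msqid, struct text_msg *m)
{
    return msgrcv(msqid, m, sizeof(m->mtext), 1, 0);
}
#endif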
3969 
3970 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3971                                                abi_ulong target_addr)
3972 {
3973     struct target_shmid_ds *target_sd;
3974 
3975     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3976         return -TARGET_EFAULT;
3977     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3978         return -TARGET_EFAULT;
3979     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3980     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3981     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3982     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3983     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3984     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3985     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3986     unlock_user_struct(target_sd, target_addr, 0);
3987     return 0;
3988 }
3989 
3990 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3991                                                struct shmid_ds *host_sd)
3992 {
3993     struct target_shmid_ds *target_sd;
3994 
3995     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3996         return -TARGET_EFAULT;
3997     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3998         return -TARGET_EFAULT;
3999     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4000     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4001     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4002     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4003     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4004     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4005     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4006     unlock_user_struct(target_sd, target_addr, 1);
4007     return 0;
4008 }
4009 
4010 struct  target_shminfo {
4011     abi_ulong shmmax;
4012     abi_ulong shmmin;
4013     abi_ulong shmmni;
4014     abi_ulong shmseg;
4015     abi_ulong shmall;
4016 };
4017 
4018 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4019                                               struct shminfo *host_shminfo)
4020 {
4021     struct target_shminfo *target_shminfo;
4022     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4023         return -TARGET_EFAULT;
4024     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4025     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4026     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4027     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4028     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4029     unlock_user_struct(target_shminfo, target_addr, 1);
4030     return 0;
4031 }
4032 
4033 struct target_shm_info {
4034     int used_ids;
4035     abi_ulong shm_tot;
4036     abi_ulong shm_rss;
4037     abi_ulong shm_swp;
4038     abi_ulong swap_attempts;
4039     abi_ulong swap_successes;
4040 };
4041 
4042 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4043                                                struct shm_info *host_shm_info)
4044 {
4045     struct target_shm_info *target_shm_info;
4046     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4047         return -TARGET_EFAULT;
4048     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4049     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4050     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4051     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4052     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4053     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4054     unlock_user_struct(target_shm_info, target_addr, 1);
4055     return 0;
4056 }
4057 
4058 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4059 {
4060     struct shmid_ds dsarg;
4061     struct shminfo shminfo;
4062     struct shm_info shm_info;
4063     abi_long ret = -TARGET_EINVAL;
4064 
4065     cmd &= 0xff;
4066 
4067     switch(cmd) {
4068     case IPC_STAT:
4069     case IPC_SET:
4070     case SHM_STAT:
4071         if (target_to_host_shmid_ds(&dsarg, buf))
4072             return -TARGET_EFAULT;
4073         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4074         if (host_to_target_shmid_ds(buf, &dsarg))
4075             return -TARGET_EFAULT;
4076         break;
4077     case IPC_INFO:
4078         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4079         if (host_to_target_shminfo(buf, &shminfo))
4080             return -TARGET_EFAULT;
4081         break;
4082     case SHM_INFO:
4083         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4084         if (host_to_target_shm_info(buf, &shm_info))
4085             return -TARGET_EFAULT;
4086         break;
4087     case IPC_RMID:
4088     case SHM_LOCK:
4089     case SHM_UNLOCK:
4090         ret = get_errno(shmctl(shmid, cmd, NULL));
4091         break;
4092     }
4093 
4094     return ret;
4095 }
4096 
4097 #ifndef TARGET_FORCE_SHMLBA
4098 /* For most architectures, SHMLBA is the same as the page size;
4099  * some architectures have larger values, in which case they should
4100  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4101  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4102  * and defining its own value for SHMLBA.
4103  *
4104  * The kernel also permits SHMLBA to be set by the architecture to a
4105  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4106  * this means that addresses are rounded to the large size if
4107  * SHM_RND is set but addresses not aligned to that size are not rejected
4108  * as long as they are at least page-aligned. Since the only architecture
4109  * which uses this is ia64, this code doesn't provide for that oddity.
4110  */
4111 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4112 {
4113     return TARGET_PAGE_SIZE;
4114 }
4115 #endif
4116 
4117 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4118                                  int shmid, abi_ulong shmaddr, int shmflg)
4119 {
4120     abi_long raddr;
4121     void *host_raddr;
4122     struct shmid_ds shm_info;
4123     int i, ret;
4124     abi_ulong shmlba;
4125 
4126     /* find out the length of the shared memory segment */
4127     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4128     if (is_error(ret)) {
4129         /* can't get length, bail out */
4130         return ret;
4131     }
4132 
4133     shmlba = target_shmlba(cpu_env);
4134 
4135     if (shmaddr & (shmlba - 1)) {
4136         if (shmflg & SHM_RND) {
4137             shmaddr &= ~(shmlba - 1);
4138         } else {
4139             return -TARGET_EINVAL;
4140         }
4141     }
4142     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4143         return -TARGET_EINVAL;
4144     }
4145 
4146     mmap_lock();
4147 
4148     if (shmaddr)
4149         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4150     else {
4151         abi_ulong mmap_start;
4152 
4153         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4154         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4155 
4156         if (mmap_start == -1) {
4157             errno = ENOMEM;
4158             host_raddr = (void *)-1;
4159         } else
4160             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4161     }
4162 
4163     if (host_raddr == (void *)-1) {
4164         mmap_unlock();
4165         return get_errno((long)host_raddr);
4166     }
4167     raddr = h2g((unsigned long)host_raddr);
4168 
4169     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4170                    PAGE_VALID | PAGE_READ |
4171                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4172 
4173     for (i = 0; i < N_SHM_REGIONS; i++) {
4174         if (!shm_regions[i].in_use) {
4175             shm_regions[i].in_use = true;
4176             shm_regions[i].start = raddr;
4177             shm_regions[i].size = shm_info.shm_segsz;
4178             break;
4179         }
4180     }
4181 
4182     mmap_unlock();
4183     return raddr;
4184 
4185 }
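
/*
 * Guest-side sketch of the attach path emulated above: with a NULL
 * address, do_shmat() picks a SHMLBA-aligned spot via mmap_find_vma();
 * a non-NULL address must be SHMLBA-aligned unless SHM_RND is passed.
 * map_scratch_segment() is made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <sys/ipc.h>
#include <sys/shm.h>

static void *map_scratch_segment(size_t size, int *shmid_out)
{
    int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
    void *p;

    if (shmid == -1) {
        return NULL;
    }
    p = shmat(shmid, NULL, 0);
    *shmid_out = shmid;
    return p == (void *)-1 ? NULL : p;
}
#endif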
4186 
4187 static inline abi_long do_shmdt(abi_ulong shmaddr)
4188 {
4189     int i;
4190     abi_long rv;
4191 
4192     mmap_lock();
4193 
4194     for (i = 0; i < N_SHM_REGIONS; ++i) {
4195         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4196             shm_regions[i].in_use = false;
4197             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4198             break;
4199         }
4200     }
4201     rv = get_errno(shmdt(g2h(shmaddr)));
4202 
4203     mmap_unlock();
4204 
4205     return rv;
4206 }
4207 
4208 #ifdef TARGET_NR_ipc
4209 /* ??? This only works with linear mappings.  */
4210 /* do_ipc() must return target values and target errnos. */
4211 static abi_long do_ipc(CPUArchState *cpu_env,
4212                        unsigned int call, abi_long first,
4213                        abi_long second, abi_long third,
4214                        abi_long ptr, abi_long fifth)
4215 {
4216     int version;
4217     abi_long ret = 0;
4218 
4219     version = call >> 16;
4220     call &= 0xffff;
4221 
4222     switch (call) {
4223     case IPCOP_semop:
4224         ret = do_semop(first, ptr, second);
4225         break;
4226 
4227     case IPCOP_semget:
4228         ret = get_errno(semget(first, second, third));
4229         break;
4230 
4231     case IPCOP_semctl: {
4232         /* The semun argument to semctl is passed by value, so dereference the
4233          * ptr argument. */
4234         abi_ulong atptr;
4235         get_user_ual(atptr, ptr);
4236         ret = do_semctl(first, second, third, atptr);
4237         break;
4238     }
4239 
4240     case IPCOP_msgget:
4241         ret = get_errno(msgget(first, second));
4242         break;
4243 
4244     case IPCOP_msgsnd:
4245         ret = do_msgsnd(first, ptr, second, third);
4246         break;
4247 
4248     case IPCOP_msgctl:
4249         ret = do_msgctl(first, second, ptr);
4250         break;
4251 
4252     case IPCOP_msgrcv:
4253         switch (version) {
4254         case 0:
4255             {
4256                 struct target_ipc_kludge {
4257                     abi_long msgp;
4258                     abi_long msgtyp;
4259                 } *tmp;
4260 
4261                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4262                     ret = -TARGET_EFAULT;
4263                     break;
4264                 }
4265 
4266                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4267 
4268                 unlock_user_struct(tmp, ptr, 0);
4269                 break;
4270             }
4271         default:
4272             ret = do_msgrcv(first, ptr, second, fifth, third);
4273         }
4274         break;
4275 
4276     case IPCOP_shmat:
4277         switch (version) {
4278         default:
4279         {
4280             abi_ulong raddr;
4281             raddr = do_shmat(cpu_env, first, ptr, second);
4282             if (is_error(raddr))
4283                 return get_errno(raddr);
4284             if (put_user_ual(raddr, third))
4285                 return -TARGET_EFAULT;
4286             break;
4287         }
4288         case 1:
4289             ret = -TARGET_EINVAL;
4290             break;
4291         }
4292         break;
4293     case IPCOP_shmdt:
4294         ret = do_shmdt(ptr);
4295         break;
4296 
4297     case IPCOP_shmget:
4298         /* IPC_* flag values are the same on all linux platforms */
4299         ret = get_errno(shmget(first, second, third));
4300         break;
4301 
4302         /* IPC_* and SHM_* command values are the same on all linux platforms */
4303     case IPCOP_shmctl:
4304         ret = do_shmctl(first, second, ptr);
4305         break;
4306     default:
4307         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4308         ret = -TARGET_ENOSYS;
4309         break;
4310     }
4311     return ret;
4312 }
4313 #endif
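
/*
 * Guest-side sketch of the ipc(2) multiplexer that do_ipc() demultiplexes:
 * the operation code is the first argument (with a version in the upper
 * 16 bits) and the remaining arguments are passed through.  Assumes a
 * target that defines __NR_ipc; the SEMOP value mirrors the kernel ABI
 * constant from <linux/ipc.h>, and raw_semop() is made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/sem.h>

#ifndef SEMOP
#define SEMOP 1     /* ipc(2) operation code for semop */
#endif

static int raw_semop(int semid, struct sembuf *sops, unsigned int nsops)
{
#ifdef __NR_ipc
    return syscall(__NR_ipc, SEMOP, semid, nsops, 0, sops);
#else
    return syscall(__NR_semop, semid, sops, nsops);
#endif
}
#endif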
4314 
4315 /* kernel structure types definitions */
4316 
4317 #define STRUCT(name, ...) STRUCT_ ## name,
4318 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4319 enum {
4320 #include "syscall_types.h"
4321 STRUCT_MAX
4322 };
4323 #undef STRUCT
4324 #undef STRUCT_SPECIAL
4325 
4326 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4327 #define STRUCT_SPECIAL(name)
4328 #include "syscall_types.h"
4329 #undef STRUCT
4330 #undef STRUCT_SPECIAL
4331 
4332 typedef struct IOCTLEntry IOCTLEntry;
4333 
4334 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4335                              int fd, int cmd, abi_long arg);
4336 
4337 struct IOCTLEntry {
4338     int target_cmd;
4339     unsigned int host_cmd;
4340     const char *name;
4341     int access;
4342     do_ioctl_fn *do_ioctl;
4343     const argtype arg_type[5];
4344 };
4345 
4346 #define IOC_R 0x0001
4347 #define IOC_W 0x0002
4348 #define IOC_RW (IOC_R | IOC_W)
4349 
4350 #define MAX_STRUCT_SIZE 4096
4351 
4352 #ifdef CONFIG_FIEMAP
4353 /* So fiemap access checks don't overflow on 32-bit systems.
4354  * This is very slightly smaller than the limit imposed by
4355  * the underlying kernel.
4356  */
4357 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4358                             / sizeof(struct fiemap_extent))
4359 
4360 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4361                                        int fd, int cmd, abi_long arg)
4362 {
4363     /* The parameter for this ioctl is a struct fiemap followed
4364      * by an array of struct fiemap_extent whose size is set
4365      * in fiemap->fm_extent_count. The array is filled in by the
4366      * ioctl.
4367      */
4368     int target_size_in, target_size_out;
4369     struct fiemap *fm;
4370     const argtype *arg_type = ie->arg_type;
4371     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4372     void *argptr, *p;
4373     abi_long ret;
4374     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4375     uint32_t outbufsz;
4376     int free_fm = 0;
4377 
4378     assert(arg_type[0] == TYPE_PTR);
4379     assert(ie->access == IOC_RW);
4380     arg_type++;
4381     target_size_in = thunk_type_size(arg_type, 0);
4382     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4383     if (!argptr) {
4384         return -TARGET_EFAULT;
4385     }
4386     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4387     unlock_user(argptr, arg, 0);
4388     fm = (struct fiemap *)buf_temp;
4389     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4390         return -TARGET_EINVAL;
4391     }
4392 
4393     outbufsz = sizeof (*fm) +
4394         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4395 
4396     if (outbufsz > MAX_STRUCT_SIZE) {
4397         /* We can't fit all the extents into the fixed-size buffer.
4398          * Allocate one that is large enough and use it instead.
4399          */
4400         fm = g_try_malloc(outbufsz);
4401         if (!fm) {
4402             return -TARGET_ENOMEM;
4403         }
4404         memcpy(fm, buf_temp, sizeof(struct fiemap));
4405         free_fm = 1;
4406     }
4407     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4408     if (!is_error(ret)) {
4409         target_size_out = target_size_in;
4410         /* An extent_count of 0 means we were only counting the extents,
4411          * so there are no structs to copy
4412          */
4413         if (fm->fm_extent_count != 0) {
4414             target_size_out += fm->fm_mapped_extents * extent_size;
4415         }
4416         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4417         if (!argptr) {
4418             ret = -TARGET_EFAULT;
4419         } else {
4420             /* Convert the struct fiemap */
4421             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4422             if (fm->fm_extent_count != 0) {
4423                 p = argptr + target_size_in;
4424                 /* ...and then all the struct fiemap_extents */
4425                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4426                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4427                                   THUNK_TARGET);
4428                     p += extent_size;
4429                 }
4430             }
4431             unlock_user(argptr, arg, target_size_out);
4432         }
4433     }
4434     if (free_fm) {
4435         g_free(fm);
4436     }
4437     return ret;
4438 }
4439 #endif
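
/*
 * Guest-side sketch of the FS_IOC_FIEMAP layout handled above: a struct
 * fiemap header followed by fm_extent_count extents, with the kernel
 * filling in fm_mapped_extents of them.  get_extents() is made up for
 * illustration.
 */
#if 0 /* illustration only, not built */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static struct fiemap *get_extents(int fd, unsigned int count)
{
    size_t sz = sizeof(struct fiemap) + count * sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);

    if (!fm) {
        return NULL;
    }
    fm->fm_start = 0;
    fm->fm_length = FIEMAP_MAX_OFFSET;
    fm->fm_extent_count = count;
    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        free(fm);
        return NULL;
    }
    return fm;      /* fm->fm_mapped_extents entries are now valid */
}
#endif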
4440 
4441 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4442                                 int fd, int cmd, abi_long arg)
4443 {
4444     const argtype *arg_type = ie->arg_type;
4445     int target_size;
4446     void *argptr;
4447     int ret;
4448     struct ifconf *host_ifconf;
4449     uint32_t outbufsz;
4450     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4451     int target_ifreq_size;
4452     int nb_ifreq;
4453     int free_buf = 0;
4454     int i;
4455     int target_ifc_len;
4456     abi_long target_ifc_buf;
4457     int host_ifc_len;
4458     char *host_ifc_buf;
4459 
4460     assert(arg_type[0] == TYPE_PTR);
4461     assert(ie->access == IOC_RW);
4462 
4463     arg_type++;
4464     target_size = thunk_type_size(arg_type, 0);
4465 
4466     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4467     if (!argptr)
4468         return -TARGET_EFAULT;
4469     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4470     unlock_user(argptr, arg, 0);
4471 
4472     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4473     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4474     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4475 
4476     if (target_ifc_buf != 0) {
4477         target_ifc_len = host_ifconf->ifc_len;
4478         nb_ifreq = target_ifc_len / target_ifreq_size;
4479         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4480 
4481         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4482         if (outbufsz > MAX_STRUCT_SIZE) {
4483             /*
4484              * We can't fit all the ifreq entries into the fixed-size buffer.
4485              * Allocate one that is large enough and use it instead.
4486              */
4487             host_ifconf = malloc(outbufsz);
4488             if (!host_ifconf) {
4489                 return -TARGET_ENOMEM;
4490             }
4491             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4492             free_buf = 1;
4493         }
4494         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4495 
4496         host_ifconf->ifc_len = host_ifc_len;
4497     } else {
4498         host_ifc_buf = NULL;
4499     }
4500     host_ifconf->ifc_buf = host_ifc_buf;
4501 
4502     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4503     if (!is_error(ret)) {
4504         /* convert host ifc_len to target ifc_len */
4505 
4506         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4507         target_ifc_len = nb_ifreq * target_ifreq_size;
4508         host_ifconf->ifc_len = target_ifc_len;
4509 
4510         /* restore target ifc_buf */
4511 
4512         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4513 
4514         /* copy struct ifconf to target user */
4515 
4516         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4517         if (!argptr)
4518             return -TARGET_EFAULT;
4519         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4520         unlock_user(argptr, arg, target_size);
4521 
4522         if (target_ifc_buf != 0) {
4523             /* copy ifreq[] to target user */
4524             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4525             for (i = 0; i < nb_ifreq ; i++) {
4526                 thunk_convert(argptr + i * target_ifreq_size,
4527                               host_ifc_buf + i * sizeof(struct ifreq),
4528                               ifreq_arg_type, THUNK_TARGET);
4529             }
4530             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4531         }
4532     }
4533 
4534     if (free_buf) {
4535         free(host_ifconf);
4536     }
4537 
4538     return ret;
4539 }
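
/*
 * Guest-side sketch of the SIOCGIFCONF request converted above: the
 * caller supplies ifc_len/ifc_buf and walks the returned array of
 * struct ifreq.  Passing a NULL ifc_buf only reports the needed length,
 * which is the target_ifc_buf == 0 branch in do_ioctl_ifconf().
 * list_interfaces() is made up for illustration.
 */
#if 0 /* illustration only, not built */
#include <stdio.h>
#include <sys/ioctl.h>
#include <net/if.h>

static void list_interfaces(int sock)
{
    char buf[8 * sizeof(struct ifreq)];
    struct ifconf ifc;
    int i, n;

    ifc.ifc_len = sizeof(buf);
    ifc.ifc_buf = buf;
    if (ioctl(sock, SIOCGIFCONF, &ifc) < 0) {
        return;
    }
    n = ifc.ifc_len / sizeof(struct ifreq);
    for (i = 0; i < n; i++) {
        printf("%s\n", ifc.ifc_req[i].ifr_name);
    }
}
#endif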
4540 
4541 #if defined(CONFIG_USBFS)
4542 #if HOST_LONG_BITS > 64
4543 #error USBDEVFS thunks do not support >64 bit hosts yet.
4544 #endif
4545 struct live_urb {
4546     uint64_t target_urb_adr;
4547     uint64_t target_buf_adr;
4548     char *target_buf_ptr;
4549     struct usbdevfs_urb host_urb;
4550 };
4551 
4552 static GHashTable *usbdevfs_urb_hashtable(void)
4553 {
4554     static GHashTable *urb_hashtable;
4555 
4556     if (!urb_hashtable) {
4557         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4558     }
4559     return urb_hashtable;
4560 }
4561 
4562 static void urb_hashtable_insert(struct live_urb *urb)
4563 {
4564     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4565     g_hash_table_insert(urb_hashtable, urb, urb);
4566 }
4567 
4568 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4569 {
4570     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4571     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4572 }
4573 
4574 static void urb_hashtable_remove(struct live_urb *urb)
4575 {
4576     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4577     g_hash_table_remove(urb_hashtable, urb);
4578 }
4579 
4580 static abi_long
4581 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4582                           int fd, int cmd, abi_long arg)
4583 {
4584     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4585     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4586     struct live_urb *lurb;
4587     void *argptr;
4588     uint64_t hurb;
4589     int target_size;
4590     uintptr_t target_urb_adr;
4591     abi_long ret;
4592 
4593     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4594 
4595     memset(buf_temp, 0, sizeof(uint64_t));
4596     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4597     if (is_error(ret)) {
4598         return ret;
4599     }
4600 
4601     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4602     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4603     if (!lurb->target_urb_adr) {
4604         return -TARGET_EFAULT;
4605     }
4606     urb_hashtable_remove(lurb);
4607     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4608         lurb->host_urb.buffer_length);
4609     lurb->target_buf_ptr = NULL;
4610 
4611     /* restore the guest buffer pointer */
4612     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4613 
4614     /* update the guest urb struct */
4615     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4616     if (!argptr) {
4617         g_free(lurb);
4618         return -TARGET_EFAULT;
4619     }
4620     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4621     unlock_user(argptr, lurb->target_urb_adr, target_size);
4622 
4623     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4624     /* write back the urb handle */
4625     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4626     if (!argptr) {
4627         g_free(lurb);
4628         return -TARGET_EFAULT;
4629     }
4630 
4631     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4632     target_urb_adr = lurb->target_urb_adr;
4633     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4634     unlock_user(argptr, arg, target_size);
4635 
4636     g_free(lurb);
4637     return ret;
4638 }
4639 
4640 static abi_long
4641 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4642                              uint8_t *buf_temp __attribute__((unused)),
4643                              int fd, int cmd, abi_long arg)
4644 {
4645     struct live_urb *lurb;
4646 
4647     /* map target address back to host URB with metadata. */
4648     lurb = urb_hashtable_lookup(arg);
4649     if (!lurb) {
4650         return -TARGET_EFAULT;
4651     }
4652     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4653 }
4654 
4655 static abi_long
4656 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4657                             int fd, int cmd, abi_long arg)
4658 {
4659     const argtype *arg_type = ie->arg_type;
4660     int target_size;
4661     abi_long ret;
4662     void *argptr;
4663     int rw_dir;
4664     struct live_urb *lurb;
4665 
4666     /*
4667      * Each submitted URB needs to map to a unique ID for the
4668      * kernel, and that unique ID needs to be a pointer to
4669      * host memory.  Hence, we need to malloc for each URB.
4670      * Isochronous transfers have a variable-length struct.
4671      */
4672     arg_type++;
4673     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4674 
4675     /* construct host copy of urb and metadata */
4676     lurb = g_try_malloc0(sizeof(struct live_urb));
4677     if (!lurb) {
4678         return -TARGET_ENOMEM;
4679     }
4680 
4681     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4682     if (!argptr) {
4683         g_free(lurb);
4684         return -TARGET_EFAULT;
4685     }
4686     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4687     unlock_user(argptr, arg, 0);
4688 
4689     lurb->target_urb_adr = arg;
4690     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4691 
4692     /* Buffer space used depends on the endpoint type, so lock the entire buffer. */
4693     /* Control-type URBs should check the buffer contents for the true direction. */
4694     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4695     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4696         lurb->host_urb.buffer_length, 1);
4697     if (lurb->target_buf_ptr == NULL) {
4698         g_free(lurb);
4699         return -TARGET_EFAULT;
4700     }
4701 
4702     /* update buffer pointer in host copy */
4703     lurb->host_urb.buffer = lurb->target_buf_ptr;
4704 
4705     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4706     if (is_error(ret)) {
4707         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4708         g_free(lurb);
4709     } else {
4710         urb_hashtable_insert(lurb);
4711     }
4712 
4713     return ret;
4714 }
4715 #endif /* CONFIG_USBFS */
4716 
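     /*
      * Device-mapper ioctls carry a variable-sized payload after the fixed
      * struct dm_ioctl header, so both the header and the command-specific
      * data need separate target<->host conversion.
      */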
4717 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4718                             int cmd, abi_long arg)
4719 {
4720     void *argptr;
4721     struct dm_ioctl *host_dm;
4722     abi_long guest_data;
4723     uint32_t guest_data_size;
4724     int target_size;
4725     const argtype *arg_type = ie->arg_type;
4726     abi_long ret;
4727     void *big_buf = NULL;
4728     char *host_data;
4729 
4730     arg_type++;
4731     target_size = thunk_type_size(arg_type, 0);
4732     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4733     if (!argptr) {
4734         ret = -TARGET_EFAULT;
4735         goto out;
4736     }
4737     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4738     unlock_user(argptr, arg, 0);
4739 
4740     /* buf_temp is too small, so fetch things into a bigger buffer */
4741     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4742     memcpy(big_buf, buf_temp, target_size);
4743     buf_temp = big_buf;
4744     host_dm = big_buf;
4745 
4746     guest_data = arg + host_dm->data_start;
4747     if ((guest_data - arg) < 0) {
4748         ret = -TARGET_EINVAL;
4749         goto out;
4750     }
4751     guest_data_size = host_dm->data_size - host_dm->data_start;
4752     host_data = (char*)host_dm + host_dm->data_start;
4753 
4754     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4755     if (!argptr) {
4756         ret = -TARGET_EFAULT;
4757         goto out;
4758     }
4759 
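         /* Convert any command-specific input payload to host layout. */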
4760     switch (ie->host_cmd) {
4761     case DM_REMOVE_ALL:
4762     case DM_LIST_DEVICES:
4763     case DM_DEV_CREATE:
4764     case DM_DEV_REMOVE:
4765     case DM_DEV_SUSPEND:
4766     case DM_DEV_STATUS:
4767     case DM_DEV_WAIT:
4768     case DM_TABLE_STATUS:
4769     case DM_TABLE_CLEAR:
4770     case DM_TABLE_DEPS:
4771     case DM_LIST_VERSIONS:
4772         /* no input data */
4773         break;
4774     case DM_DEV_RENAME:
4775     case DM_DEV_SET_GEOMETRY:
4776         /* data contains only strings */
4777         memcpy(host_data, argptr, guest_data_size);
4778         break;
4779     case DM_TARGET_MSG:
4780         memcpy(host_data, argptr, guest_data_size);
4781         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4782         break;
4783     case DM_TABLE_LOAD:
4784     {
4785         void *gspec = argptr;
4786         void *cur_data = host_data;
4787         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4788         int spec_size = thunk_type_size(arg_type, 0);
4789         int i;
4790 
4791         for (i = 0; i < host_dm->target_count; i++) {
4792             struct dm_target_spec *spec = cur_data;
4793             uint32_t next;
4794             int slen;
4795 
4796             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4797             slen = strlen((char*)gspec + spec_size) + 1;
4798             next = spec->next;
4799             spec->next = sizeof(*spec) + slen;
4800             strcpy((char*)&spec[1], gspec + spec_size);
4801             gspec += next;
4802             cur_data += spec->next;
4803         }
4804         break;
4805     }
4806     default:
4807         ret = -TARGET_EINVAL;
4808         unlock_user(argptr, guest_data, 0);
4809         goto out;
4810     }
4811     unlock_user(argptr, guest_data, 0);
4812 
4813     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4814     if (!is_error(ret)) {
4815         guest_data = arg + host_dm->data_start;
4816         guest_data_size = host_dm->data_size - host_dm->data_start;
4817         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
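             /* Convert the command-specific output payload back to target layout. */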
4818         switch (ie->host_cmd) {
4819         case DM_REMOVE_ALL:
4820         case DM_DEV_CREATE:
4821         case DM_DEV_REMOVE:
4822         case DM_DEV_RENAME:
4823         case DM_DEV_SUSPEND:
4824         case DM_DEV_STATUS:
4825         case DM_TABLE_LOAD:
4826         case DM_TABLE_CLEAR:
4827         case DM_TARGET_MSG:
4828         case DM_DEV_SET_GEOMETRY:
4829             /* no return data */
4830             break;
4831         case DM_LIST_DEVICES:
4832         {
4833             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4834             uint32_t remaining_data = guest_data_size;
4835             void *cur_data = argptr;
4836             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4837             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4838 
4839             while (1) {
4840                 uint32_t next = nl->next;
4841                 if (next) {
4842                     nl->next = nl_size + (strlen(nl->name) + 1);
4843                 }
4844                 if (remaining_data < nl->next) {
4845                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4846                     break;
4847                 }
4848                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4849                 strcpy(cur_data + nl_size, nl->name);
4850                 cur_data += nl->next;
4851                 remaining_data -= nl->next;
4852                 if (!next) {
4853                     break;
4854                 }
4855                 nl = (void*)nl + next;
4856             }
4857             break;
4858         }
4859         case DM_DEV_WAIT:
4860         case DM_TABLE_STATUS:
4861         {
4862             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4863             void *cur_data = argptr;
4864             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4865             int spec_size = thunk_type_size(arg_type, 0);
4866             int i;
4867 
4868             for (i = 0; i < host_dm->target_count; i++) {
4869                 uint32_t next = spec->next;
4870                 int slen = strlen((char*)&spec[1]) + 1;
4871                 spec->next = (cur_data - argptr) + spec_size + slen;
4872                 if (guest_data_size < spec->next) {
4873                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4874                     break;
4875                 }
4876                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4877                 strcpy(cur_data + spec_size, (char*)&spec[1]);
4878                 cur_data = argptr + spec->next;
4879                 spec = (void*)host_dm + host_dm->data_start + next;
4880             }
4881             break;
4882         }
4883         case DM_TABLE_DEPS:
4884         {
4885             void *hdata = (void*)host_dm + host_dm->data_start;
4886             int count = *(uint32_t*)hdata;
4887             uint64_t *hdev = hdata + 8;
4888             uint64_t *gdev = argptr + 8;
4889             int i;
4890 
4891             *(uint32_t*)argptr = tswap32(count);
4892             for (i = 0; i < count; i++) {
4893                 *gdev = tswap64(*hdev);
4894                 gdev++;
4895                 hdev++;
4896             }
4897             break;
4898         }
4899         case DM_LIST_VERSIONS:
4900         {
4901             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4902             uint32_t remaining_data = guest_data_size;
4903             void *cur_data = argptr;
4904             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4905             int vers_size = thunk_type_size(arg_type, 0);
4906 
4907             while (1) {
4908                 uint32_t next = vers->next;
4909                 if (next) {
4910                     vers->next = vers_size + (strlen(vers->name) + 1);
4911                 }
4912                 if (remaining_data < vers->next) {
4913                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4914                     break;
4915                 }
4916                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4917                 strcpy(cur_data + vers_size, vers->name);
4918                 cur_data += vers->next;
4919                 remaining_data -= vers->next;
4920                 if (!next) {
4921                     break;
4922                 }
4923                 vers = (void*)vers + next;
4924             }
4925             break;
4926         }
4927         default:
4928             unlock_user(argptr, guest_data, 0);
4929             ret = -TARGET_EINVAL;
4930             goto out;
4931         }
4932         unlock_user(argptr, guest_data, guest_data_size);
4933 
4934         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4935         if (!argptr) {
4936             ret = -TARGET_EFAULT;
4937             goto out;
4938         }
4939         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4940         unlock_user(argptr, arg, target_size);
4941     }
4942 out:
4943     g_free(big_buf);
4944     return ret;
4945 }
4946 
4947 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4948                                int cmd, abi_long arg)
4949 {
4950     void *argptr;
4951     int target_size;
4952     const argtype *arg_type = ie->arg_type;
4953     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4954     abi_long ret;
4955 
4956     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4957     struct blkpg_partition host_part;
4958 
4959     /* Read and convert blkpg */
4960     arg_type++;
4961     target_size = thunk_type_size(arg_type, 0);
4962     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4963     if (!argptr) {
4964         ret = -TARGET_EFAULT;
4965         goto out;
4966     }
4967     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4968     unlock_user(argptr, arg, 0);
4969 
4970     switch (host_blkpg->op) {
4971     case BLKPG_ADD_PARTITION:
4972     case BLKPG_DEL_PARTITION:
4973         /* payload is struct blkpg_partition */
4974         break;
4975     default:
4976         /* Unknown opcode */
4977         ret = -TARGET_EINVAL;
4978         goto out;
4979     }
4980 
4981     /* Read and convert blkpg->data */
4982     arg = (abi_long)(uintptr_t)host_blkpg->data;
4983     target_size = thunk_type_size(part_arg_type, 0);
4984     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4985     if (!argptr) {
4986         ret = -TARGET_EFAULT;
4987         goto out;
4988     }
4989     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4990     unlock_user(argptr, arg, 0);
4991 
4992     /* Swizzle the data pointer to our local copy and call! */
4993     host_blkpg->data = &host_part;
4994     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4995 
4996 out:
4997     return ret;
4998 }
4999 
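     /*
      * Routing-table ioctls: struct rtentry contains an rt_dev pointer to a
      * device-name string which the generic thunk conversion cannot follow,
      * so the structure is converted field by field here.
      */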
5000 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5001                                 int fd, int cmd, abi_long arg)
5002 {
5003     const argtype *arg_type = ie->arg_type;
5004     const StructEntry *se;
5005     const argtype *field_types;
5006     const int *dst_offsets, *src_offsets;
5007     int target_size;
5008     void *argptr;
5009     abi_ulong *target_rt_dev_ptr = NULL;
5010     unsigned long *host_rt_dev_ptr = NULL;
5011     abi_long ret;
5012     int i;
5013 
5014     assert(ie->access == IOC_W);
5015     assert(*arg_type == TYPE_PTR);
5016     arg_type++;
5017     assert(*arg_type == TYPE_STRUCT);
5018     target_size = thunk_type_size(arg_type, 0);
5019     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5020     if (!argptr) {
5021         return -TARGET_EFAULT;
5022     }
5023     arg_type++;
5024     assert(*arg_type == (int)STRUCT_rtentry);
5025     se = struct_entries + *arg_type++;
5026     assert(se->convert[0] == NULL);
5027     /* convert struct here to be able to catch rt_dev string */
5028     field_types = se->field_types;
5029     dst_offsets = se->field_offsets[THUNK_HOST];
5030     src_offsets = se->field_offsets[THUNK_TARGET];
5031     for (i = 0; i < se->nb_fields; i++) {
5032         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5033             assert(*field_types == TYPE_PTRVOID);
5034             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5035             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5036             if (*target_rt_dev_ptr != 0) {
5037                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5038                                                   tswapal(*target_rt_dev_ptr));
5039                 if (!*host_rt_dev_ptr) {
5040                     unlock_user(argptr, arg, 0);
5041                     return -TARGET_EFAULT;
5042                 }
5043             } else {
5044                 *host_rt_dev_ptr = 0;
5045             }
5046             field_types++;
5047             continue;
5048         }
5049         field_types = thunk_convert(buf_temp + dst_offsets[i],
5050                                     argptr + src_offsets[i],
5051                                     field_types, THUNK_HOST);
5052     }
5053     unlock_user(argptr, arg, 0);
5054 
5055     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5056 
5057     assert(host_rt_dev_ptr != NULL);
5058     assert(target_rt_dev_ptr != NULL);
5059     if (*host_rt_dev_ptr != 0) {
5060         unlock_user((void *)*host_rt_dev_ptr,
5061                     *target_rt_dev_ptr, 0);
5062     }
5063     return ret;
5064 }
5065 
5066 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5067                                      int fd, int cmd, abi_long arg)
5068 {
5069     int sig = target_to_host_signal(arg);
5070     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5071 }
5072 
5073 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5074                                     int fd, int cmd, abi_long arg)
5075 {
5076     struct timeval tv;
5077     abi_long ret;
5078 
5079     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5080     if (is_error(ret)) {
5081         return ret;
5082     }
5083 
5084     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5085         if (copy_to_user_timeval(arg, &tv)) {
5086             return -TARGET_EFAULT;
5087         }
5088     } else {
5089         if (copy_to_user_timeval64(arg, &tv)) {
5090             return -TARGET_EFAULT;
5091         }
5092     }
5093 
5094     return ret;
5095 }
5096 
5097 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5098                                       int fd, int cmd, abi_long arg)
5099 {
5100     struct timespec ts;
5101     abi_long ret;
5102 
5103     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5104     if (is_error(ret)) {
5105         return ret;
5106     }
5107 
5108     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5109         if (host_to_target_timespec(arg, &ts)) {
5110             return -TARGET_EFAULT;
5111         }
5112     } else {
5113         if (host_to_target_timespec64(arg, &ts)) {
5114             return -TARGET_EFAULT;
5115         }
5116     }
5117 
5118     return ret;
5119 }
5120 
5121 #ifdef TIOCGPTPEER
5122 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5123                                      int fd, int cmd, abi_long arg)
5124 {
5125     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5126     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5127 }
5128 #endif
5129 
5130 static IOCTLEntry ioctl_entries[] = {
5131 #define IOCTL(cmd, access, ...) \
5132     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5133 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5134     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5135 #define IOCTL_IGNORE(cmd) \
5136     { TARGET_ ## cmd, 0, #cmd },
5137 #include "ioctls.h"
5138     { 0, 0, },
5139 };
5140 
5141 /* ??? Implement proper locking for ioctls.  */
5142 /* do_ioctl() must return target values and target errnos. */
5143 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5144 {
5145     const IOCTLEntry *ie;
5146     const argtype *arg_type;
5147     abi_long ret;
5148     uint8_t buf_temp[MAX_STRUCT_SIZE];
5149     int target_size;
5150     void *argptr;
5151 
5152     ie = ioctl_entries;
5153     for(;;) {
5154         if (ie->target_cmd == 0) {
5155             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5156             return -TARGET_ENOSYS;
5157         }
5158         if (ie->target_cmd == cmd)
5159             break;
5160         ie++;
5161     }
5162     arg_type = ie->arg_type;
5163     if (ie->do_ioctl) {
5164         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5165     } else if (!ie->host_cmd) {
5166         /* Some architectures define BSD ioctls in their headers
5167            that are not implemented in Linux.  */
5168         return -TARGET_ENOSYS;
5169     }
5170 
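         /*
          * Generic path: marshal the argument according to its thunk type
          * description and the declared access direction (IOC_R/W/RW).
          */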
5171     switch(arg_type[0]) {
5172     case TYPE_NULL:
5173         /* no argument */
5174         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5175         break;
5176     case TYPE_PTRVOID:
5177     case TYPE_INT:
5178         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5179         break;
5180     case TYPE_PTR:
5181         arg_type++;
5182         target_size = thunk_type_size(arg_type, 0);
5183         switch(ie->access) {
5184         case IOC_R:
5185             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5186             if (!is_error(ret)) {
5187                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5188                 if (!argptr)
5189                     return -TARGET_EFAULT;
5190                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5191                 unlock_user(argptr, arg, target_size);
5192             }
5193             break;
5194         case IOC_W:
5195             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5196             if (!argptr)
5197                 return -TARGET_EFAULT;
5198             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5199             unlock_user(argptr, arg, 0);
5200             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5201             break;
5202         default:
5203         case IOC_RW:
5204             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5205             if (!argptr)
5206                 return -TARGET_EFAULT;
5207             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5208             unlock_user(argptr, arg, 0);
5209             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5210             if (!is_error(ret)) {
5211                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5212                 if (!argptr)
5213                     return -TARGET_EFAULT;
5214                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5215                 unlock_user(argptr, arg, target_size);
5216             }
5217             break;
5218         }
5219         break;
5220     default:
5221         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5222                  (long)cmd, arg_type[0]);
5223         ret = -TARGET_ENOSYS;
5224         break;
5225     }
5226     return ret;
5227 }
5228 
5229 static const bitmask_transtbl iflag_tbl[] = {
5230         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5231         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5232         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5233         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5234         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5235         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5236         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5237         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5238         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5239         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5240         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5241         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5242         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5243         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5244         { 0, 0, 0, 0 }
5245 };
5246 
5247 static const bitmask_transtbl oflag_tbl[] = {
5248 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5249 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5250 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5251 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5252 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5253 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5254 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5255 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5256 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5257 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5258 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5259 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5260 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5261 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5262 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5263 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5264 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5265 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5266 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5267 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5268 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5269 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5270 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5271 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5272 	{ 0, 0, 0, 0 }
5273 };
5274 
5275 static const bitmask_transtbl cflag_tbl[] = {
5276 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5277 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5278 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5279 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5280 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5281 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5282 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5283 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5284 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5285 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5286 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5287 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5288 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5289 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5290 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5291 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5292 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5293 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5294 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5295 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5296 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5297 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5298 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5299 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5300 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5301 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5302 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5303 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5304 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5305 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5306 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5307 	{ 0, 0, 0, 0 }
5308 };
5309 
5310 static const bitmask_transtbl lflag_tbl[] = {
5311 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5312 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5313 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5314 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5315 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5316 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5317 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5318 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5319 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5320 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5321 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5322 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5323 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5324 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5325 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5326 	{ 0, 0, 0, 0 }
5327 };
5328 
5329 static void target_to_host_termios (void *dst, const void *src)
5330 {
5331     struct host_termios *host = dst;
5332     const struct target_termios *target = src;
5333 
5334     host->c_iflag =
5335         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5336     host->c_oflag =
5337         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5338     host->c_cflag =
5339         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5340     host->c_lflag =
5341         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5342     host->c_line = target->c_line;
5343 
5344     memset(host->c_cc, 0, sizeof(host->c_cc));
5345     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5346     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5347     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5348     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5349     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5350     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5351     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5352     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5353     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5354     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5355     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5356     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5357     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5358     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5359     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5360     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5361     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5362 }
5363 
5364 static void host_to_target_termios (void *dst, const void *src)
5365 {
5366     struct target_termios *target = dst;
5367     const struct host_termios *host = src;
5368 
5369     target->c_iflag =
5370         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5371     target->c_oflag =
5372         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5373     target->c_cflag =
5374         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5375     target->c_lflag =
5376         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5377     target->c_line = host->c_line;
5378 
5379     memset(target->c_cc, 0, sizeof(target->c_cc));
5380     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5381     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5382     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5383     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5384     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5385     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5386     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5387     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5388     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5389     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5390     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5391     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5392     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5393     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5394     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5395     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5396     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5397 }
5398 
5399 static const StructEntry struct_termios_def = {
5400     .convert = { host_to_target_termios, target_to_host_termios },
5401     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5402     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5403 };
5404 
5405 static bitmask_transtbl mmap_flags_tbl[] = {
5406     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5407     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5408     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5409     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5410       MAP_ANONYMOUS, MAP_ANONYMOUS },
5411     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5412       MAP_GROWSDOWN, MAP_GROWSDOWN },
5413     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5414       MAP_DENYWRITE, MAP_DENYWRITE },
5415     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5416       MAP_EXECUTABLE, MAP_EXECUTABLE },
5417     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5418     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5419       MAP_NORESERVE, MAP_NORESERVE },
5420     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5421     /* MAP_STACK has been ignored by the kernel for quite some time.
5422        Recognize it for the target, but do not pass it through to
5423        the host.  */
5424     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5425     { 0, 0, 0, 0 }
5426 };
5427 
5428 #if defined(TARGET_I386)
5429 
5430 /* NOTE: there is really only one LDT shared by all the threads */
5431 static uint8_t *ldt_table;
5432 
5433 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5434 {
5435     int size;
5436     void *p;
5437 
5438     if (!ldt_table)
5439         return 0;
5440     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5441     if (size > bytecount)
5442         size = bytecount;
5443     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5444     if (!p)
5445         return -TARGET_EFAULT;
5446     /* ??? Should this be byteswapped?  */
5447     memcpy(p, ldt_table, size);
5448     unlock_user(p, ptr, size);
5449     return size;
5450 }
5451 
5452 /* XXX: add locking support */
5453 static abi_long write_ldt(CPUX86State *env,
5454                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5455 {
5456     struct target_modify_ldt_ldt_s ldt_info;
5457     struct target_modify_ldt_ldt_s *target_ldt_info;
5458     int seg_32bit, contents, read_exec_only, limit_in_pages;
5459     int seg_not_present, useable, lm;
5460     uint32_t *lp, entry_1, entry_2;
5461 
5462     if (bytecount != sizeof(ldt_info))
5463         return -TARGET_EINVAL;
5464     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5465         return -TARGET_EFAULT;
5466     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5467     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5468     ldt_info.limit = tswap32(target_ldt_info->limit);
5469     ldt_info.flags = tswap32(target_ldt_info->flags);
5470     unlock_user_struct(target_ldt_info, ptr, 0);
5471 
5472     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5473         return -TARGET_EINVAL;
5474     seg_32bit = ldt_info.flags & 1;
5475     contents = (ldt_info.flags >> 1) & 3;
5476     read_exec_only = (ldt_info.flags >> 3) & 1;
5477     limit_in_pages = (ldt_info.flags >> 4) & 1;
5478     seg_not_present = (ldt_info.flags >> 5) & 1;
5479     useable = (ldt_info.flags >> 6) & 1;
5480 #ifdef TARGET_ABI32
5481     lm = 0;
5482 #else
5483     lm = (ldt_info.flags >> 7) & 1;
5484 #endif
5485     if (contents == 3) {
5486         if (oldmode)
5487             return -TARGET_EINVAL;
5488         if (seg_not_present == 0)
5489             return -TARGET_EINVAL;
5490     }
5491     /* allocate the LDT */
5492     if (!ldt_table) {
5493         env->ldt.base = target_mmap(0,
5494                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5495                                     PROT_READ|PROT_WRITE,
5496                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5497         if (env->ldt.base == -1)
5498             return -TARGET_ENOMEM;
5499         memset(g2h(env->ldt.base), 0,
5500                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5501         env->ldt.limit = 0xffff;
5502         ldt_table = g2h(env->ldt.base);
5503     }
5504 
5505     /* NOTE: same code as Linux kernel */
5506     /* Allow LDTs to be cleared by the user. */
5507     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5508         if (oldmode ||
5509             (contents == 0		&&
5510              read_exec_only == 1	&&
5511              seg_32bit == 0		&&
5512              limit_in_pages == 0	&&
5513              seg_not_present == 1	&&
5514              useable == 0 )) {
5515             entry_1 = 0;
5516             entry_2 = 0;
5517             goto install;
5518         }
5519     }
5520 
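         /*
          * Build the two 32-bit words of an x86 descriptor: entry_1 holds
          * the low halves of base and limit, entry_2 packs the high bits of
          * base and limit together with the type and flag bits.
          */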
5521     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5522         (ldt_info.limit & 0x0ffff);
5523     entry_2 = (ldt_info.base_addr & 0xff000000) |
5524         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5525         (ldt_info.limit & 0xf0000) |
5526         ((read_exec_only ^ 1) << 9) |
5527         (contents << 10) |
5528         ((seg_not_present ^ 1) << 15) |
5529         (seg_32bit << 22) |
5530         (limit_in_pages << 23) |
5531         (lm << 21) |
5532         0x7000;
5533     if (!oldmode)
5534         entry_2 |= (useable << 20);
5535 
5536     /* Install the new entry ...  */
5537 install:
5538     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5539     lp[0] = tswap32(entry_1);
5540     lp[1] = tswap32(entry_2);
5541     return 0;
5542 }
5543 
5544 /* specific and weird i386 syscalls */
5545 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5546                               unsigned long bytecount)
5547 {
5548     abi_long ret;
5549 
5550     switch (func) {
5551     case 0:
5552         ret = read_ldt(ptr, bytecount);
5553         break;
5554     case 1:
5555         ret = write_ldt(env, ptr, bytecount, 1);
5556         break;
5557     case 0x11:
5558         ret = write_ldt(env, ptr, bytecount, 0);
5559         break;
5560     default:
5561         ret = -TARGET_ENOSYS;
5562         break;
5563     }
5564     return ret;
5565 }
5566 
5567 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5568 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5569 {
5570     uint64_t *gdt_table = g2h(env->gdt.base);
5571     struct target_modify_ldt_ldt_s ldt_info;
5572     struct target_modify_ldt_ldt_s *target_ldt_info;
5573     int seg_32bit, contents, read_exec_only, limit_in_pages;
5574     int seg_not_present, useable, lm;
5575     uint32_t *lp, entry_1, entry_2;
5576     int i;
5577 
5578     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5579     if (!target_ldt_info)
5580         return -TARGET_EFAULT;
5581     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5582     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5583     ldt_info.limit = tswap32(target_ldt_info->limit);
5584     ldt_info.flags = tswap32(target_ldt_info->flags);
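         /* entry_number == -1 means pick a free TLS slot in the GDT and
            report the chosen index back to the guest. */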
5585     if (ldt_info.entry_number == -1) {
5586         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
5587             if (gdt_table[i] == 0) {
5588                 ldt_info.entry_number = i;
5589                 target_ldt_info->entry_number = tswap32(i);
5590                 break;
5591             }
5592         }
5593     }
5594     unlock_user_struct(target_ldt_info, ptr, 1);
5595 
5596     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5597         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5598            return -TARGET_EINVAL;
5599     seg_32bit = ldt_info.flags & 1;
5600     contents = (ldt_info.flags >> 1) & 3;
5601     read_exec_only = (ldt_info.flags >> 3) & 1;
5602     limit_in_pages = (ldt_info.flags >> 4) & 1;
5603     seg_not_present = (ldt_info.flags >> 5) & 1;
5604     useable = (ldt_info.flags >> 6) & 1;
5605 #ifdef TARGET_ABI32
5606     lm = 0;
5607 #else
5608     lm = (ldt_info.flags >> 7) & 1;
5609 #endif
5610 
5611     if (contents == 3) {
5612         if (seg_not_present == 0)
5613             return -TARGET_EINVAL;
5614     }
5615 
5616     /* NOTE: same code as Linux kernel */
5617     /* Allow LDTs to be cleared by the user. */
5618     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5619         if ((contents == 0             &&
5620              read_exec_only == 1       &&
5621              seg_32bit == 0            &&
5622              limit_in_pages == 0       &&
5623              seg_not_present == 1      &&
5624              useable == 0 )) {
5625             entry_1 = 0;
5626             entry_2 = 0;
5627             goto install;
5628         }
5629     }
5630 
5631     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5632         (ldt_info.limit & 0x0ffff);
5633     entry_2 = (ldt_info.base_addr & 0xff000000) |
5634         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5635         (ldt_info.limit & 0xf0000) |
5636         ((read_exec_only ^ 1) << 9) |
5637         (contents << 10) |
5638         ((seg_not_present ^ 1) << 15) |
5639         (seg_32bit << 22) |
5640         (limit_in_pages << 23) |
5641         (useable << 20) |
5642         (lm << 21) |
5643         0x7000;
5644 
5645     /* Install the new entry ...  */
5646 install:
5647     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5648     lp[0] = tswap32(entry_1);
5649     lp[1] = tswap32(entry_2);
5650     return 0;
5651 }
5652 
5653 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5654 {
5655     struct target_modify_ldt_ldt_s *target_ldt_info;
5656     uint64_t *gdt_table = g2h(env->gdt.base);
5657     uint32_t base_addr, limit, flags;
5658     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5659     int seg_not_present, useable, lm;
5660     uint32_t *lp, entry_1, entry_2;
5661 
5662     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5663     if (!target_ldt_info)
5664         return -TARGET_EFAULT;
5665     idx = tswap32(target_ldt_info->entry_number);
5666     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5667         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5668         unlock_user_struct(target_ldt_info, ptr, 1);
5669         return -TARGET_EINVAL;
5670     }
5671     lp = (uint32_t *)(gdt_table + idx);
5672     entry_1 = tswap32(lp[0]);
5673     entry_2 = tswap32(lp[1]);
5674 
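         /* Unpack the descriptor words back into the modify_ldt flags layout. */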
5675     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5676     contents = (entry_2 >> 10) & 3;
5677     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5678     seg_32bit = (entry_2 >> 22) & 1;
5679     limit_in_pages = (entry_2 >> 23) & 1;
5680     useable = (entry_2 >> 20) & 1;
5681 #ifdef TARGET_ABI32
5682     lm = 0;
5683 #else
5684     lm = (entry_2 >> 21) & 1;
5685 #endif
5686     flags = (seg_32bit << 0) | (contents << 1) |
5687         (read_exec_only << 3) | (limit_in_pages << 4) |
5688         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5689     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5690     base_addr = (entry_1 >> 16) |
5691         (entry_2 & 0xff000000) |
5692         ((entry_2 & 0xff) << 16);
5693     target_ldt_info->base_addr = tswapal(base_addr);
5694     target_ldt_info->limit = tswap32(limit);
5695     target_ldt_info->flags = tswap32(flags);
5696     unlock_user_struct(target_ldt_info, ptr, 1);
5697     return 0;
5698 }
5699 #endif /* TARGET_I386 && TARGET_ABI32 */
5700 
5701 #ifndef TARGET_ABI32
5702 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5703 {
5704     abi_long ret = 0;
5705     abi_ulong val;
5706     int idx;
5707 
5708     switch(code) {
5709     case TARGET_ARCH_SET_GS:
5710     case TARGET_ARCH_SET_FS:
5711         if (code == TARGET_ARCH_SET_GS)
5712             idx = R_GS;
5713         else
5714             idx = R_FS;
5715         cpu_x86_load_seg(env, idx, 0);
5716         env->segs[idx].base = addr;
5717         break;
5718     case TARGET_ARCH_GET_GS:
5719     case TARGET_ARCH_GET_FS:
5720         if (code == TARGET_ARCH_GET_GS)
5721             idx = R_GS;
5722         else
5723             idx = R_FS;
5724         val = env->segs[idx].base;
5725         if (put_user(val, addr, abi_ulong))
5726             ret = -TARGET_EFAULT;
5727         break;
5728     default:
5729         ret = -TARGET_EINVAL;
5730         break;
5731     }
5732     return ret;
5733 }
5734 #endif
5735 
5736 #endif /* defined(TARGET_I386) */
5737 
5738 #define NEW_STACK_SIZE 0x40000
5739 
5740 
5741 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5742 typedef struct {
5743     CPUArchState *env;
5744     pthread_mutex_t mutex;
5745     pthread_cond_t cond;
5746     pthread_t thread;
5747     uint32_t tid;
5748     abi_ulong child_tidptr;
5749     abi_ulong parent_tidptr;
5750     sigset_t sigmask;
5751 } new_thread_info;
5752 
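     /*
      * Thread entry point for clone() with CLONE_VM: register the new
      * thread with RCU and TCG, publish its TID, signal the parent and
      * then run the guest CPU loop.
      */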
5753 static void *clone_func(void *arg)
5754 {
5755     new_thread_info *info = arg;
5756     CPUArchState *env;
5757     CPUState *cpu;
5758     TaskState *ts;
5759 
5760     rcu_register_thread();
5761     tcg_register_thread();
5762     env = info->env;
5763     cpu = env_cpu(env);
5764     thread_cpu = cpu;
5765     ts = (TaskState *)cpu->opaque;
5766     info->tid = sys_gettid();
5767     task_settid(ts);
5768     if (info->child_tidptr)
5769         put_user_u32(info->tid, info->child_tidptr);
5770     if (info->parent_tidptr)
5771         put_user_u32(info->tid, info->parent_tidptr);
5772     qemu_guest_random_seed_thread_part2(cpu->random_seed);
5773     /* Enable signals.  */
5774     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5775     /* Signal to the parent that we're ready.  */
5776     pthread_mutex_lock(&info->mutex);
5777     pthread_cond_broadcast(&info->cond);
5778     pthread_mutex_unlock(&info->mutex);
5779     /* Wait until the parent has finished initializing the tls state.  */
5780     pthread_mutex_lock(&clone_lock);
5781     pthread_mutex_unlock(&clone_lock);
5782     cpu_loop(env);
5783     /* never exits */
5784     return NULL;
5785 }
5786 
5787 /* do_fork() must return host values and target errnos (unlike most
5788    do_*() functions). */
5789 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5790                    abi_ulong parent_tidptr, target_ulong newtls,
5791                    abi_ulong child_tidptr)
5792 {
5793     CPUState *cpu = env_cpu(env);
5794     int ret;
5795     TaskState *ts;
5796     CPUState *new_cpu;
5797     CPUArchState *new_env;
5798     sigset_t sigmask;
5799 
5800     flags &= ~CLONE_IGNORED_FLAGS;
5801 
5802     /* Emulate vfork() with fork() */
5803     if (flags & CLONE_VFORK)
5804         flags &= ~(CLONE_VFORK | CLONE_VM);
5805 
5806     if (flags & CLONE_VM) {
5807         TaskState *parent_ts = (TaskState *)cpu->opaque;
5808         new_thread_info info;
5809         pthread_attr_t attr;
5810 
5811         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5812             (flags & CLONE_INVALID_THREAD_FLAGS)) {
5813             return -TARGET_EINVAL;
5814         }
5815 
5816         ts = g_new0(TaskState, 1);
5817         init_task_state(ts);
5818 
5819         /* Grab a mutex so that thread setup appears atomic.  */
5820         pthread_mutex_lock(&clone_lock);
5821 
5822         /* we create a new CPU instance. */
5823         new_env = cpu_copy(env);
5824         /* Init regs that differ from the parent.  */
5825         cpu_clone_regs_child(new_env, newsp, flags);
5826         cpu_clone_regs_parent(env, flags);
5827         new_cpu = env_cpu(new_env);
5828         new_cpu->opaque = ts;
5829         ts->bprm = parent_ts->bprm;
5830         ts->info = parent_ts->info;
5831         ts->signal_mask = parent_ts->signal_mask;
5832 
5833         if (flags & CLONE_CHILD_CLEARTID) {
5834             ts->child_tidptr = child_tidptr;
5835         }
5836 
5837         if (flags & CLONE_SETTLS) {
5838             cpu_set_tls (new_env, newtls);
5839         }
5840 
5841         memset(&info, 0, sizeof(info));
5842         pthread_mutex_init(&info.mutex, NULL);
5843         pthread_mutex_lock(&info.mutex);
5844         pthread_cond_init(&info.cond, NULL);
5845         info.env = new_env;
5846         if (flags & CLONE_CHILD_SETTID) {
5847             info.child_tidptr = child_tidptr;
5848         }
5849         if (flags & CLONE_PARENT_SETTID) {
5850             info.parent_tidptr = parent_tidptr;
5851         }
5852 
5853         ret = pthread_attr_init(&attr);
5854         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5855         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5856         /* It is not safe to deliver signals until the child has finished
5857            initializing, so temporarily block all signals.  */
5858         sigfillset(&sigmask);
5859         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5860         cpu->random_seed = qemu_guest_random_seed_thread_part1();
5861 
5862         /* If this is our first additional thread, we need to ensure we
5863          * generate code for parallel execution and flush old translations.
5864          */
5865         if (!parallel_cpus) {
5866             parallel_cpus = true;
5867             tb_flush(cpu);
5868         }
5869 
5870         ret = pthread_create(&info.thread, &attr, clone_func, &info);
5871         /* TODO: Free new CPU state if thread creation failed.  */
5872 
5873         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5874         pthread_attr_destroy(&attr);
5875         if (ret == 0) {
5876             /* Wait for the child to initialize.  */
5877             pthread_cond_wait(&info.cond, &info.mutex);
5878             ret = info.tid;
5879         } else {
5880             ret = -1;
5881         }
5882         pthread_mutex_unlock(&info.mutex);
5883         pthread_cond_destroy(&info.cond);
5884         pthread_mutex_destroy(&info.mutex);
5885         pthread_mutex_unlock(&clone_lock);
5886     } else {
5887         /* if there is no CLONE_VM, we consider it a fork */
5888         if (flags & CLONE_INVALID_FORK_FLAGS) {
5889             return -TARGET_EINVAL;
5890         }
5891 
5892         /* We can't support custom termination signals */
5893         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5894             return -TARGET_EINVAL;
5895         }
5896 
5897         if (block_signals()) {
5898             return -TARGET_ERESTARTSYS;
5899         }
5900 
5901         fork_start();
5902         ret = fork();
5903         if (ret == 0) {
5904             /* Child Process.  */
5905             cpu_clone_regs_child(env, newsp, flags);
5906             fork_end(1);
5907             /* There is a race condition here.  The parent process could
5908                theoretically read the TID in the child process before the child
5909                tid is set.  This would require using either ptrace
5910                (not implemented) or having *_tidptr point at a shared memory
5911                mapping.  We can't repeat the spinlock hack used above because
5912                the child process gets its own copy of the lock.  */
5913             if (flags & CLONE_CHILD_SETTID)
5914                 put_user_u32(sys_gettid(), child_tidptr);
5915             if (flags & CLONE_PARENT_SETTID)
5916                 put_user_u32(sys_gettid(), parent_tidptr);
5917             ts = (TaskState *)cpu->opaque;
5918             if (flags & CLONE_SETTLS)
5919                 cpu_set_tls (env, newtls);
5920             if (flags & CLONE_CHILD_CLEARTID)
5921                 ts->child_tidptr = child_tidptr;
5922         } else {
5923             cpu_clone_regs_parent(env, flags);
5924             fork_end(0);
5925         }
5926     }
5927     return ret;
5928 }
5929 
5930 /* Warning: doesn't handle Linux-specific flags... */
5931 static int target_to_host_fcntl_cmd(int cmd)
5932 {
5933     int ret;
5934 
5935     switch(cmd) {
5936     case TARGET_F_DUPFD:
5937     case TARGET_F_GETFD:
5938     case TARGET_F_SETFD:
5939     case TARGET_F_GETFL:
5940     case TARGET_F_SETFL:
5941         ret = cmd;
5942         break;
5943     case TARGET_F_GETLK:
5944         ret = F_GETLK64;
5945         break;
5946     case TARGET_F_SETLK:
5947         ret = F_SETLK64;
5948         break;
5949     case TARGET_F_SETLKW:
5950         ret = F_SETLKW64;
5951         break;
5952     case TARGET_F_GETOWN:
5953         ret = F_GETOWN;
5954         break;
5955     case TARGET_F_SETOWN:
5956         ret = F_SETOWN;
5957         break;
5958     case TARGET_F_GETSIG:
5959         ret = F_GETSIG;
5960         break;
5961     case TARGET_F_SETSIG:
5962         ret = F_SETSIG;
5963         break;
5964 #if TARGET_ABI_BITS == 32
5965     case TARGET_F_GETLK64:
5966         ret = F_GETLK64;
5967         break;
5968     case TARGET_F_SETLK64:
5969         ret = F_SETLK64;
5970         break;
5971     case TARGET_F_SETLKW64:
5972         ret = F_SETLKW64;
5973         break;
5974 #endif
5975     case TARGET_F_SETLEASE:
5976         ret = F_SETLEASE;
5977         break;
5978     case TARGET_F_GETLEASE:
5979         ret = F_GETLEASE;
5980         break;
5981 #ifdef F_DUPFD_CLOEXEC
5982     case TARGET_F_DUPFD_CLOEXEC:
5983         ret = F_DUPFD_CLOEXEC;
5984         break;
5985 #endif
5986     case TARGET_F_NOTIFY:
5987         ret = F_NOTIFY;
5988         break;
5989 #ifdef F_GETOWN_EX
5990     case TARGET_F_GETOWN_EX:
5991         ret = F_GETOWN_EX;
5992         break;
5993 #endif
5994 #ifdef F_SETOWN_EX
5995     case TARGET_F_SETOWN_EX:
5996         ret = F_SETOWN_EX;
5997         break;
5998 #endif
5999 #ifdef F_SETPIPE_SZ
6000     case TARGET_F_SETPIPE_SZ:
6001         ret = F_SETPIPE_SZ;
6002         break;
6003     case TARGET_F_GETPIPE_SZ:
6004         ret = F_GETPIPE_SZ;
6005         break;
6006 #endif
6007     default:
6008         ret = -TARGET_EINVAL;
6009         break;
6010     }
6011 
6012 #if defined(__powerpc64__)
6013     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6014      * is not supported by kernel. The glibc fcntl call actually adjusts
6015      * them to 5, 6 and 7 before making the syscall(). Since we make the
6016      * syscall directly, adjust to what is supported by the kernel.
6017      */
6018     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6019         ret -= F_GETLK64 - 5;
6020     }
6021 #endif
6022 
6023     return ret;
6024 }
6025 
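     /*
      * FLOCK_TRANSTBL expands to the body of a switch on 'type'; the two
      * converters below instantiate it with TRANSTBL_CONVERT defined for
      * each direction.
      */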
6026 #define FLOCK_TRANSTBL \
6027     switch (type) { \
6028     TRANSTBL_CONVERT(F_RDLCK); \
6029     TRANSTBL_CONVERT(F_WRLCK); \
6030     TRANSTBL_CONVERT(F_UNLCK); \
6031     TRANSTBL_CONVERT(F_EXLCK); \
6032     TRANSTBL_CONVERT(F_SHLCK); \
6033     }
6034 
6035 static int target_to_host_flock(int type)
6036 {
6037 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6038     FLOCK_TRANSTBL
6039 #undef  TRANSTBL_CONVERT
6040     return -TARGET_EINVAL;
6041 }
6042 
6043 static int host_to_target_flock(int type)
6044 {
6045 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6046     FLOCK_TRANSTBL
6047 #undef  TRANSTBL_CONVERT
6048     /* if we don't know how to convert the value coming
6049      * from the host, we copy it to the target field as-is
6050      */
6051     return type;
6052 }
6053 
6054 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6055                                             abi_ulong target_flock_addr)
6056 {
6057     struct target_flock *target_fl;
6058     int l_type;
6059 
6060     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6061         return -TARGET_EFAULT;
6062     }
6063 
6064     __get_user(l_type, &target_fl->l_type);
6065     l_type = target_to_host_flock(l_type);
6066     if (l_type < 0) {
6067         return l_type;
6068     }
6069     fl->l_type = l_type;
6070     __get_user(fl->l_whence, &target_fl->l_whence);
6071     __get_user(fl->l_start, &target_fl->l_start);
6072     __get_user(fl->l_len, &target_fl->l_len);
6073     __get_user(fl->l_pid, &target_fl->l_pid);
6074     unlock_user_struct(target_fl, target_flock_addr, 0);
6075     return 0;
6076 }
6077 
6078 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6079                                           const struct flock64 *fl)
6080 {
6081     struct target_flock *target_fl;
6082     short l_type;
6083 
6084     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6085         return -TARGET_EFAULT;
6086     }
6087 
6088     l_type = host_to_target_flock(fl->l_type);
6089     __put_user(l_type, &target_fl->l_type);
6090     __put_user(fl->l_whence, &target_fl->l_whence);
6091     __put_user(fl->l_start, &target_fl->l_start);
6092     __put_user(fl->l_len, &target_fl->l_len);
6093     __put_user(fl->l_pid, &target_fl->l_pid);
6094     unlock_user_struct(target_fl, target_flock_addr, 1);
6095     return 0;
6096 }
6097 
6098 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6099 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6100 
6101 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6102 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6103                                                    abi_ulong target_flock_addr)
6104 {
6105     struct target_oabi_flock64 *target_fl;
6106     int l_type;
6107 
6108     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6109         return -TARGET_EFAULT;
6110     }
6111 
6112     __get_user(l_type, &target_fl->l_type);
6113     l_type = target_to_host_flock(l_type);
6114     if (l_type < 0) {
6115         return l_type;
6116     }
6117     fl->l_type = l_type;
6118     __get_user(fl->l_whence, &target_fl->l_whence);
6119     __get_user(fl->l_start, &target_fl->l_start);
6120     __get_user(fl->l_len, &target_fl->l_len);
6121     __get_user(fl->l_pid, &target_fl->l_pid);
6122     unlock_user_struct(target_fl, target_flock_addr, 0);
6123     return 0;
6124 }
6125 
6126 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6127                                                  const struct flock64 *fl)
6128 {
6129     struct target_oabi_flock64 *target_fl;
6130     short l_type;
6131 
6132     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6133         return -TARGET_EFAULT;
6134     }
6135 
6136     l_type = host_to_target_flock(fl->l_type);
6137     __put_user(l_type, &target_fl->l_type);
6138     __put_user(fl->l_whence, &target_fl->l_whence);
6139     __put_user(fl->l_start, &target_fl->l_start);
6140     __put_user(fl->l_len, &target_fl->l_len);
6141     __put_user(fl->l_pid, &target_fl->l_pid);
6142     unlock_user_struct(target_fl, target_flock_addr, 1);
6143     return 0;
6144 }
6145 #endif
6146 
6147 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6148                                               abi_ulong target_flock_addr)
6149 {
6150     struct target_flock64 *target_fl;
6151     int l_type;
6152 
6153     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6154         return -TARGET_EFAULT;
6155     }
6156 
6157     __get_user(l_type, &target_fl->l_type);
6158     l_type = target_to_host_flock(l_type);
6159     if (l_type < 0) {
6160         return l_type;
6161     }
6162     fl->l_type = l_type;
6163     __get_user(fl->l_whence, &target_fl->l_whence);
6164     __get_user(fl->l_start, &target_fl->l_start);
6165     __get_user(fl->l_len, &target_fl->l_len);
6166     __get_user(fl->l_pid, &target_fl->l_pid);
6167     unlock_user_struct(target_fl, target_flock_addr, 0);
6168     return 0;
6169 }
6170 
6171 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6172                                             const struct flock64 *fl)
6173 {
6174     struct target_flock64 *target_fl;
6175     short l_type;
6176 
6177     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6178         return -TARGET_EFAULT;
6179     }
6180 
6181     l_type = host_to_target_flock(fl->l_type);
6182     __put_user(l_type, &target_fl->l_type);
6183     __put_user(fl->l_whence, &target_fl->l_whence);
6184     __put_user(fl->l_start, &target_fl->l_start);
6185     __put_user(fl->l_len, &target_fl->l_len);
6186     __put_user(fl->l_pid, &target_fl->l_pid);
6187     unlock_user_struct(target_fl, target_flock_addr, 1);
6188     return 0;
6189 }
6190 
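/* Implement the guest fcntl(): translate the command number and any
 * flock/f_owner_ex structures between guest and host layouts, and
 * convert file status flags through fcntl_flags_tbl.
 */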
6191 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6192 {
6193     struct flock64 fl64;
6194 #ifdef F_GETOWN_EX
6195     struct f_owner_ex fox;
6196     struct target_f_owner_ex *target_fox;
6197 #endif
6198     abi_long ret;
6199     int host_cmd = target_to_host_fcntl_cmd(cmd);
6200 
6201     if (host_cmd == -TARGET_EINVAL)
6202         return host_cmd;
6203 
6204     switch(cmd) {
6205     case TARGET_F_GETLK:
6206         ret = copy_from_user_flock(&fl64, arg);
6207         if (ret) {
6208             return ret;
6209         }
6210         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6211         if (ret == 0) {
6212             ret = copy_to_user_flock(arg, &fl64);
6213         }
6214         break;
6215 
6216     case TARGET_F_SETLK:
6217     case TARGET_F_SETLKW:
6218         ret = copy_from_user_flock(&fl64, arg);
6219         if (ret) {
6220             return ret;
6221         }
6222         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6223         break;
6224 
6225     case TARGET_F_GETLK64:
6226         ret = copy_from_user_flock64(&fl64, arg);
6227         if (ret) {
6228             return ret;
6229         }
6230         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6231         if (ret == 0) {
6232             ret = copy_to_user_flock64(arg, &fl64);
6233         }
6234         break;
6235     case TARGET_F_SETLK64:
6236     case TARGET_F_SETLKW64:
6237         ret = copy_from_user_flock64(&fl64, arg);
6238         if (ret) {
6239             return ret;
6240         }
6241         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6242         break;
6243 
6244     case TARGET_F_GETFL:
6245         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6246         if (ret >= 0) {
6247             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6248         }
6249         break;
6250 
6251     case TARGET_F_SETFL:
6252         ret = get_errno(safe_fcntl(fd, host_cmd,
6253                                    target_to_host_bitmask(arg,
6254                                                           fcntl_flags_tbl)));
6255         break;
6256 
6257 #ifdef F_GETOWN_EX
6258     case TARGET_F_GETOWN_EX:
6259         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6260         if (ret >= 0) {
6261             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6262                 return -TARGET_EFAULT;
6263             target_fox->type = tswap32(fox.type);
6264             target_fox->pid = tswap32(fox.pid);
6265             unlock_user_struct(target_fox, arg, 1);
6266         }
6267         break;
6268 #endif
6269 
6270 #ifdef F_SETOWN_EX
6271     case TARGET_F_SETOWN_EX:
6272         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6273             return -TARGET_EFAULT;
6274         fox.type = tswap32(target_fox->type);
6275         fox.pid = tswap32(target_fox->pid);
6276         unlock_user_struct(target_fox, arg, 0);
6277         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6278         break;
6279 #endif
6280 
6281     case TARGET_F_SETOWN:
6282     case TARGET_F_GETOWN:
6283     case TARGET_F_SETSIG:
6284     case TARGET_F_GETSIG:
6285     case TARGET_F_SETLEASE:
6286     case TARGET_F_GETLEASE:
6287     case TARGET_F_SETPIPE_SZ:
6288     case TARGET_F_GETPIPE_SZ:
6289         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6290         break;
6291 
6292     default:
6293         ret = get_errno(safe_fcntl(fd, cmd, arg));
6294         break;
6295     }
6296     return ret;
6297 }
6298 
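/* Helpers for targets whose legacy syscalls carry 16-bit UIDs/GIDs:
 * values that do not fit in 16 bits are reported to the guest as 65534,
 * and a 16-bit -1 read from the guest is widened back to -1.
 */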
6299 #ifdef USE_UID16
6300 
6301 static inline int high2lowuid(int uid)
6302 {
6303     if (uid > 65535)
6304         return 65534;
6305     else
6306         return uid;
6307 }
6308 
6309 static inline int high2lowgid(int gid)
6310 {
6311     if (gid > 65535)
6312         return 65534;
6313     else
6314         return gid;
6315 }
6316 
6317 static inline int low2highuid(int uid)
6318 {
6319     if ((int16_t)uid == -1)
6320         return -1;
6321     else
6322         return uid;
6323 }
6324 
6325 static inline int low2highgid(int gid)
6326 {
6327     if ((int16_t)gid == -1)
6328         return -1;
6329     else
6330         return gid;
6331 }
6332 static inline int tswapid(int id)
6333 {
6334     return tswap16(id);
6335 }
6336 
6337 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6338 
6339 #else /* !USE_UID16 */
6340 static inline int high2lowuid(int uid)
6341 {
6342     return uid;
6343 }
6344 static inline int high2lowgid(int gid)
6345 {
6346     return gid;
6347 }
6348 static inline int low2highuid(int uid)
6349 {
6350     return uid;
6351 }
6352 static inline int low2highgid(int gid)
6353 {
6354     return gid;
6355 }
6356 static inline int tswapid(int id)
6357 {
6358     return tswap32(id);
6359 }
6360 
6361 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6362 
6363 #endif /* USE_UID16 */
6364 
6365 /* We must do direct syscalls for setting UID/GID, because we want to
6366  * implement the Linux system call semantics of "change only for this thread",
6367  * not the libc/POSIX semantics of "change for all threads in process".
6368  * (See http://ewontfix.com/17/ for more details.)
6369  * We use the 32-bit version of the syscalls if present; if it is not
6370  * then either the host architecture supports 32-bit UIDs natively with
6371  * the standard syscall, or the 16-bit UID is the best we can do.
6372  */
6373 #ifdef __NR_setuid32
6374 #define __NR_sys_setuid __NR_setuid32
6375 #else
6376 #define __NR_sys_setuid __NR_setuid
6377 #endif
6378 #ifdef __NR_setgid32
6379 #define __NR_sys_setgid __NR_setgid32
6380 #else
6381 #define __NR_sys_setgid __NR_setgid
6382 #endif
6383 #ifdef __NR_setresuid32
6384 #define __NR_sys_setresuid __NR_setresuid32
6385 #else
6386 #define __NR_sys_setresuid __NR_setresuid
6387 #endif
6388 #ifdef __NR_setresgid32
6389 #define __NR_sys_setresgid __NR_setresgid32
6390 #else
6391 #define __NR_sys_setresgid __NR_setresgid
6392 #endif
6393 
6394 _syscall1(int, sys_setuid, uid_t, uid)
6395 _syscall1(int, sys_setgid, gid_t, gid)
6396 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6397 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6398 
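/* One-time setup: register the struct layouts used by the ioctl thunks,
 * build the reverse errno translation table, and patch or sanity-check
 * the size field encoded in each ioctl number.
 */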
6399 void syscall_init(void)
6400 {
6401     IOCTLEntry *ie;
6402     const argtype *arg_type;
6403     int size;
6404     int i;
6405 
6406     thunk_init(STRUCT_MAX);
6407 
6408 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6409 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6410 #include "syscall_types.h"
6411 #undef STRUCT
6412 #undef STRUCT_SPECIAL
6413 
6414     /* Build target_to_host_errno_table[] from
6415      * host_to_target_errno_table[]. */
6416     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6417         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6418     }
6419 
6420     /* We patch the ioctl size if necessary.  We rely on the fact that
6421        no ioctl has all the bits set to '1' in the size field.  */
6422     ie = ioctl_entries;
6423     while (ie->target_cmd != 0) {
6424         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6425             TARGET_IOC_SIZEMASK) {
6426             arg_type = ie->arg_type;
6427             if (arg_type[0] != TYPE_PTR) {
6428                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6429                         ie->target_cmd);
6430                 exit(1);
6431             }
6432             arg_type++;
6433             size = thunk_type_size(arg_type, 0);
6434             ie->target_cmd = (ie->target_cmd &
6435                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6436                 (size << TARGET_IOC_SIZESHIFT);
6437         }
6438 
6439         /* automatic consistency check if same arch */
6440 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6441     (defined(__x86_64__) && defined(TARGET_X86_64))
6442         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6443             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6444                     ie->name, ie->target_cmd, ie->host_cmd);
6445         }
6446 #endif
6447         ie++;
6448     }
6449 }
6450 
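/* Combine the two guest registers that carry a 64-bit file offset on
 * 32-bit ABIs into a single value, honouring the target's word order;
 * on 64-bit ABIs the offset already arrives in one register.
 */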
6451 #if TARGET_ABI_BITS == 32
6452 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6453 {
6454 #ifdef TARGET_WORDS_BIGENDIAN
6455     return ((uint64_t)word0 << 32) | word1;
6456 #else
6457     return ((uint64_t)word1 << 32) | word0;
6458 #endif
6459 }
6460 #else /* TARGET_ABI_BITS == 32 */
6461 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6462 {
6463     return word0;
6464 }
6465 #endif /* TARGET_ABI_BITS != 32 */
6466 
6467 #ifdef TARGET_NR_truncate64
6468 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6469                                          abi_long arg2,
6470                                          abi_long arg3,
6471                                          abi_long arg4)
6472 {
6473     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6474         arg2 = arg3;
6475         arg3 = arg4;
6476     }
6477     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6478 }
6479 #endif
6480 
6481 #ifdef TARGET_NR_ftruncate64
6482 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6483                                           abi_long arg2,
6484                                           abi_long arg3,
6485                                           abi_long arg4)
6486 {
6487     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6488         arg2 = arg3;
6489         arg3 = arg4;
6490     }
6491     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6492 }
6493 #endif
6494 
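/* Converters between the guest and host itimerspec/timex layouts; the
 * tswapal()/__get_user/__put_user accessors take care of any byte order
 * and word size differences.
 */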
6495 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6496                                                  abi_ulong target_addr)
6497 {
6498     struct target_itimerspec *target_itspec;
6499 
6500     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6501         return -TARGET_EFAULT;
6502     }
6503 
6504     host_itspec->it_interval.tv_sec =
6505                             tswapal(target_itspec->it_interval.tv_sec);
6506     host_itspec->it_interval.tv_nsec =
6507                             tswapal(target_itspec->it_interval.tv_nsec);
6508     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6509     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6510 
6511     unlock_user_struct(target_itspec, target_addr, 1);
6512     return 0;
6513 }
6514 
6515 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6516                                                struct itimerspec *host_its)
6517 {
6518     struct target_itimerspec *target_itspec;
6519 
6520     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6521         return -TARGET_EFAULT;
6522     }
6523 
6524     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6525     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6526 
6527     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6528     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6529 
6530     unlock_user_struct(target_itspec, target_addr, 0);
6531     return 0;
6532 }
6533 
6534 static inline abi_long target_to_host_timex(struct timex *host_tx,
6535                                             abi_long target_addr)
6536 {
6537     struct target_timex *target_tx;
6538 
6539     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6540         return -TARGET_EFAULT;
6541     }
6542 
6543     __get_user(host_tx->modes, &target_tx->modes);
6544     __get_user(host_tx->offset, &target_tx->offset);
6545     __get_user(host_tx->freq, &target_tx->freq);
6546     __get_user(host_tx->maxerror, &target_tx->maxerror);
6547     __get_user(host_tx->esterror, &target_tx->esterror);
6548     __get_user(host_tx->status, &target_tx->status);
6549     __get_user(host_tx->constant, &target_tx->constant);
6550     __get_user(host_tx->precision, &target_tx->precision);
6551     __get_user(host_tx->tolerance, &target_tx->tolerance);
6552     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6553     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6554     __get_user(host_tx->tick, &target_tx->tick);
6555     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6556     __get_user(host_tx->jitter, &target_tx->jitter);
6557     __get_user(host_tx->shift, &target_tx->shift);
6558     __get_user(host_tx->stabil, &target_tx->stabil);
6559     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6560     __get_user(host_tx->calcnt, &target_tx->calcnt);
6561     __get_user(host_tx->errcnt, &target_tx->errcnt);
6562     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6563     __get_user(host_tx->tai, &target_tx->tai);
6564 
6565     unlock_user_struct(target_tx, target_addr, 0);
6566     return 0;
6567 }
6568 
6569 static inline abi_long host_to_target_timex(abi_long target_addr,
6570                                             struct timex *host_tx)
6571 {
6572     struct target_timex *target_tx;
6573 
6574     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6575         return -TARGET_EFAULT;
6576     }
6577 
6578     __put_user(host_tx->modes, &target_tx->modes);
6579     __put_user(host_tx->offset, &target_tx->offset);
6580     __put_user(host_tx->freq, &target_tx->freq);
6581     __put_user(host_tx->maxerror, &target_tx->maxerror);
6582     __put_user(host_tx->esterror, &target_tx->esterror);
6583     __put_user(host_tx->status, &target_tx->status);
6584     __put_user(host_tx->constant, &target_tx->constant);
6585     __put_user(host_tx->precision, &target_tx->precision);
6586     __put_user(host_tx->tolerance, &target_tx->tolerance);
6587     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6588     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6589     __put_user(host_tx->tick, &target_tx->tick);
6590     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6591     __put_user(host_tx->jitter, &target_tx->jitter);
6592     __put_user(host_tx->shift, &target_tx->shift);
6593     __put_user(host_tx->stabil, &target_tx->stabil);
6594     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6595     __put_user(host_tx->calcnt, &target_tx->calcnt);
6596     __put_user(host_tx->errcnt, &target_tx->errcnt);
6597     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6598     __put_user(host_tx->tai, &target_tx->tai);
6599 
6600     unlock_user_struct(target_tx, target_addr, 1);
6601     return 0;
6602 }
6603 
6604 
6605 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6606                                                abi_ulong target_addr)
6607 {
6608     struct target_sigevent *target_sevp;
6609 
6610     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6611         return -TARGET_EFAULT;
6612     }
6613 
6614     /* This union is awkward on 64 bit systems because it has a 32 bit
6615      * integer and a pointer in it; we follow the conversion approach
6616      * used for handling sigval types in signal.c so the guest should get
6617      * the correct value back even if we did a 64 bit byteswap and it's
6618      * using the 32 bit integer.
6619      */
6620     host_sevp->sigev_value.sival_ptr =
6621         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6622     host_sevp->sigev_signo =
6623         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6624     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6625     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6626 
6627     unlock_user_struct(target_sevp, target_addr, 1);
6628     return 0;
6629 }
6630 
6631 #if defined(TARGET_NR_mlockall)
6632 static inline int target_to_host_mlockall_arg(int arg)
6633 {
6634     int result = 0;
6635 
6636     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6637         result |= MCL_CURRENT;
6638     }
6639     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6640         result |= MCL_FUTURE;
6641     }
6642     return result;
6643 }
6644 #endif
6645 
6646 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6647      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6648      defined(TARGET_NR_newfstatat))
6649 static inline abi_long host_to_target_stat64(void *cpu_env,
6650                                              abi_ulong target_addr,
6651                                              struct stat *host_st)
6652 {
6653 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6654     if (((CPUARMState *)cpu_env)->eabi) {
6655         struct target_eabi_stat64 *target_st;
6656 
6657         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6658             return -TARGET_EFAULT;
6659         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6660         __put_user(host_st->st_dev, &target_st->st_dev);
6661         __put_user(host_st->st_ino, &target_st->st_ino);
6662 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6663         __put_user(host_st->st_ino, &target_st->__st_ino);
6664 #endif
6665         __put_user(host_st->st_mode, &target_st->st_mode);
6666         __put_user(host_st->st_nlink, &target_st->st_nlink);
6667         __put_user(host_st->st_uid, &target_st->st_uid);
6668         __put_user(host_st->st_gid, &target_st->st_gid);
6669         __put_user(host_st->st_rdev, &target_st->st_rdev);
6670         __put_user(host_st->st_size, &target_st->st_size);
6671         __put_user(host_st->st_blksize, &target_st->st_blksize);
6672         __put_user(host_st->st_blocks, &target_st->st_blocks);
6673         __put_user(host_st->st_atime, &target_st->target_st_atime);
6674         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6675         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6676 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6677         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6678         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6679         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6680 #endif
6681         unlock_user_struct(target_st, target_addr, 1);
6682     } else
6683 #endif
6684     {
6685 #if defined(TARGET_HAS_STRUCT_STAT64)
6686         struct target_stat64 *target_st;
6687 #else
6688         struct target_stat *target_st;
6689 #endif
6690 
6691         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6692             return -TARGET_EFAULT;
6693         memset(target_st, 0, sizeof(*target_st));
6694         __put_user(host_st->st_dev, &target_st->st_dev);
6695         __put_user(host_st->st_ino, &target_st->st_ino);
6696 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6697         __put_user(host_st->st_ino, &target_st->__st_ino);
6698 #endif
6699         __put_user(host_st->st_mode, &target_st->st_mode);
6700         __put_user(host_st->st_nlink, &target_st->st_nlink);
6701         __put_user(host_st->st_uid, &target_st->st_uid);
6702         __put_user(host_st->st_gid, &target_st->st_gid);
6703         __put_user(host_st->st_rdev, &target_st->st_rdev);
6704         /* XXX: better use of kernel struct */
6705         __put_user(host_st->st_size, &target_st->st_size);
6706         __put_user(host_st->st_blksize, &target_st->st_blksize);
6707         __put_user(host_st->st_blocks, &target_st->st_blocks);
6708         __put_user(host_st->st_atime, &target_st->target_st_atime);
6709         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6710         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6711 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6712         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6713         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6714         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6715 #endif
6716         unlock_user_struct(target_st, target_addr, 1);
6717     }
6718 
6719     return 0;
6720 }
6721 #endif
6722 
6723 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6724 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6725                                             abi_ulong target_addr)
6726 {
6727     struct target_statx *target_stx;
6728 
6729     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6730         return -TARGET_EFAULT;
6731     }
6732     memset(target_stx, 0, sizeof(*target_stx));
6733 
6734     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6735     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6736     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6737     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6738     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6739     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6740     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6741     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6742     __put_user(host_stx->stx_size, &target_stx->stx_size);
6743     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6744     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6745     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6746     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6747     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6748     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6749     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6750     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6751     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6752     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6753     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6754     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6755     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6756     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6757 
6758     unlock_user_struct(target_stx, target_addr, 1);
6759 
6760     return 0;
6761 }
6762 #endif
6763 
6764 
6765 /* ??? Using host futex calls even when target atomic operations
6766    are not really atomic probably breaks things.  However, implementing
6767    futexes locally would make futexes shared between multiple processes
6768    tricky; such shared futexes would probably be useless anyway, because
6769    guest atomic operations would not work either.  */
6770 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6771                     target_ulong uaddr2, int val3)
6772 {
6773     struct timespec ts, *pts;
6774     int base_op;
6775 
6776     /* ??? We assume FUTEX_* constants are the same on both host
6777        and target.  */
6778 #ifdef FUTEX_CMD_MASK
6779     base_op = op & FUTEX_CMD_MASK;
6780 #else
6781     base_op = op;
6782 #endif
6783     switch (base_op) {
6784     case FUTEX_WAIT:
6785     case FUTEX_WAIT_BITSET:
6786         if (timeout) {
6787             pts = &ts;
6788             target_to_host_timespec(pts, timeout);
6789         } else {
6790             pts = NULL;
6791         }
6792         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6793                          pts, NULL, val3));
6794     case FUTEX_WAKE:
6795         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6796     case FUTEX_FD:
6797         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6798     case FUTEX_REQUEUE:
6799     case FUTEX_CMP_REQUEUE:
6800     case FUTEX_WAKE_OP:
6801         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6802            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6803            But the prototype takes a `struct timespec *'; insert casts
6804            to satisfy the compiler.  We do not need to tswap TIMEOUT
6805            since it's not compared to guest memory.  */
6806         pts = (struct timespec *)(uintptr_t) timeout;
6807         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6808                                     g2h(uaddr2),
6809                                     (base_op == FUTEX_CMP_REQUEUE
6810                                      ? tswap32(val3)
6811                                      : val3)));
6812     default:
6813         return -TARGET_ENOSYS;
6814     }
6815 }
6816 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6817 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6818                                      abi_long handle, abi_long mount_id,
6819                                      abi_long flags)
6820 {
6821     struct file_handle *target_fh;
6822     struct file_handle *fh;
6823     int mid = 0;
6824     abi_long ret;
6825     char *name;
6826     unsigned int size, total_size;
6827 
6828     if (get_user_s32(size, handle)) {
6829         return -TARGET_EFAULT;
6830     }
6831 
6832     name = lock_user_string(pathname);
6833     if (!name) {
6834         return -TARGET_EFAULT;
6835     }
6836 
6837     total_size = sizeof(struct file_handle) + size;
6838     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6839     if (!target_fh) {
6840         unlock_user(name, pathname, 0);
6841         return -TARGET_EFAULT;
6842     }
6843 
6844     fh = g_malloc0(total_size);
6845     fh->handle_bytes = size;
6846 
6847     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6848     unlock_user(name, pathname, 0);
6849 
6850     /* man name_to_handle_at(2):
6851      * Other than the use of the handle_bytes field, the caller should treat
6852      * the file_handle structure as an opaque data type
6853      */
6854 
6855     memcpy(target_fh, fh, total_size);
6856     target_fh->handle_bytes = tswap32(fh->handle_bytes);
6857     target_fh->handle_type = tswap32(fh->handle_type);
6858     g_free(fh);
6859     unlock_user(target_fh, handle, total_size);
6860 
6861     if (put_user_s32(mid, mount_id)) {
6862         return -TARGET_EFAULT;
6863     }
6864 
6865     return ret;
6866 
6867 }
6868 #endif
6869 
6870 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6871 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6872                                      abi_long flags)
6873 {
6874     struct file_handle *target_fh;
6875     struct file_handle *fh;
6876     unsigned int size, total_size;
6877     abi_long ret;
6878 
6879     if (get_user_s32(size, handle)) {
6880         return -TARGET_EFAULT;
6881     }
6882 
6883     total_size = sizeof(struct file_handle) + size;
6884     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6885     if (!target_fh) {
6886         return -TARGET_EFAULT;
6887     }
6888 
6889     fh = g_memdup(target_fh, total_size);
6890     fh->handle_bytes = size;
6891     fh->handle_type = tswap32(target_fh->handle_type);
6892 
6893     ret = get_errno(open_by_handle_at(mount_fd, fh,
6894                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
6895 
6896     g_free(fh);
6897 
6898     unlock_user(target_fh, handle, total_size);
6899 
6900     return ret;
6901 }
6902 #endif
6903 
6904 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6905 
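/* signalfd()/signalfd4(): convert the guest signal mask and flags, and
 * register an fd translator so that signalfd_siginfo records read from
 * the descriptor are converted back to the guest layout.
 */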
6906 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6907 {
6908     int host_flags;
6909     target_sigset_t *target_mask;
6910     sigset_t host_mask;
6911     abi_long ret;
6912 
6913     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6914         return -TARGET_EINVAL;
6915     }
6916     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6917         return -TARGET_EFAULT;
6918     }
6919 
6920     target_to_host_sigset(&host_mask, target_mask);
6921 
6922     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6923 
6924     ret = get_errno(signalfd(fd, &host_mask, host_flags));
6925     if (ret >= 0) {
6926         fd_trans_register(ret, &target_signalfd_trans);
6927     }
6928 
6929     unlock_user_struct(target_mask, mask, 0);
6930 
6931     return ret;
6932 }
6933 #endif
6934 
6935 /* Map host to target signal numbers for the wait family of syscalls.
6936    Assume all other status bits are the same.  */
6937 int host_to_target_waitstatus(int status)
6938 {
6939     if (WIFSIGNALED(status)) {
6940         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6941     }
6942     if (WIFSTOPPED(status)) {
6943         return (host_to_target_signal(WSTOPSIG(status)) << 8)
6944                | (status & 0xff);
6945     }
6946     return status;
6947 }
6948 
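/* The open_self_*() helpers below synthesize the contents of the
 * guest's /proc/self/ files so that they describe the emulated process
 * rather than the QEMU binary itself.
 */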
6949 static int open_self_cmdline(void *cpu_env, int fd)
6950 {
6951     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6952     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6953     int i;
6954 
6955     for (i = 0; i < bprm->argc; i++) {
6956         size_t len = strlen(bprm->argv[i]) + 1;
6957 
6958         if (write(fd, bprm->argv[i], len) != len) {
6959             return -1;
6960         }
6961     }
6962 
6963     return 0;
6964 }
6965 
6966 static int open_self_maps(void *cpu_env, int fd)
6967 {
6968     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
6969     TaskState *ts = cpu->opaque;
6970     FILE *fp;
6971     char *line = NULL;
6972     size_t len = 0;
6973     ssize_t read;
6974 
6975     fp = fopen("/proc/self/maps", "r");
6976     if (fp == NULL) {
6977         return -1;
6978     }
6979 
6980     while ((read = getline(&line, &len, fp)) != -1) {
6981         int fields, dev_maj, dev_min, inode;
6982         uint64_t min, max, offset;
6983         char flag_r, flag_w, flag_x, flag_p;
6984         char path[512] = "";
6985         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6986                         " %511s", &min, &max, &flag_r, &flag_w, &flag_x,
6987                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6988 
6989         if ((fields < 10) || (fields > 11)) {
6990             continue;
6991         }
6992         if (h2g_valid(min)) {
6993             int flags = page_get_flags(h2g(min));
6994             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6995             if (page_check_range(h2g(min), max - min, flags) == -1) {
6996                 continue;
6997             }
6998             if (h2g(min) == ts->info->stack_limit) {
6999                 pstrcpy(path, sizeof(path), "      [stack]");
7000             }
7001             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7002                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7003                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7004                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7005                     path[0] ? "         " : "", path);
7006         }
7007     }
7008 
7009     free(line);
7010     fclose(fp);
7011 
7012     return 0;
7013 }
7014 
7015 static int open_self_stat(void *cpu_env, int fd)
7016 {
7017     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7018     TaskState *ts = cpu->opaque;
7019     abi_ulong start_stack = ts->info->start_stack;
7020     int i;
7021 
7022     for (i = 0; i < 44; i++) {
7023       char buf[128];
7024       int len;
7025       uint64_t val = 0;
7026 
7027       if (i == 0) {
7028         /* pid */
7029         val = getpid();
7030         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7031       } else if (i == 1) {
7032         /* app name */
7033         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7034       } else if (i == 27) {
7035         /* stack bottom */
7036         val = start_stack;
7037         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7038       } else {
7039         /* for the rest, there is MasterCard: just report 0 */
7040         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7041       }
7042 
7043       len = strlen(buf);
7044       if (write(fd, buf, len) != len) {
7045           return -1;
7046       }
7047     }
7048 
7049     return 0;
7050 }
7051 
7052 static int open_self_auxv(void *cpu_env, int fd)
7053 {
7054     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7055     TaskState *ts = cpu->opaque;
7056     abi_ulong auxv = ts->info->saved_auxv;
7057     abi_ulong len = ts->info->auxv_len;
7058     char *ptr;
7059 
7060     /*
7061      * The auxiliary vector is stored in the target process stack.
7062      * Read in the whole auxv vector and copy it to the file.
7063      */
7064     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7065     if (ptr != NULL) {
7066         while (len > 0) {
7067             ssize_t r;
7068             r = write(fd, ptr, len);
7069             if (r <= 0) {
7070                 break;
7071             }
7072             len -= r;
7073             ptr += r;
7074         }
7075         lseek(fd, 0, SEEK_SET);
7076         unlock_user(ptr, auxv, len);
7077     }
7078 
7079     return 0;
7080 }
7081 
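/* Return non-zero if filename names /proc/self/<entry> or
 * /proc/<pid-of-this-process>/<entry>.
 */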
7082 static int is_proc_myself(const char *filename, const char *entry)
7083 {
7084     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7085         filename += strlen("/proc/");
7086         if (!strncmp(filename, "self/", strlen("self/"))) {
7087             filename += strlen("self/");
7088         } else if (*filename >= '1' && *filename <= '9') {
7089             char myself[80];
7090             snprintf(myself, sizeof(myself), "%d/", getpid());
7091             if (!strncmp(filename, myself, strlen(myself))) {
7092                 filename += strlen(myself);
7093             } else {
7094                 return 0;
7095             }
7096         } else {
7097             return 0;
7098         }
7099         if (!strcmp(filename, entry)) {
7100             return 1;
7101         }
7102     }
7103     return 0;
7104 }
7105 
7106 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7107     defined(TARGET_SPARC) || defined(TARGET_M68K)
7108 static int is_proc(const char *filename, const char *entry)
7109 {
7110     return strcmp(filename, entry) == 0;
7111 }
7112 #endif
7113 
7114 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7115 static int open_net_route(void *cpu_env, int fd)
7116 {
7117     FILE *fp;
7118     char *line = NULL;
7119     size_t len = 0;
7120     ssize_t read;
7121 
7122     fp = fopen("/proc/net/route", "r");
7123     if (fp == NULL) {
7124         return -1;
7125     }
7126 
7127     /* read header */
7128 
7129     read = getline(&line, &len, fp);
7130     dprintf(fd, "%s", line);
7131 
7132     /* read routes */
7133 
7134     while ((read = getline(&line, &len, fp)) != -1) {
7135         char iface[16];
7136         uint32_t dest, gw, mask;
7137         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7138         int fields;
7139 
7140         fields = sscanf(line,
7141                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7142                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7143                         &mask, &mtu, &window, &irtt);
7144         if (fields != 11) {
7145             continue;
7146         }
7147         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7148                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7149                 metric, tswap32(mask), mtu, window, irtt);
7150     }
7151 
7152     free(line);
7153     fclose(fp);
7154 
7155     return 0;
7156 }
7157 #endif
7158 
7159 #if defined(TARGET_SPARC)
7160 static int open_cpuinfo(void *cpu_env, int fd)
7161 {
7162     dprintf(fd, "type\t\t: sun4u\n");
7163     return 0;
7164 }
7165 #endif
7166 
7167 #if defined(TARGET_M68K)
7168 static int open_hardware(void *cpu_env, int fd)
7169 {
7170     dprintf(fd, "Model:\t\tqemu-m68k\n");
7171     return 0;
7172 }
7173 #endif
7174 
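/* openat() helper: intercept the /proc paths listed in fakes[] (whose
 * contents must be generated for the guest via a temporary file) and
 * /proc/self/exe, and fall through to the host openat() otherwise.
 */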
7175 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7176 {
7177     struct fake_open {
7178         const char *filename;
7179         int (*fill)(void *cpu_env, int fd);
7180         int (*cmp)(const char *s1, const char *s2);
7181     };
7182     const struct fake_open *fake_open;
7183     static const struct fake_open fakes[] = {
7184         { "maps", open_self_maps, is_proc_myself },
7185         { "stat", open_self_stat, is_proc_myself },
7186         { "auxv", open_self_auxv, is_proc_myself },
7187         { "cmdline", open_self_cmdline, is_proc_myself },
7188 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7189         { "/proc/net/route", open_net_route, is_proc },
7190 #endif
7191 #if defined(TARGET_SPARC)
7192         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7193 #endif
7194 #if defined(TARGET_M68K)
7195         { "/proc/hardware", open_hardware, is_proc },
7196 #endif
7197         { NULL, NULL, NULL }
7198     };
7199 
7200     if (is_proc_myself(pathname, "exe")) {
7201         int execfd = qemu_getauxval(AT_EXECFD);
7202         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7203     }
7204 
7205     for (fake_open = fakes; fake_open->filename; fake_open++) {
7206         if (fake_open->cmp(pathname, fake_open->filename)) {
7207             break;
7208         }
7209     }
7210 
7211     if (fake_open->filename) {
7212         const char *tmpdir;
7213         char filename[PATH_MAX];
7214         int fd, r;
7215 
7216         /* create a temporary file to hold the synthesized contents */
7217         tmpdir = getenv("TMPDIR");
7218         if (!tmpdir)
7219             tmpdir = "/tmp";
7220         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7221         fd = mkstemp(filename);
7222         if (fd < 0) {
7223             return fd;
7224         }
7225         unlink(filename);
7226 
7227         if ((r = fake_open->fill(cpu_env, fd))) {
7228             int e = errno;
7229             close(fd);
7230             errno = e;
7231             return r;
7232         }
7233         lseek(fd, 0, SEEK_SET);
7234 
7235         return fd;
7236     }
7237 
7238     return safe_openat(dirfd, path(pathname), flags, mode);
7239 }
7240 
7241 #define TIMER_MAGIC 0x0caf0000
7242 #define TIMER_MAGIC_MASK 0xffff0000
7243 
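/* Timer IDs handed to the guest are the index into g_posix_timers ORed
 * with TIMER_MAGIC, so for example index 3 is presented as 0x0caf0003.
 */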
7244 /* Convert the QEMU-provided timer ID back to the internal 16-bit index format */
7245 static target_timer_t get_timer_id(abi_long arg)
7246 {
7247     target_timer_t timerid = arg;
7248 
7249     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7250         return -TARGET_EINVAL;
7251     }
7252 
7253     timerid &= 0xffff;
7254 
7255     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7256         return -TARGET_EINVAL;
7257     }
7258 
7259     return timerid;
7260 }
7261 
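/* Repack a guest CPU affinity mask (an array of abi_ulong words) into a
 * host mask of unsigned long words, bit by bit; used by the
 * sched_getaffinity/sched_setaffinity emulation.
 */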
7262 static int target_to_host_cpu_mask(unsigned long *host_mask,
7263                                    size_t host_size,
7264                                    abi_ulong target_addr,
7265                                    size_t target_size)
7266 {
7267     unsigned target_bits = sizeof(abi_ulong) * 8;
7268     unsigned host_bits = sizeof(*host_mask) * 8;
7269     abi_ulong *target_mask;
7270     unsigned i, j;
7271 
7272     assert(host_size >= target_size);
7273 
7274     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7275     if (!target_mask) {
7276         return -TARGET_EFAULT;
7277     }
7278     memset(host_mask, 0, host_size);
7279 
7280     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7281         unsigned bit = i * target_bits;
7282         abi_ulong val;
7283 
7284         __get_user(val, &target_mask[i]);
7285         for (j = 0; j < target_bits; j++, bit++) {
7286             if (val & (1UL << j)) {
7287                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7288             }
7289         }
7290     }
7291 
7292     unlock_user(target_mask, target_addr, 0);
7293     return 0;
7294 }
7295 
7296 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7297                                    size_t host_size,
7298                                    abi_ulong target_addr,
7299                                    size_t target_size)
7300 {
7301     unsigned target_bits = sizeof(abi_ulong) * 8;
7302     unsigned host_bits = sizeof(*host_mask) * 8;
7303     abi_ulong *target_mask;
7304     unsigned i, j;
7305 
7306     assert(host_size >= target_size);
7307 
7308     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7309     if (!target_mask) {
7310         return -TARGET_EFAULT;
7311     }
7312 
7313     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7314         unsigned bit = i * target_bits;
7315         abi_ulong val = 0;
7316 
7317         for (j = 0; j < target_bits; j++, bit++) {
7318             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7319                 val |= 1UL << j;
7320             }
7321         }
7322         __put_user(val, &target_mask[i]);
7323     }
7324 
7325     unlock_user(target_mask, target_addr, target_size);
7326     return 0;
7327 }
7328 
7329 /* This is an internal helper for do_syscall so that it is easier
7330  * to have a single return point, allowing actions such as logging
7331  * of syscall results to be performed.
7332  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7333  */
7334 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7335                             abi_long arg2, abi_long arg3, abi_long arg4,
7336                             abi_long arg5, abi_long arg6, abi_long arg7,
7337                             abi_long arg8)
7338 {
7339     CPUState *cpu = env_cpu(cpu_env);
7340     abi_long ret;
7341 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7342     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7343     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7344     || defined(TARGET_NR_statx)
7345     struct stat st;
7346 #endif
7347 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7348     || defined(TARGET_NR_fstatfs)
7349     struct statfs stfs;
7350 #endif
7351     void *p;
7352 
7353     switch(num) {
7354     case TARGET_NR_exit:
7355         /* In old applications this may be used to implement _exit(2).
7356            However, in threaded applications it is used for thread termination,
7357            and _exit_group is used for application termination.
7358            Do thread termination if we have more than one thread.  */
7359 
7360         if (block_signals()) {
7361             return -TARGET_ERESTARTSYS;
7362         }
7363 
7364         cpu_list_lock();
7365 
7366         if (CPU_NEXT(first_cpu)) {
7367             TaskState *ts;
7368 
7369             /* Remove the CPU from the list.  */
7370             QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7371 
7372             cpu_list_unlock();
7373 
7374             ts = cpu->opaque;
7375             if (ts->child_tidptr) {
7376                 put_user_u32(0, ts->child_tidptr);
7377                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7378                           NULL, NULL, 0);
7379             }
7380             thread_cpu = NULL;
7381             object_unref(OBJECT(cpu));
7382             g_free(ts);
7383             rcu_unregister_thread();
7384             pthread_exit(NULL);
7385         }
7386 
7387         cpu_list_unlock();
7388         preexit_cleanup(cpu_env, arg1);
7389         _exit(arg1);
7390         return 0; /* avoid warning */
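         /* read()/write() with a NULL buffer and zero length are valid, so
          * pass them straight to the host rather than failing with EFAULT
          * when trying to lock the guest buffer.
          */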
7391     case TARGET_NR_read:
7392         if (arg2 == 0 && arg3 == 0) {
7393             return get_errno(safe_read(arg1, 0, 0));
7394         } else {
7395             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7396                 return -TARGET_EFAULT;
7397             ret = get_errno(safe_read(arg1, p, arg3));
7398             if (ret >= 0 &&
7399                 fd_trans_host_to_target_data(arg1)) {
7400                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7401             }
7402             unlock_user(p, arg2, ret);
7403         }
7404         return ret;
7405     case TARGET_NR_write:
7406         if (arg2 == 0 && arg3 == 0) {
7407             return get_errno(safe_write(arg1, 0, 0));
7408         }
7409         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7410             return -TARGET_EFAULT;
7411         if (fd_trans_target_to_host_data(arg1)) {
7412             void *copy = g_malloc(arg3);
7413             memcpy(copy, p, arg3);
7414             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7415             if (ret >= 0) {
7416                 ret = get_errno(safe_write(arg1, copy, ret));
7417             }
7418             g_free(copy);
7419         } else {
7420             ret = get_errno(safe_write(arg1, p, arg3));
7421         }
7422         unlock_user(p, arg2, 0);
7423         return ret;
7424 
7425 #ifdef TARGET_NR_open
7426     case TARGET_NR_open:
7427         if (!(p = lock_user_string(arg1)))
7428             return -TARGET_EFAULT;
7429         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7430                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7431                                   arg3));
7432         fd_trans_unregister(ret);
7433         unlock_user(p, arg1, 0);
7434         return ret;
7435 #endif
7436     case TARGET_NR_openat:
7437         if (!(p = lock_user_string(arg2)))
7438             return -TARGET_EFAULT;
7439         ret = get_errno(do_openat(cpu_env, arg1, p,
7440                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7441                                   arg4));
7442         fd_trans_unregister(ret);
7443         unlock_user(p, arg2, 0);
7444         return ret;
7445 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7446     case TARGET_NR_name_to_handle_at:
7447         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7448         return ret;
7449 #endif
7450 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7451     case TARGET_NR_open_by_handle_at:
7452         ret = do_open_by_handle_at(arg1, arg2, arg3);
7453         fd_trans_unregister(ret);
7454         return ret;
7455 #endif
7456     case TARGET_NR_close:
7457         fd_trans_unregister(arg1);
7458         return get_errno(close(arg1));
7459 
7460     case TARGET_NR_brk:
7461         return do_brk(arg1);
7462 #ifdef TARGET_NR_fork
7463     case TARGET_NR_fork:
7464         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7465 #endif
7466 #ifdef TARGET_NR_waitpid
7467     case TARGET_NR_waitpid:
7468         {
7469             int status;
7470             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7471             if (!is_error(ret) && arg2 && ret
7472                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7473                 return -TARGET_EFAULT;
7474         }
7475         return ret;
7476 #endif
7477 #ifdef TARGET_NR_waitid
7478     case TARGET_NR_waitid:
7479         {
7480             siginfo_t info;
7481             info.si_pid = 0;
7482             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7483             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7484                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7485                     return -TARGET_EFAULT;
7486                 host_to_target_siginfo(p, &info);
7487                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7488             }
7489         }
7490         return ret;
7491 #endif
7492 #ifdef TARGET_NR_creat /* not on alpha */
7493     case TARGET_NR_creat:
7494         if (!(p = lock_user_string(arg1)))
7495             return -TARGET_EFAULT;
7496         ret = get_errno(creat(p, arg2));
7497         fd_trans_unregister(ret);
7498         unlock_user(p, arg1, 0);
7499         return ret;
7500 #endif
7501 #ifdef TARGET_NR_link
7502     case TARGET_NR_link:
7503         {
7504             void * p2;
7505             p = lock_user_string(arg1);
7506             p2 = lock_user_string(arg2);
7507             if (!p || !p2)
7508                 ret = -TARGET_EFAULT;
7509             else
7510                 ret = get_errno(link(p, p2));
7511             unlock_user(p2, arg2, 0);
7512             unlock_user(p, arg1, 0);
7513         }
7514         return ret;
7515 #endif
7516 #if defined(TARGET_NR_linkat)
7517     case TARGET_NR_linkat:
7518         {
7519             void * p2 = NULL;
7520             if (!arg2 || !arg4)
7521                 return -TARGET_EFAULT;
7522             p  = lock_user_string(arg2);
7523             p2 = lock_user_string(arg4);
7524             if (!p || !p2)
7525                 ret = -TARGET_EFAULT;
7526             else
7527                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7528             unlock_user(p, arg2, 0);
7529             unlock_user(p2, arg4, 0);
7530         }
7531         return ret;
7532 #endif
7533 #ifdef TARGET_NR_unlink
7534     case TARGET_NR_unlink:
7535         if (!(p = lock_user_string(arg1)))
7536             return -TARGET_EFAULT;
7537         ret = get_errno(unlink(p));
7538         unlock_user(p, arg1, 0);
7539         return ret;
7540 #endif
7541 #if defined(TARGET_NR_unlinkat)
7542     case TARGET_NR_unlinkat:
7543         if (!(p = lock_user_string(arg2)))
7544             return -TARGET_EFAULT;
7545         ret = get_errno(unlinkat(arg1, p, arg3));
7546         unlock_user(p, arg2, 0);
7547         return ret;
7548 #endif
7549     case TARGET_NR_execve:
7550         {
7551             char **argp, **envp;
7552             int argc, envc;
7553             abi_ulong gp;
7554             abi_ulong guest_argp;
7555             abi_ulong guest_envp;
7556             abi_ulong addr;
7557             char **q;
7558             int total_size = 0;
7559 
7560             argc = 0;
7561             guest_argp = arg2;
7562             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7563                 if (get_user_ual(addr, gp))
7564                     return -TARGET_EFAULT;
7565                 if (!addr)
7566                     break;
7567                 argc++;
7568             }
7569             envc = 0;
7570             guest_envp = arg3;
7571             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7572                 if (get_user_ual(addr, gp))
7573                     return -TARGET_EFAULT;
7574                 if (!addr)
7575                     break;
7576                 envc++;
7577             }
7578 
7579             argp = g_new0(char *, argc + 1);
7580             envp = g_new0(char *, envc + 1);
7581 
7582             for (gp = guest_argp, q = argp; gp;
7583                   gp += sizeof(abi_ulong), q++) {
7584                 if (get_user_ual(addr, gp))
7585                     goto execve_efault;
7586                 if (!addr)
7587                     break;
7588                 if (!(*q = lock_user_string(addr)))
7589                     goto execve_efault;
7590                 total_size += strlen(*q) + 1;
7591             }
7592             *q = NULL;
7593 
7594             for (gp = guest_envp, q = envp; gp;
7595                   gp += sizeof(abi_ulong), q++) {
7596                 if (get_user_ual(addr, gp))
7597                     goto execve_efault;
7598                 if (!addr)
7599                     break;
7600                 if (!(*q = lock_user_string(addr)))
7601                     goto execve_efault;
7602                 total_size += strlen(*q) + 1;
7603             }
7604             *q = NULL;
7605 
7606             if (!(p = lock_user_string(arg1)))
7607                 goto execve_efault;
7608             /* Although execve() is not an interruptible syscall it is
7609              * a special case where we must use the safe_syscall wrapper:
7610              * if we allow a signal to happen before we make the host
7611              * syscall then we will 'lose' it, because at the point of
7612              * execve the process leaves QEMU's control. So we use the
7613              * safe syscall wrapper to ensure that we either take the
7614              * signal as a guest signal, or else it does not happen
7615              * before the execve completes and makes it the other
7616              * program's problem.
7617              */
7618             ret = get_errno(safe_execve(p, argp, envp));
7619             unlock_user(p, arg1, 0);
7620 
7621             goto execve_end;
7622 
7623         execve_efault:
7624             ret = -TARGET_EFAULT;
7625 
7626         execve_end:
7627             for (gp = guest_argp, q = argp; *q;
7628                   gp += sizeof(abi_ulong), q++) {
7629                 if (get_user_ual(addr, gp)
7630                     || !addr)
7631                     break;
7632                 unlock_user(*q, addr, 0);
7633             }
7634             for (gp = guest_envp, q = envp; *q;
7635                   gp += sizeof(abi_ulong), q++) {
7636                 if (get_user_ual(addr, gp)
7637                     || !addr)
7638                     break;
7639                 unlock_user(*q, addr, 0);
7640             }
7641 
7642             g_free(argp);
7643             g_free(envp);
7644         }
7645         return ret;
7646     case TARGET_NR_chdir:
7647         if (!(p = lock_user_string(arg1)))
7648             return -TARGET_EFAULT;
7649         ret = get_errno(chdir(p));
7650         unlock_user(p, arg1, 0);
7651         return ret;
7652 #ifdef TARGET_NR_time
7653     case TARGET_NR_time:
7654         {
7655             time_t host_time;
7656             ret = get_errno(time(&host_time));
7657             if (!is_error(ret)
7658                 && arg1
7659                 && put_user_sal(host_time, arg1))
7660                 return -TARGET_EFAULT;
7661         }
7662         return ret;
7663 #endif
7664 #ifdef TARGET_NR_mknod
7665     case TARGET_NR_mknod:
7666         if (!(p = lock_user_string(arg1)))
7667             return -TARGET_EFAULT;
7668         ret = get_errno(mknod(p, arg2, arg3));
7669         unlock_user(p, arg1, 0);
7670         return ret;
7671 #endif
7672 #if defined(TARGET_NR_mknodat)
7673     case TARGET_NR_mknodat:
7674         if (!(p = lock_user_string(arg2)))
7675             return -TARGET_EFAULT;
7676         ret = get_errno(mknodat(arg1, p, arg3, arg4));
7677         unlock_user(p, arg2, 0);
7678         return ret;
7679 #endif
7680 #ifdef TARGET_NR_chmod
7681     case TARGET_NR_chmod:
7682         if (!(p = lock_user_string(arg1)))
7683             return -TARGET_EFAULT;
7684         ret = get_errno(chmod(p, arg2));
7685         unlock_user(p, arg1, 0);
7686         return ret;
7687 #endif
7688 #ifdef TARGET_NR_lseek
7689     case TARGET_NR_lseek:
7690         return get_errno(lseek(arg1, arg2, arg3));
7691 #endif
7692 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7693     /* Alpha specific */
7694     case TARGET_NR_getxpid:
7695         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7696         return get_errno(getpid());
7697 #endif
7698 #ifdef TARGET_NR_getpid
7699     case TARGET_NR_getpid:
7700         return get_errno(getpid());
7701 #endif
7702     case TARGET_NR_mount:
7703         {
7704             /* need to look at the data field */
7705             void *p2, *p3;
7706 
7707             if (arg1) {
7708                 p = lock_user_string(arg1);
7709                 if (!p) {
7710                     return -TARGET_EFAULT;
7711                 }
7712             } else {
7713                 p = NULL;
7714             }
7715 
7716             p2 = lock_user_string(arg2);
7717             if (!p2) {
7718                 if (arg1) {
7719                     unlock_user(p, arg1, 0);
7720                 }
7721                 return -TARGET_EFAULT;
7722             }
7723 
7724             if (arg3) {
7725                 p3 = lock_user_string(arg3);
7726                 if (!p3) {
7727                     if (arg1) {
7728                         unlock_user(p, arg1, 0);
7729                     }
7730                     unlock_user(p2, arg2, 0);
7731                     return -TARGET_EFAULT;
7732                 }
7733             } else {
7734                 p3 = NULL;
7735             }
7736 
7737             /* FIXME - arg5 should be locked, but it isn't clear how to
7738              * do that since it's not guaranteed to be a NULL-terminated
7739              * string.
7740              */
7741             if (!arg5) {
7742                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7743             } else {
7744                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7745             }
7746             ret = get_errno(ret);
7747 
7748             if (arg1) {
7749                 unlock_user(p, arg1, 0);
7750             }
7751             unlock_user(p2, arg2, 0);
7752             if (arg3) {
7753                 unlock_user(p3, arg3, 0);
7754             }
7755         }
7756         return ret;
7757 #ifdef TARGET_NR_umount
7758     case TARGET_NR_umount:
7759         if (!(p = lock_user_string(arg1)))
7760             return -TARGET_EFAULT;
7761         ret = get_errno(umount(p));
7762         unlock_user(p, arg1, 0);
7763         return ret;
7764 #endif
7765 #ifdef TARGET_NR_stime /* not on alpha */
7766     case TARGET_NR_stime:
7767         {
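                 /* Implemented via clock_settime(CLOCK_REALTIME) rather
                  * than the host's obsolete stime(). */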
7768             struct timespec ts;
7769             ts.tv_nsec = 0;
7770             if (get_user_sal(ts.tv_sec, arg1)) {
7771                 return -TARGET_EFAULT;
7772             }
7773             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7774         }
7775 #endif
7776 #ifdef TARGET_NR_alarm /* not on alpha */
7777     case TARGET_NR_alarm:
7778         return alarm(arg1);
7779 #endif
7780 #ifdef TARGET_NR_pause /* not on alpha */
7781     case TARGET_NR_pause:
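             /* Emulated with sigsuspend() on the task's current signal
              * mask; if block_signals() reports a guest signal already
              * pending, skip the suspend and return EINTR directly. */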
7782         if (!block_signals()) {
7783             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7784         }
7785         return -TARGET_EINTR;
7786 #endif
7787 #ifdef TARGET_NR_utime
7788     case TARGET_NR_utime:
7789         {
7790             struct utimbuf tbuf, *host_tbuf;
7791             struct target_utimbuf *target_tbuf;
7792             if (arg2) {
7793                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7794                     return -TARGET_EFAULT;
7795                 tbuf.actime = tswapal(target_tbuf->actime);
7796                 tbuf.modtime = tswapal(target_tbuf->modtime);
7797                 unlock_user_struct(target_tbuf, arg2, 0);
7798                 host_tbuf = &tbuf;
7799             } else {
7800                 host_tbuf = NULL;
7801             }
7802             if (!(p = lock_user_string(arg1)))
7803                 return -TARGET_EFAULT;
7804             ret = get_errno(utime(p, host_tbuf));
7805             unlock_user(p, arg1, 0);
7806         }
7807         return ret;
7808 #endif
7809 #ifdef TARGET_NR_utimes
7810     case TARGET_NR_utimes:
7811         {
7812             struct timeval *tvp, tv[2];
7813             if (arg2) {
7814                 if (copy_from_user_timeval(&tv[0], arg2)
7815                     || copy_from_user_timeval(&tv[1],
7816                                               arg2 + sizeof(struct target_timeval)))
7817                     return -TARGET_EFAULT;
7818                 tvp = tv;
7819             } else {
7820                 tvp = NULL;
7821             }
7822             if (!(p = lock_user_string(arg1)))
7823                 return -TARGET_EFAULT;
7824             ret = get_errno(utimes(p, tvp));
7825             unlock_user(p, arg1, 0);
7826         }
7827         return ret;
7828 #endif
7829 #if defined(TARGET_NR_futimesat)
7830     case TARGET_NR_futimesat:
7831         {
7832             struct timeval *tvp, tv[2];
7833             if (arg3) {
7834                 if (copy_from_user_timeval(&tv[0], arg3)
7835                     || copy_from_user_timeval(&tv[1],
7836                                               arg3 + sizeof(struct target_timeval)))
7837                     return -TARGET_EFAULT;
7838                 tvp = tv;
7839             } else {
7840                 tvp = NULL;
7841             }
7842             if (!(p = lock_user_string(arg2))) {
7843                 return -TARGET_EFAULT;
7844             }
7845             ret = get_errno(futimesat(arg1, path(p), tvp));
7846             unlock_user(p, arg2, 0);
7847         }
7848         return ret;
7849 #endif
7850 #ifdef TARGET_NR_access
7851     case TARGET_NR_access:
7852         if (!(p = lock_user_string(arg1))) {
7853             return -TARGET_EFAULT;
7854         }
7855         ret = get_errno(access(path(p), arg2));
7856         unlock_user(p, arg1, 0);
7857         return ret;
7858 #endif
7859 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7860     case TARGET_NR_faccessat:
7861         if (!(p = lock_user_string(arg2))) {
7862             return -TARGET_EFAULT;
7863         }
7864         ret = get_errno(faccessat(arg1, p, arg3, 0));
7865         unlock_user(p, arg2, 0);
7866         return ret;
7867 #endif
7868 #ifdef TARGET_NR_nice /* not on alpha */
7869     case TARGET_NR_nice:
7870         return get_errno(nice(arg1));
7871 #endif
7872     case TARGET_NR_sync:
7873         sync();
7874         return 0;
7875 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7876     case TARGET_NR_syncfs:
7877         return get_errno(syncfs(arg1));
7878 #endif
7879     case TARGET_NR_kill:
7880         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7881 #ifdef TARGET_NR_rename
7882     case TARGET_NR_rename:
7883         {
7884             void *p2;
7885             p = lock_user_string(arg1);
7886             p2 = lock_user_string(arg2);
7887             if (!p || !p2)
7888                 ret = -TARGET_EFAULT;
7889             else
7890                 ret = get_errno(rename(p, p2));
7891             unlock_user(p2, arg2, 0);
7892             unlock_user(p, arg1, 0);
7893         }
7894         return ret;
7895 #endif
7896 #if defined(TARGET_NR_renameat)
7897     case TARGET_NR_renameat:
7898         {
7899             void *p2;
7900             p  = lock_user_string(arg2);
7901             p2 = lock_user_string(arg4);
7902             if (!p || !p2)
7903                 ret = -TARGET_EFAULT;
7904             else
7905                 ret = get_errno(renameat(arg1, p, arg3, p2));
7906             unlock_user(p2, arg4, 0);
7907             unlock_user(p, arg2, 0);
7908         }
7909         return ret;
7910 #endif
7911 #if defined(TARGET_NR_renameat2)
7912     case TARGET_NR_renameat2:
7913         {
7914             void *p2;
7915             p  = lock_user_string(arg2);
7916             p2 = lock_user_string(arg4);
7917             if (!p || !p2) {
7918                 ret = -TARGET_EFAULT;
7919             } else {
7920                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7921             }
7922             unlock_user(p2, arg4, 0);
7923             unlock_user(p, arg2, 0);
7924         }
7925         return ret;
7926 #endif
7927 #ifdef TARGET_NR_mkdir
7928     case TARGET_NR_mkdir:
7929         if (!(p = lock_user_string(arg1)))
7930             return -TARGET_EFAULT;
7931         ret = get_errno(mkdir(p, arg2));
7932         unlock_user(p, arg1, 0);
7933         return ret;
7934 #endif
7935 #if defined(TARGET_NR_mkdirat)
7936     case TARGET_NR_mkdirat:
7937         if (!(p = lock_user_string(arg2)))
7938             return -TARGET_EFAULT;
7939         ret = get_errno(mkdirat(arg1, p, arg3));
7940         unlock_user(p, arg2, 0);
7941         return ret;
7942 #endif
7943 #ifdef TARGET_NR_rmdir
7944     case TARGET_NR_rmdir:
7945         if (!(p = lock_user_string(arg1)))
7946             return -TARGET_EFAULT;
7947         ret = get_errno(rmdir(p));
7948         unlock_user(p, arg1, 0);
7949         return ret;
7950 #endif
7951     case TARGET_NR_dup:
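             /* On success, mirror any fd translator registered for the old
              * descriptor onto the new one. */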
7952         ret = get_errno(dup(arg1));
7953         if (ret >= 0) {
7954             fd_trans_dup(arg1, ret);
7955         }
7956         return ret;
7957 #ifdef TARGET_NR_pipe
7958     case TARGET_NR_pipe:
7959         return do_pipe(cpu_env, arg1, 0, 0);
7960 #endif
7961 #ifdef TARGET_NR_pipe2
7962     case TARGET_NR_pipe2:
7963         return do_pipe(cpu_env, arg1,
7964                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7965 #endif
7966     case TARGET_NR_times:
7967         {
7968             struct target_tms *tmsp;
7969             struct tms tms;
7970             ret = get_errno(times(&tms));
7971             if (arg1) {
7972                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7973                 if (!tmsp)
7974                     return -TARGET_EFAULT;
7975                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7976                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7977                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7978                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7979             }
7980             if (!is_error(ret))
7981                 ret = host_to_target_clock_t(ret);
7982         }
7983         return ret;
7984     case TARGET_NR_acct:
7985         if (arg1 == 0) {
7986             ret = get_errno(acct(NULL));
7987         } else {
7988             if (!(p = lock_user_string(arg1))) {
7989                 return -TARGET_EFAULT;
7990             }
7991             ret = get_errno(acct(path(p)));
7992             unlock_user(p, arg1, 0);
7993         }
7994         return ret;
7995 #ifdef TARGET_NR_umount2
7996     case TARGET_NR_umount2:
7997         if (!(p = lock_user_string(arg1)))
7998             return -TARGET_EFAULT;
7999         ret = get_errno(umount2(p, arg2));
8000         unlock_user(p, arg1, 0);
8001         return ret;
8002 #endif
8003     case TARGET_NR_ioctl:
8004         return do_ioctl(arg1, arg2, arg3);
8005 #ifdef TARGET_NR_fcntl
8006     case TARGET_NR_fcntl:
8007         return do_fcntl(arg1, arg2, arg3);
8008 #endif
8009     case TARGET_NR_setpgid:
8010         return get_errno(setpgid(arg1, arg2));
8011     case TARGET_NR_umask:
8012         return get_errno(umask(arg1));
8013     case TARGET_NR_chroot:
8014         if (!(p = lock_user_string(arg1)))
8015             return -TARGET_EFAULT;
8016         ret = get_errno(chroot(p));
8017         unlock_user(p, arg1, 0);
8018         return ret;
8019 #ifdef TARGET_NR_dup2
8020     case TARGET_NR_dup2:
8021         ret = get_errno(dup2(arg1, arg2));
8022         if (ret >= 0) {
8023             fd_trans_dup(arg1, arg2);
8024         }
8025         return ret;
8026 #endif
8027 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8028     case TARGET_NR_dup3:
8029     {
8030         int host_flags;
8031 
8032         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8033             return -TARGET_EINVAL;
8034         }
8035         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8036         ret = get_errno(dup3(arg1, arg2, host_flags));
8037         if (ret >= 0) {
8038             fd_trans_dup(arg1, arg2);
8039         }
8040         return ret;
8041     }
8042 #endif
8043 #ifdef TARGET_NR_getppid /* not on alpha */
8044     case TARGET_NR_getppid:
8045         return get_errno(getppid());
8046 #endif
8047 #ifdef TARGET_NR_getpgrp
8048     case TARGET_NR_getpgrp:
8049         return get_errno(getpgrp());
8050 #endif
8051     case TARGET_NR_setsid:
8052         return get_errno(setsid());
8053 #ifdef TARGET_NR_sigaction
8054     case TARGET_NR_sigaction:
8055         {
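                 /*
                  * The old sigaction ABI uses per-target struct layouts,
                  * so Alpha, MIPS and the generic case each get their own
                  * conversion path below.
                  */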
8056 #if defined(TARGET_ALPHA)
8057             struct target_sigaction act, oact, *pact = 0;
8058             struct target_old_sigaction *old_act;
8059             if (arg2) {
8060                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8061                     return -TARGET_EFAULT;
8062                 act._sa_handler = old_act->_sa_handler;
8063                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8064                 act.sa_flags = old_act->sa_flags;
8065                 act.sa_restorer = 0;
8066                 unlock_user_struct(old_act, arg2, 0);
8067                 pact = &act;
8068             }
8069             ret = get_errno(do_sigaction(arg1, pact, &oact));
8070             if (!is_error(ret) && arg3) {
8071                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8072                     return -TARGET_EFAULT;
8073                 old_act->_sa_handler = oact._sa_handler;
8074                 old_act->sa_mask = oact.sa_mask.sig[0];
8075                 old_act->sa_flags = oact.sa_flags;
8076                 unlock_user_struct(old_act, arg3, 1);
8077             }
8078 #elif defined(TARGET_MIPS)
8079             struct target_sigaction act, oact, *pact, *old_act;
8080 
8081             if (arg2) {
8082                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8083                     return -TARGET_EFAULT;
8084                 act._sa_handler = old_act->_sa_handler;
8085                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8086                 act.sa_flags = old_act->sa_flags;
8087                 unlock_user_struct(old_act, arg2, 0);
8088                 pact = &act;
8089             } else {
8090                 pact = NULL;
8091             }
8092 
8093             ret = get_errno(do_sigaction(arg1, pact, &oact));
8094 
8095             if (!is_error(ret) && arg3) {
8096                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8097                     return -TARGET_EFAULT;
8098                 old_act->_sa_handler = oact._sa_handler;
8099                 old_act->sa_flags = oact.sa_flags;
8100                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8101                 old_act->sa_mask.sig[1] = 0;
8102                 old_act->sa_mask.sig[2] = 0;
8103                 old_act->sa_mask.sig[3] = 0;
8104                 unlock_user_struct(old_act, arg3, 1);
8105             }
8106 #else
8107             struct target_old_sigaction *old_act;
8108             struct target_sigaction act, oact, *pact;
8109             if (arg2) {
8110                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8111                     return -TARGET_EFAULT;
8112                 act._sa_handler = old_act->_sa_handler;
8113                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8114                 act.sa_flags = old_act->sa_flags;
8115                 act.sa_restorer = old_act->sa_restorer;
8116 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8117                 act.ka_restorer = 0;
8118 #endif
8119                 unlock_user_struct(old_act, arg2, 0);
8120                 pact = &act;
8121             } else {
8122                 pact = NULL;
8123             }
8124             ret = get_errno(do_sigaction(arg1, pact, &oact));
8125             if (!is_error(ret) && arg3) {
8126                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8127                     return -TARGET_EFAULT;
8128                 old_act->_sa_handler = oact._sa_handler;
8129                 old_act->sa_mask = oact.sa_mask.sig[0];
8130                 old_act->sa_flags = oact.sa_flags;
8131                 old_act->sa_restorer = oact.sa_restorer;
8132                 unlock_user_struct(old_act, arg3, 1);
8133             }
8134 #endif
8135         }
8136         return ret;
8137 #endif
8138     case TARGET_NR_rt_sigaction:
8139         {
8140 #if defined(TARGET_ALPHA)
8141             /* For Alpha and SPARC this is a 5 argument syscall, with
8142              * a 'restorer' parameter which must be copied into the
8143              * sa_restorer field of the sigaction struct.
8144              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8145              * and arg5 is the sigsetsize.
8146              * Alpha also has a separate rt_sigaction struct that it uses
8147              * here; SPARC uses the usual sigaction struct.
8148              */
8149             struct target_rt_sigaction *rt_act;
8150             struct target_sigaction act, oact, *pact = 0;
8151 
8152             if (arg4 != sizeof(target_sigset_t)) {
8153                 return -TARGET_EINVAL;
8154             }
8155             if (arg2) {
8156                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8157                     return -TARGET_EFAULT;
8158                 act._sa_handler = rt_act->_sa_handler;
8159                 act.sa_mask = rt_act->sa_mask;
8160                 act.sa_flags = rt_act->sa_flags;
8161                 act.sa_restorer = arg5;
8162                 unlock_user_struct(rt_act, arg2, 0);
8163                 pact = &act;
8164             }
8165             ret = get_errno(do_sigaction(arg1, pact, &oact));
8166             if (!is_error(ret) && arg3) {
8167                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8168                     return -TARGET_EFAULT;
8169                 rt_act->_sa_handler = oact._sa_handler;
8170                 rt_act->sa_mask = oact.sa_mask;
8171                 rt_act->sa_flags = oact.sa_flags;
8172                 unlock_user_struct(rt_act, arg3, 1);
8173             }
8174 #else
8175 #ifdef TARGET_SPARC
8176             target_ulong restorer = arg4;
8177             target_ulong sigsetsize = arg5;
8178 #else
8179             target_ulong sigsetsize = arg4;
8180 #endif
8181             struct target_sigaction *act;
8182             struct target_sigaction *oact;
8183 
8184             if (sigsetsize != sizeof(target_sigset_t)) {
8185                 return -TARGET_EINVAL;
8186             }
8187             if (arg2) {
8188                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8189                     return -TARGET_EFAULT;
8190                 }
8191 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8192                 act->ka_restorer = restorer;
8193 #endif
8194             } else {
8195                 act = NULL;
8196             }
8197             if (arg3) {
8198                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8199                     ret = -TARGET_EFAULT;
8200                     goto rt_sigaction_fail;
8201                 }
8202             } else
8203                 oact = NULL;
8204             ret = get_errno(do_sigaction(arg1, act, oact));
8205         rt_sigaction_fail:
8206             if (act)
8207                 unlock_user_struct(act, arg2, 0);
8208             if (oact)
8209                 unlock_user_struct(oact, arg3, 1);
8210 #endif
8211         }
8212         return ret;
8213 #ifdef TARGET_NR_sgetmask /* not on alpha */
8214     case TARGET_NR_sgetmask:
8215         {
8216             sigset_t cur_set;
8217             abi_ulong target_set;
8218             ret = do_sigprocmask(0, NULL, &cur_set);
8219             if (!ret) {
8220                 host_to_target_old_sigset(&target_set, &cur_set);
8221                 ret = target_set;
8222             }
8223         }
8224         return ret;
8225 #endif
8226 #ifdef TARGET_NR_ssetmask /* not on alpha */
8227     case TARGET_NR_ssetmask:
8228         {
8229             sigset_t set, oset;
8230             abi_ulong target_set = arg1;
8231             target_to_host_old_sigset(&set, &target_set);
8232             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8233             if (!ret) {
8234                 host_to_target_old_sigset(&target_set, &oset);
8235                 ret = target_set;
8236             }
8237         }
8238         return ret;
8239 #endif
8240 #ifdef TARGET_NR_sigprocmask
8241     case TARGET_NR_sigprocmask:
8242         {
8243 #if defined(TARGET_ALPHA)
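                 /*
                  * Alpha's sigprocmask takes the mask by value in arg2 and
                  * returns the old mask as the syscall result, clearing v0
                  * to indicate success.
                  */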
8244             sigset_t set, oldset;
8245             abi_ulong mask;
8246             int how;
8247 
8248             switch (arg1) {
8249             case TARGET_SIG_BLOCK:
8250                 how = SIG_BLOCK;
8251                 break;
8252             case TARGET_SIG_UNBLOCK:
8253                 how = SIG_UNBLOCK;
8254                 break;
8255             case TARGET_SIG_SETMASK:
8256                 how = SIG_SETMASK;
8257                 break;
8258             default:
8259                 return -TARGET_EINVAL;
8260             }
8261             mask = arg2;
8262             target_to_host_old_sigset(&set, &mask);
8263 
8264             ret = do_sigprocmask(how, &set, &oldset);
8265             if (!is_error(ret)) {
8266                 host_to_target_old_sigset(&mask, &oldset);
8267                 ret = mask;
8268                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8269             }
8270 #else
8271             sigset_t set, oldset, *set_ptr;
8272             int how;
8273 
8274             if (arg2) {
8275                 switch (arg1) {
8276                 case TARGET_SIG_BLOCK:
8277                     how = SIG_BLOCK;
8278                     break;
8279                 case TARGET_SIG_UNBLOCK:
8280                     how = SIG_UNBLOCK;
8281                     break;
8282                 case TARGET_SIG_SETMASK:
8283                     how = SIG_SETMASK;
8284                     break;
8285                 default:
8286                     return -TARGET_EINVAL;
8287                 }
8288                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8289                     return -TARGET_EFAULT;
8290                 target_to_host_old_sigset(&set, p);
8291                 unlock_user(p, arg2, 0);
8292                 set_ptr = &set;
8293             } else {
8294                 how = 0;
8295                 set_ptr = NULL;
8296             }
8297             ret = do_sigprocmask(how, set_ptr, &oldset);
8298             if (!is_error(ret) && arg3) {
8299                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8300                     return -TARGET_EFAULT;
8301                 host_to_target_old_sigset(p, &oldset);
8302                 unlock_user(p, arg3, sizeof(target_sigset_t));
8303             }
8304 #endif
8305         }
8306         return ret;
8307 #endif
8308     case TARGET_NR_rt_sigprocmask:
8309         {
8310             int how = arg1;
8311             sigset_t set, oldset, *set_ptr;
8312 
8313             if (arg4 != sizeof(target_sigset_t)) {
8314                 return -TARGET_EINVAL;
8315             }
8316 
8317             if (arg2) {
8318                 switch(how) {
8319                 case TARGET_SIG_BLOCK:
8320                     how = SIG_BLOCK;
8321                     break;
8322                 case TARGET_SIG_UNBLOCK:
8323                     how = SIG_UNBLOCK;
8324                     break;
8325                 case TARGET_SIG_SETMASK:
8326                     how = SIG_SETMASK;
8327                     break;
8328                 default:
8329                     return -TARGET_EINVAL;
8330                 }
8331                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8332                     return -TARGET_EFAULT;
8333                 target_to_host_sigset(&set, p);
8334                 unlock_user(p, arg2, 0);
8335                 set_ptr = &set;
8336             } else {
8337                 how = 0;
8338                 set_ptr = NULL;
8339             }
8340             ret = do_sigprocmask(how, set_ptr, &oldset);
8341             if (!is_error(ret) && arg3) {
8342                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8343                     return -TARGET_EFAULT;
8344                 host_to_target_sigset(p, &oldset);
8345                 unlock_user(p, arg3, sizeof(target_sigset_t));
8346             }
8347         }
8348         return ret;
8349 #ifdef TARGET_NR_sigpending
8350     case TARGET_NR_sigpending:
8351         {
8352             sigset_t set;
8353             ret = get_errno(sigpending(&set));
8354             if (!is_error(ret)) {
8355                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8356                     return -TARGET_EFAULT;
8357                 host_to_target_old_sigset(p, &set);
8358                 unlock_user(p, arg1, sizeof(target_sigset_t));
8359             }
8360         }
8361         return ret;
8362 #endif
8363     case TARGET_NR_rt_sigpending:
8364         {
8365             sigset_t set;
8366 
8367             /* Yes, this check is >, not != like most. We follow the
8368              * kernel's logic here: it implements NR_sigpending through
8369              * the same code path, and in that case the old_sigset_t is
8370              * smaller in size.
8371              */
8372             if (arg2 > sizeof(target_sigset_t)) {
8373                 return -TARGET_EINVAL;
8374             }
8375 
8376             ret = get_errno(sigpending(&set));
8377             if (!is_error(ret)) {
8378                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8379                     return -TARGET_EFAULT;
8380                 host_to_target_sigset(p, &set);
8381                 unlock_user(p, arg1, sizeof(target_sigset_t));
8382             }
8383         }
8384         return ret;
8385 #ifdef TARGET_NR_sigsuspend
8386     case TARGET_NR_sigsuspend:
8387         {
8388             TaskState *ts = cpu->opaque;
8389 #if defined(TARGET_ALPHA)
8390             abi_ulong mask = arg1;
8391             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8392 #else
8393             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8394                 return -TARGET_EFAULT;
8395             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8396             unlock_user(p, arg1, 0);
8397 #endif
8398             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8399                                                SIGSET_T_SIZE));
8400             if (ret != -TARGET_ERESTARTSYS) {
8401                 ts->in_sigsuspend = 1;
8402             }
8403         }
8404         return ret;
8405 #endif
8406     case TARGET_NR_rt_sigsuspend:
8407         {
8408             TaskState *ts = cpu->opaque;
8409 
8410             if (arg2 != sizeof(target_sigset_t)) {
8411                 return -TARGET_EINVAL;
8412             }
8413             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8414                 return -TARGET_EFAULT;
8415             target_to_host_sigset(&ts->sigsuspend_mask, p);
8416             unlock_user(p, arg1, 0);
8417             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8418                                                SIGSET_T_SIZE));
8419             if (ret != -TARGET_ERESTARTSYS) {
8420                 ts->in_sigsuspend = 1;
8421             }
8422         }
8423         return ret;
8424     case TARGET_NR_rt_sigtimedwait:
8425         {
8426             sigset_t set;
8427             struct timespec uts, *puts;
8428             siginfo_t uinfo;
8429 
8430             if (arg4 != sizeof(target_sigset_t)) {
8431                 return -TARGET_EINVAL;
8432             }
8433 
8434             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8435                 return -TARGET_EFAULT;
8436             target_to_host_sigset(&set, p);
8437             unlock_user(p, arg1, 0);
8438             if (arg3) {
8439                 puts = &uts;
8440                 if (target_to_host_timespec(puts, arg3)) {
                         return -TARGET_EFAULT;
                     }
8441             } else {
8442                 puts = NULL;
8443             }
8444             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8445                                                  SIGSET_T_SIZE));
8446             if (!is_error(ret)) {
8447                 if (arg2) {
8448                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8449                                   0);
8450                     if (!p) {
8451                         return -TARGET_EFAULT;
8452                     }
8453                     host_to_target_siginfo(p, &uinfo);
8454                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8455                 }
8456                 ret = host_to_target_signal(ret);
8457             }
8458         }
8459         return ret;
8460     case TARGET_NR_rt_sigqueueinfo:
8461         {
8462             siginfo_t uinfo;
8463 
8464             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8465             if (!p) {
8466                 return -TARGET_EFAULT;
8467             }
8468             target_to_host_siginfo(&uinfo, p);
8469             unlock_user(p, arg3, 0);
8470             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8471         }
8472         return ret;
8473     case TARGET_NR_rt_tgsigqueueinfo:
8474         {
8475             siginfo_t uinfo;
8476 
8477             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8478             if (!p) {
8479                 return -TARGET_EFAULT;
8480             }
8481             target_to_host_siginfo(&uinfo, p);
8482             unlock_user(p, arg4, 0);
8483             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8484         }
8485         return ret;
8486 #ifdef TARGET_NR_sigreturn
8487     case TARGET_NR_sigreturn:
8488         if (block_signals()) {
8489             return -TARGET_ERESTARTSYS;
8490         }
8491         return do_sigreturn(cpu_env);
8492 #endif
8493     case TARGET_NR_rt_sigreturn:
8494         if (block_signals()) {
8495             return -TARGET_ERESTARTSYS;
8496         }
8497         return do_rt_sigreturn(cpu_env);
8498     case TARGET_NR_sethostname:
8499         if (!(p = lock_user_string(arg1)))
8500             return -TARGET_EFAULT;
8501         ret = get_errno(sethostname(p, arg2));
8502         unlock_user(p, arg1, 0);
8503         return ret;
8504 #ifdef TARGET_NR_setrlimit
8505     case TARGET_NR_setrlimit:
8506         {
8507             int resource = target_to_host_resource(arg1);
8508             struct target_rlimit *target_rlim;
8509             struct rlimit rlim;
8510             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8511                 return -TARGET_EFAULT;
8512             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8513             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8514             unlock_user_struct(target_rlim, arg2, 0);
8515             /*
8516              * If we just passed through resource limit settings for memory then
8517              * they would also apply to QEMU's own allocations, and QEMU will
8518              * crash or hang or die if its allocations fail. Ideally we would
8519              * track the guest allocations in QEMU and apply the limits ourselves.
8520              * For now, just tell the guest the call succeeded but don't actually
8521              * limit anything.
8522              */
8523             if (resource != RLIMIT_AS &&
8524                 resource != RLIMIT_DATA &&
8525                 resource != RLIMIT_STACK) {
8526                 return get_errno(setrlimit(resource, &rlim));
8527             } else {
8528                 return 0;
8529             }
8530         }
8531 #endif
8532 #ifdef TARGET_NR_getrlimit
8533     case TARGET_NR_getrlimit:
8534         {
8535             int resource = target_to_host_resource(arg1);
8536             struct target_rlimit *target_rlim;
8537             struct rlimit rlim;
8538 
8539             ret = get_errno(getrlimit(resource, &rlim));
8540             if (!is_error(ret)) {
8541                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8542                     return -TARGET_EFAULT;
8543                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8544                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8545                 unlock_user_struct(target_rlim, arg2, 1);
8546             }
8547         }
8548         return ret;
8549 #endif
8550     case TARGET_NR_getrusage:
8551         {
8552             struct rusage rusage;
8553             ret = get_errno(getrusage(arg1, &rusage));
8554             if (!is_error(ret)) {
8555                 ret = host_to_target_rusage(arg2, &rusage);
8556             }
8557         }
8558         return ret;
8559     case TARGET_NR_gettimeofday:
8560         {
8561             struct timeval tv;
8562             ret = get_errno(gettimeofday(&tv, NULL));
8563             if (!is_error(ret)) {
8564                 if (copy_to_user_timeval(arg1, &tv))
8565                     return -TARGET_EFAULT;
8566             }
8567         }
8568         return ret;
8569     case TARGET_NR_settimeofday:
8570         {
8571             struct timeval tv, *ptv = NULL;
8572             struct timezone tz, *ptz = NULL;
8573 
8574             if (arg1) {
8575                 if (copy_from_user_timeval(&tv, arg1)) {
8576                     return -TARGET_EFAULT;
8577                 }
8578                 ptv = &tv;
8579             }
8580 
8581             if (arg2) {
8582                 if (copy_from_user_timezone(&tz, arg2)) {
8583                     return -TARGET_EFAULT;
8584                 }
8585                 ptz = &tz;
8586             }
8587 
8588             return get_errno(settimeofday(ptv, ptz));
8589         }
8590 #if defined(TARGET_NR_select)
8591     case TARGET_NR_select:
8592 #if defined(TARGET_WANT_NI_OLD_SELECT)
8593         /* some architectures used to have old_select here
8594          * but now return ENOSYS for it.
8595          */
8596         ret = -TARGET_ENOSYS;
8597 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8598         ret = do_old_select(arg1);
8599 #else
8600         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8601 #endif
8602         return ret;
8603 #endif
8604 #ifdef TARGET_NR_pselect6
8605     case TARGET_NR_pselect6:
8606         {
8607             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8608             fd_set rfds, wfds, efds;
8609             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8610             struct timespec ts, *ts_ptr;
8611 
8612             /*
8613              * The 6th arg is actually two args smashed together,
8614              * so we cannot use the C library.
8615              */
8616             sigset_t set;
8617             struct {
8618                 sigset_t *set;
8619                 size_t size;
8620             } sig, *sig_ptr;
8621 
8622             abi_ulong arg_sigset, arg_sigsize, *arg7;
8623             target_sigset_t *target_sigset;
8624 
8625             n = arg1;
8626             rfd_addr = arg2;
8627             wfd_addr = arg3;
8628             efd_addr = arg4;
8629             ts_addr = arg5;
8630 
8631             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8632             if (ret) {
8633                 return ret;
8634             }
8635             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8636             if (ret) {
8637                 return ret;
8638             }
8639             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8640             if (ret) {
8641                 return ret;
8642             }
8643 
8644             /*
8645              * This takes a timespec, and not a timeval, so we cannot
8646              * use the do_select() helper ...
8647              */
8648             if (ts_addr) {
8649                 if (target_to_host_timespec(&ts, ts_addr)) {
8650                     return -TARGET_EFAULT;
8651                 }
8652                 ts_ptr = &ts;
8653             } else {
8654                 ts_ptr = NULL;
8655             }
8656 
8657             /* Extract the two packed args for the sigset */
8658             if (arg6) {
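                     /* arg6 points at a pair of abi_ulongs: [0] is the
                      * guest address of the sigset (may be 0), [1] is its
                      * size in bytes. */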
8659                 sig_ptr = &sig;
8660                 sig.size = SIGSET_T_SIZE;
8661 
8662                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8663                 if (!arg7) {
8664                     return -TARGET_EFAULT;
8665                 }
8666                 arg_sigset = tswapal(arg7[0]);
8667                 arg_sigsize = tswapal(arg7[1]);
8668                 unlock_user(arg7, arg6, 0);
8669 
8670                 if (arg_sigset) {
8671                     sig.set = &set;
8672                     if (arg_sigsize != sizeof(*target_sigset)) {
8673                         /* Like the kernel, we enforce correct size sigsets */
8674                         return -TARGET_EINVAL;
8675                     }
8676                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
8677                                               sizeof(*target_sigset), 1);
8678                     if (!target_sigset) {
8679                         return -TARGET_EFAULT;
8680                     }
8681                     target_to_host_sigset(&set, target_sigset);
8682                     unlock_user(target_sigset, arg_sigset, 0);
8683                 } else {
8684                     sig.set = NULL;
8685                 }
8686             } else {
8687                 sig_ptr = NULL;
8688             }
8689 
8690             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8691                                           ts_ptr, sig_ptr));
8692 
8693             if (!is_error(ret)) {
8694                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8695                     return -TARGET_EFAULT;
8696                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8697                     return -TARGET_EFAULT;
8698                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8699                     return -TARGET_EFAULT;
8700 
8701                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8702                     return -TARGET_EFAULT;
8703             }
8704         }
8705         return ret;
8706 #endif
8707 #ifdef TARGET_NR_symlink
8708     case TARGET_NR_symlink:
8709         {
8710             void *p2;
8711             p = lock_user_string(arg1);
8712             p2 = lock_user_string(arg2);
8713             if (!p || !p2)
8714                 ret = -TARGET_EFAULT;
8715             else
8716                 ret = get_errno(symlink(p, p2));
8717             unlock_user(p2, arg2, 0);
8718             unlock_user(p, arg1, 0);
8719         }
8720         return ret;
8721 #endif
8722 #if defined(TARGET_NR_symlinkat)
8723     case TARGET_NR_symlinkat:
8724         {
8725             void *p2;
8726             p  = lock_user_string(arg1);
8727             p2 = lock_user_string(arg3);
8728             if (!p || !p2)
8729                 ret = -TARGET_EFAULT;
8730             else
8731                 ret = get_errno(symlinkat(p, arg2, p2));
8732             unlock_user(p2, arg3, 0);
8733             unlock_user(p, arg1, 0);
8734         }
8735         return ret;
8736 #endif
8737 #ifdef TARGET_NR_readlink
8738     case TARGET_NR_readlink:
8739         {
8740             void *p2;
8741             p = lock_user_string(arg1);
8742             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8743             if (!p || !p2) {
8744                 ret = -TARGET_EFAULT;
8745             } else if (!arg3) {
8746                 /* Short circuit this for the magic exe check. */
8747                 ret = -TARGET_EINVAL;
8748             } else if (is_proc_myself((const char *)p, "exe")) {
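                     /* readlink on /proc/self/exe must report the guest
                      * binary, not the QEMU executable, so resolve
                      * exec_path instead. */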
8749                 char real[PATH_MAX], *temp;
8750                 temp = realpath(exec_path, real);
8751                 /* Return value is # of bytes that we wrote to the buffer. */
8752                 if (temp == NULL) {
8753                     ret = get_errno(-1);
8754                 } else {
8755                     /* Don't worry about sign mismatch as earlier mapping
8756                      * logic would have thrown a bad address error. */
8757                     ret = MIN(strlen(real), arg3);
8758                     /* We cannot NUL terminate the string. */
8759                     memcpy(p2, real, ret);
8760                 }
8761             } else {
8762                 ret = get_errno(readlink(path(p), p2, arg3));
8763             }
8764             unlock_user(p2, arg2, ret);
8765             unlock_user(p, arg1, 0);
8766         }
8767         return ret;
8768 #endif
8769 #if defined(TARGET_NR_readlinkat)
8770     case TARGET_NR_readlinkat:
8771         {
8772             void *p2;
8773             p  = lock_user_string(arg2);
8774             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8775             if (!p || !p2) {
8776                 ret = -TARGET_EFAULT;
8777             } else if (is_proc_myself((const char *)p, "exe")) {
8778                 char real[PATH_MAX], *temp;
8779                 temp = realpath(exec_path, real);
8780                 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
8781                 snprintf((char *)p2, arg4, "%s", real);
8782             } else {
8783                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8784             }
8785             unlock_user(p2, arg3, ret);
8786             unlock_user(p, arg2, 0);
8787         }
8788         return ret;
8789 #endif
8790 #ifdef TARGET_NR_swapon
8791     case TARGET_NR_swapon:
8792         if (!(p = lock_user_string(arg1)))
8793             return -TARGET_EFAULT;
8794         ret = get_errno(swapon(p, arg2));
8795         unlock_user(p, arg1, 0);
8796         return ret;
8797 #endif
8798     case TARGET_NR_reboot:
8799         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8800            /* only LINUX_REBOOT_CMD_RESTART2 uses the string in arg4;
                   it must be ignored for all other commands */
8801            p = lock_user_string(arg4);
8802            if (!p) {
8803                return -TARGET_EFAULT;
8804            }
8805            ret = get_errno(reboot(arg1, arg2, arg3, p));
8806            unlock_user(p, arg4, 0);
8807         } else {
8808            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8809         }
8810         return ret;
8811 #ifdef TARGET_NR_mmap
8812     case TARGET_NR_mmap:
8813 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8814     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8815     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8816     || defined(TARGET_S390X)
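             /* On these targets the old mmap syscall passes its six
              * arguments in a block of guest memory pointed to by arg1. */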
8817         {
8818             abi_ulong *v;
8819             abi_ulong v1, v2, v3, v4, v5, v6;
8820             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8821                 return -TARGET_EFAULT;
8822             v1 = tswapal(v[0]);
8823             v2 = tswapal(v[1]);
8824             v3 = tswapal(v[2]);
8825             v4 = tswapal(v[3]);
8826             v5 = tswapal(v[4]);
8827             v6 = tswapal(v[5]);
8828             unlock_user(v, arg1, 0);
8829             ret = get_errno(target_mmap(v1, v2, v3,
8830                                         target_to_host_bitmask(v4, mmap_flags_tbl),
8831                                         v5, v6));
8832         }
8833 #else
8834         ret = get_errno(target_mmap(arg1, arg2, arg3,
8835                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
8836                                     arg5,
8837                                     arg6));
8838 #endif
8839         return ret;
8840 #endif
8841 #ifdef TARGET_NR_mmap2
8842     case TARGET_NR_mmap2:
8843 #ifndef MMAP_SHIFT
8844 #define MMAP_SHIFT 12
8845 #endif
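             /* The last argument of mmap2 is an offset in units of
              * (1 << MMAP_SHIFT) bytes, so convert it to a byte offset. */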
8846         ret = target_mmap(arg1, arg2, arg3,
8847                           target_to_host_bitmask(arg4, mmap_flags_tbl),
8848                           arg5, arg6 << MMAP_SHIFT);
8849         return get_errno(ret);
8850 #endif
8851     case TARGET_NR_munmap:
8852         return get_errno(target_munmap(arg1, arg2));
8853     case TARGET_NR_mprotect:
8854         {
8855             TaskState *ts = cpu->opaque;
8856             /* Special hack to detect libc making the stack executable.  */
8857             if ((arg3 & PROT_GROWSDOWN)
8858                 && arg1 >= ts->info->stack_limit
8859                 && arg1 <= ts->info->start_stack) {
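                     /* Emulate PROT_GROWSDOWN by widening the range down
                      * to the stack limit and dropping the flag. */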
8860                 arg3 &= ~PROT_GROWSDOWN;
8861                 arg2 = arg2 + arg1 - ts->info->stack_limit;
8862                 arg1 = ts->info->stack_limit;
8863             }
8864         }
8865         return get_errno(target_mprotect(arg1, arg2, arg3));
8866 #ifdef TARGET_NR_mremap
8867     case TARGET_NR_mremap:
8868         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8869 #endif
8870         /* ??? msync/mlock/munlock are broken for softmmu.  */
8871 #ifdef TARGET_NR_msync
8872     case TARGET_NR_msync:
8873         return get_errno(msync(g2h(arg1), arg2, arg3));
8874 #endif
8875 #ifdef TARGET_NR_mlock
8876     case TARGET_NR_mlock:
8877         return get_errno(mlock(g2h(arg1), arg2));
8878 #endif
8879 #ifdef TARGET_NR_munlock
8880     case TARGET_NR_munlock:
8881         return get_errno(munlock(g2h(arg1), arg2));
8882 #endif
8883 #ifdef TARGET_NR_mlockall
8884     case TARGET_NR_mlockall:
8885         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8886 #endif
8887 #ifdef TARGET_NR_munlockall
8888     case TARGET_NR_munlockall:
8889         return get_errno(munlockall());
8890 #endif
8891 #ifdef TARGET_NR_truncate
8892     case TARGET_NR_truncate:
8893         if (!(p = lock_user_string(arg1)))
8894             return -TARGET_EFAULT;
8895         ret = get_errno(truncate(p, arg2));
8896         unlock_user(p, arg1, 0);
8897         return ret;
8898 #endif
8899 #ifdef TARGET_NR_ftruncate
8900     case TARGET_NR_ftruncate:
8901         return get_errno(ftruncate(arg1, arg2));
8902 #endif
8903     case TARGET_NR_fchmod:
8904         return get_errno(fchmod(arg1, arg2));
8905 #if defined(TARGET_NR_fchmodat)
8906     case TARGET_NR_fchmodat:
8907         if (!(p = lock_user_string(arg2)))
8908             return -TARGET_EFAULT;
8909         ret = get_errno(fchmodat(arg1, p, arg3, 0));
8910         unlock_user(p, arg2, 0);
8911         return ret;
8912 #endif
8913     case TARGET_NR_getpriority:
8914         /* Note that negative values are valid for getpriority, so we must
8915            differentiate based on errno settings.  */
8916         errno = 0;
8917         ret = getpriority(arg1, arg2);
8918         if (ret == -1 && errno != 0) {
8919             return -host_to_target_errno(errno);
8920         }
8921 #ifdef TARGET_ALPHA
8922         /* Return value is the unbiased priority.  Signal no error.  */
8923         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8924 #else
8925         /* Return value is a biased priority to avoid negative numbers.  */
8926         ret = 20 - ret;
8927 #endif
8928         return ret;
8929     case TARGET_NR_setpriority:
8930         return get_errno(setpriority(arg1, arg2, arg3));
8931 #ifdef TARGET_NR_statfs
8932     case TARGET_NR_statfs:
8933         if (!(p = lock_user_string(arg1))) {
8934             return -TARGET_EFAULT;
8935         }
8936         ret = get_errno(statfs(path(p), &stfs));
8937         unlock_user(p, arg1, 0);
8938     convert_statfs:
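             /* fstatfs jumps here to share the conversion of the host
              * statfs result into the guest's struct target_statfs. */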
8939         if (!is_error(ret)) {
8940             struct target_statfs *target_stfs;
8941 
8942             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8943                 return -TARGET_EFAULT;
8944             __put_user(stfs.f_type, &target_stfs->f_type);
8945             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8946             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8947             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8948             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8949             __put_user(stfs.f_files, &target_stfs->f_files);
8950             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8951             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8952             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8953             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8954             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8955 #ifdef _STATFS_F_FLAGS
8956             __put_user(stfs.f_flags, &target_stfs->f_flags);
8957 #else
8958             __put_user(0, &target_stfs->f_flags);
8959 #endif
8960             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8961             unlock_user_struct(target_stfs, arg2, 1);
8962         }
8963         return ret;
8964 #endif
8965 #ifdef TARGET_NR_fstatfs
8966     case TARGET_NR_fstatfs:
8967         ret = get_errno(fstatfs(arg1, &stfs));
8968         goto convert_statfs;
8969 #endif
8970 #ifdef TARGET_NR_statfs64
8971     case TARGET_NR_statfs64:
8972         if (!(p = lock_user_string(arg1))) {
8973             return -TARGET_EFAULT;
8974         }
8975         ret = get_errno(statfs(path(p), &stfs));
8976         unlock_user(p, arg1, 0);
8977     convert_statfs64:
8978         if (!is_error(ret)) {
8979             struct target_statfs64 *target_stfs;
8980 
8981             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8982                 return -TARGET_EFAULT;
8983             __put_user(stfs.f_type, &target_stfs->f_type);
8984             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8985             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8986             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8987             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8988             __put_user(stfs.f_files, &target_stfs->f_files);
8989             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8990             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8991             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8992             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8993             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8994             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8995             unlock_user_struct(target_stfs, arg3, 1);
8996         }
8997         return ret;
8998     case TARGET_NR_fstatfs64:
8999         ret = get_errno(fstatfs(arg1, &stfs));
9000         goto convert_statfs64;
9001 #endif
9002 #ifdef TARGET_NR_socketcall
9003     case TARGET_NR_socketcall:
9004         return do_socketcall(arg1, arg2);
9005 #endif
9006 #ifdef TARGET_NR_accept
9007     case TARGET_NR_accept:
9008         return do_accept4(arg1, arg2, arg3, 0);
9009 #endif
9010 #ifdef TARGET_NR_accept4
9011     case TARGET_NR_accept4:
9012         return do_accept4(arg1, arg2, arg3, arg4);
9013 #endif
9014 #ifdef TARGET_NR_bind
9015     case TARGET_NR_bind:
9016         return do_bind(arg1, arg2, arg3);
9017 #endif
9018 #ifdef TARGET_NR_connect
9019     case TARGET_NR_connect:
9020         return do_connect(arg1, arg2, arg3);
9021 #endif
9022 #ifdef TARGET_NR_getpeername
9023     case TARGET_NR_getpeername:
9024         return do_getpeername(arg1, arg2, arg3);
9025 #endif
9026 #ifdef TARGET_NR_getsockname
9027     case TARGET_NR_getsockname:
9028         return do_getsockname(arg1, arg2, arg3);
9029 #endif
9030 #ifdef TARGET_NR_getsockopt
9031     case TARGET_NR_getsockopt:
9032         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9033 #endif
9034 #ifdef TARGET_NR_listen
9035     case TARGET_NR_listen:
9036         return get_errno(listen(arg1, arg2));
9037 #endif
9038 #ifdef TARGET_NR_recv
9039     case TARGET_NR_recv:
9040         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9041 #endif
9042 #ifdef TARGET_NR_recvfrom
9043     case TARGET_NR_recvfrom:
9044         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9045 #endif
9046 #ifdef TARGET_NR_recvmsg
9047     case TARGET_NR_recvmsg:
9048         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9049 #endif
9050 #ifdef TARGET_NR_send
9051     case TARGET_NR_send:
9052         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9053 #endif
9054 #ifdef TARGET_NR_sendmsg
9055     case TARGET_NR_sendmsg:
9056         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9057 #endif
9058 #ifdef TARGET_NR_sendmmsg
9059     case TARGET_NR_sendmmsg:
9060         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9061     case TARGET_NR_recvmmsg:
9062         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9063 #endif
9064 #ifdef TARGET_NR_sendto
9065     case TARGET_NR_sendto:
9066         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9067 #endif
9068 #ifdef TARGET_NR_shutdown
9069     case TARGET_NR_shutdown:
9070         return get_errno(shutdown(arg1, arg2));
9071 #endif
9072 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9073     case TARGET_NR_getrandom:
9074         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9075         if (!p) {
9076             return -TARGET_EFAULT;
9077         }
9078         ret = get_errno(getrandom(p, arg2, arg3));
9079         unlock_user(p, arg1, ret);
9080         return ret;
9081 #endif
9082 #ifdef TARGET_NR_socket
9083     case TARGET_NR_socket:
9084         return do_socket(arg1, arg2, arg3);
9085 #endif
9086 #ifdef TARGET_NR_socketpair
9087     case TARGET_NR_socketpair:
9088         return do_socketpair(arg1, arg2, arg3, arg4);
9089 #endif
9090 #ifdef TARGET_NR_setsockopt
9091     case TARGET_NR_setsockopt:
9092         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9093 #endif
9094 #if defined(TARGET_NR_syslog)
9095     case TARGET_NR_syslog:
9096         {
9097             int len = arg3;
9098 
9099             switch (arg1) {
9100             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9101             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9102             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9103             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9104             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9105             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9106             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9107             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9108                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9109             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9110             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9111             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9112                 {
9113                     if (len < 0) {
9114                         return -TARGET_EINVAL;
9115                     }
9116                     if (len == 0) {
9117                         return 0;
9118                     }
9119                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9120                     if (!p) {
9121                         return -TARGET_EFAULT;
9122                     }
9123                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9124                     unlock_user(p, arg2, arg3);
9125                 }
9126                 return ret;
9127             default:
9128                 return -TARGET_EINVAL;
9129             }
9130         }
9131         break;
9132 #endif
9133     case TARGET_NR_setitimer:
9134         {
9135             struct itimerval value, ovalue, *pvalue;
9136 
9137             if (arg2) {
9138                 pvalue = &value;
9139                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9140                     || copy_from_user_timeval(&pvalue->it_value,
9141                                               arg2 + sizeof(struct target_timeval)))
9142                     return -TARGET_EFAULT;
9143             } else {
9144                 pvalue = NULL;
9145             }
9146             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9147             if (!is_error(ret) && arg3) {
9148                 if (copy_to_user_timeval(arg3,
9149                                          &ovalue.it_interval)
9150                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9151                                             &ovalue.it_value))
9152                     return -TARGET_EFAULT;
9153             }
9154         }
9155         return ret;
9156     case TARGET_NR_getitimer:
9157         {
9158             struct itimerval value;
9159 
9160             ret = get_errno(getitimer(arg1, &value));
9161             if (!is_error(ret) && arg2) {
9162                 if (copy_to_user_timeval(arg2,
9163                                          &value.it_interval)
9164                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9165                                             &value.it_value))
9166                     return -TARGET_EFAULT;
9167             }
9168         }
9169         return ret;
9170 #ifdef TARGET_NR_stat
9171     case TARGET_NR_stat:
9172         if (!(p = lock_user_string(arg1))) {
9173             return -TARGET_EFAULT;
9174         }
9175         ret = get_errno(stat(path(p), &st));
9176         unlock_user(p, arg1, 0);
9177         goto do_stat;
9178 #endif
9179 #ifdef TARGET_NR_lstat
9180     case TARGET_NR_lstat:
9181         if (!(p = lock_user_string(arg1))) {
9182             return -TARGET_EFAULT;
9183         }
9184         ret = get_errno(lstat(path(p), &st));
9185         unlock_user(p, arg1, 0);
9186         goto do_stat;
9187 #endif
9188 #ifdef TARGET_NR_fstat
9189     case TARGET_NR_fstat:
9190         {
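            /*
             * TARGET_NR_stat and TARGET_NR_lstat join this path at the
             * do_stat label below, so all three variants share the
             * conversion of the host struct stat into the target_stat
             * layout.
             */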
9191             ret = get_errno(fstat(arg1, &st));
9192 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9193         do_stat:
9194 #endif
9195             if (!is_error(ret)) {
9196                 struct target_stat *target_st;
9197 
9198                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9199                     return -TARGET_EFAULT;
9200                 memset(target_st, 0, sizeof(*target_st));
9201                 __put_user(st.st_dev, &target_st->st_dev);
9202                 __put_user(st.st_ino, &target_st->st_ino);
9203                 __put_user(st.st_mode, &target_st->st_mode);
9204                 __put_user(st.st_uid, &target_st->st_uid);
9205                 __put_user(st.st_gid, &target_st->st_gid);
9206                 __put_user(st.st_nlink, &target_st->st_nlink);
9207                 __put_user(st.st_rdev, &target_st->st_rdev);
9208                 __put_user(st.st_size, &target_st->st_size);
9209                 __put_user(st.st_blksize, &target_st->st_blksize);
9210                 __put_user(st.st_blocks, &target_st->st_blocks);
9211                 __put_user(st.st_atime, &target_st->target_st_atime);
9212                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9213                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9214 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9215     defined(TARGET_STAT_HAVE_NSEC)
9216                 __put_user(st.st_atim.tv_nsec,
9217                            &target_st->target_st_atime_nsec);
9218                 __put_user(st.st_mtim.tv_nsec,
9219                            &target_st->target_st_mtime_nsec);
9220                 __put_user(st.st_ctim.tv_nsec,
9221                            &target_st->target_st_ctime_nsec);
9222 #endif
9223                 unlock_user_struct(target_st, arg2, 1);
9224             }
9225         }
9226         return ret;
9227 #endif
9228     case TARGET_NR_vhangup:
9229         return get_errno(vhangup());
9230 #ifdef TARGET_NR_syscall
9231     case TARGET_NR_syscall:
9232         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9233                           arg6, arg7, arg8, 0);
9234 #endif
9235     case TARGET_NR_wait4:
9236         {
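            /*
             * The host wait status is converted and copied out only when a
             * child was actually reaped and the guest passed a status
             * pointer; the rusage is converted whenever arg4 is non-NULL.
             */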
9237             int status;
9238             abi_long status_ptr = arg2;
9239             struct rusage rusage, *rusage_ptr;
9240             abi_ulong target_rusage = arg4;
9241             abi_long rusage_err;
9242             if (target_rusage)
9243                 rusage_ptr = &rusage;
9244             else
9245                 rusage_ptr = NULL;
9246             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9247             if (!is_error(ret)) {
9248                 if (status_ptr && ret) {
9249                     status = host_to_target_waitstatus(status);
9250                     if (put_user_s32(status, status_ptr))
9251                         return -TARGET_EFAULT;
9252                 }
9253                 if (target_rusage) {
9254                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9255                     if (rusage_err) {
9256                         ret = rusage_err;
9257                     }
9258                 }
9259             }
9260         }
9261         return ret;
9262 #ifdef TARGET_NR_swapoff
9263     case TARGET_NR_swapoff:
9264         if (!(p = lock_user_string(arg1)))
9265             return -TARGET_EFAULT;
9266         ret = get_errno(swapoff(p));
9267         unlock_user(p, arg1, 0);
9268         return ret;
9269 #endif
9270     case TARGET_NR_sysinfo:
9271         {
9272             struct target_sysinfo *target_value;
9273             struct sysinfo value;
9274             ret = get_errno(sysinfo(&value));
9275             if (!is_error(ret) && arg1)
9276             {
9277                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9278                     return -TARGET_EFAULT;
9279                 __put_user(value.uptime, &target_value->uptime);
9280                 __put_user(value.loads[0], &target_value->loads[0]);
9281                 __put_user(value.loads[1], &target_value->loads[1]);
9282                 __put_user(value.loads[2], &target_value->loads[2]);
9283                 __put_user(value.totalram, &target_value->totalram);
9284                 __put_user(value.freeram, &target_value->freeram);
9285                 __put_user(value.sharedram, &target_value->sharedram);
9286                 __put_user(value.bufferram, &target_value->bufferram);
9287                 __put_user(value.totalswap, &target_value->totalswap);
9288                 __put_user(value.freeswap, &target_value->freeswap);
9289                 __put_user(value.procs, &target_value->procs);
9290                 __put_user(value.totalhigh, &target_value->totalhigh);
9291                 __put_user(value.freehigh, &target_value->freehigh);
9292                 __put_user(value.mem_unit, &target_value->mem_unit);
9293                 unlock_user_struct(target_value, arg1, 1);
9294             }
9295         }
9296         return ret;
9297 #ifdef TARGET_NR_ipc
9298     case TARGET_NR_ipc:
9299         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9300 #endif
9301 #ifdef TARGET_NR_semget
9302     case TARGET_NR_semget:
9303         return get_errno(semget(arg1, arg2, arg3));
9304 #endif
9305 #ifdef TARGET_NR_semop
9306     case TARGET_NR_semop:
9307         return do_semop(arg1, arg2, arg3);
9308 #endif
9309 #ifdef TARGET_NR_semctl
9310     case TARGET_NR_semctl:
9311         return do_semctl(arg1, arg2, arg3, arg4);
9312 #endif
9313 #ifdef TARGET_NR_msgctl
9314     case TARGET_NR_msgctl:
9315         return do_msgctl(arg1, arg2, arg3);
9316 #endif
9317 #ifdef TARGET_NR_msgget
9318     case TARGET_NR_msgget:
9319         return get_errno(msgget(arg1, arg2));
9320 #endif
9321 #ifdef TARGET_NR_msgrcv
9322     case TARGET_NR_msgrcv:
9323         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9324 #endif
9325 #ifdef TARGET_NR_msgsnd
9326     case TARGET_NR_msgsnd:
9327         return do_msgsnd(arg1, arg2, arg3, arg4);
9328 #endif
9329 #ifdef TARGET_NR_shmget
9330     case TARGET_NR_shmget:
9331         return get_errno(shmget(arg1, arg2, arg3));
9332 #endif
9333 #ifdef TARGET_NR_shmctl
9334     case TARGET_NR_shmctl:
9335         return do_shmctl(arg1, arg2, arg3);
9336 #endif
9337 #ifdef TARGET_NR_shmat
9338     case TARGET_NR_shmat:
9339         return do_shmat(cpu_env, arg1, arg2, arg3);
9340 #endif
9341 #ifdef TARGET_NR_shmdt
9342     case TARGET_NR_shmdt:
9343         return do_shmdt(arg1);
9344 #endif
9345     case TARGET_NR_fsync:
9346         return get_errno(fsync(arg1));
9347     case TARGET_NR_clone:
9348         /* Linux manages to have three different orderings for its
9349          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9350          * match the kernel's CONFIG_CLONE_* settings.
9351          * Microblaze is further special in that it uses a sixth
9352          * implicit argument to clone for the TLS pointer.
9353          */
9354 #if defined(TARGET_MICROBLAZE)
9355         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9356 #elif defined(TARGET_CLONE_BACKWARDS)
9357         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9358 #elif defined(TARGET_CLONE_BACKWARDS2)
9359         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9360 #else
9361         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9362 #endif
9363         return ret;
9364 #ifdef __NR_exit_group
9365         /* new thread calls */
9366     case TARGET_NR_exit_group:
9367         preexit_cleanup(cpu_env, arg1);
9368         return get_errno(exit_group(arg1));
9369 #endif
9370     case TARGET_NR_setdomainname:
9371         if (!(p = lock_user_string(arg1)))
9372             return -TARGET_EFAULT;
9373         ret = get_errno(setdomainname(p, arg2));
9374         unlock_user(p, arg1, 0);
9375         return ret;
9376     case TARGET_NR_uname:
9377         /* no need to transcode because we use the linux syscall */
9378         {
9379             struct new_utsname * buf;
9380 
9381             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9382                 return -TARGET_EFAULT;
9383             ret = get_errno(sys_uname(buf));
9384             if (!is_error(ret)) {
9385                 /* Overwrite the native machine name with whatever is being
9386                    emulated. */
9387                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9388                           sizeof(buf->machine));
9389                 /* Allow the user to override the reported release.  */
9390                 if (qemu_uname_release && *qemu_uname_release) {
9391                     g_strlcpy(buf->release, qemu_uname_release,
9392                               sizeof(buf->release));
9393                 }
9394             }
9395             unlock_user_struct(buf, arg1, 1);
9396         }
9397         return ret;
9398 #ifdef TARGET_I386
9399     case TARGET_NR_modify_ldt:
9400         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9401 #if !defined(TARGET_X86_64)
9402     case TARGET_NR_vm86:
9403         return do_vm86(cpu_env, arg1, arg2);
9404 #endif
9405 #endif
9406     case TARGET_NR_adjtimex:
9407         {
9408             struct timex host_buf;
9409 
9410             if (target_to_host_timex(&host_buf, arg1) != 0) {
9411                 return -TARGET_EFAULT;
9412             }
9413             ret = get_errno(adjtimex(&host_buf));
9414             if (!is_error(ret)) {
9415                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9416                     return -TARGET_EFAULT;
9417                 }
9418             }
9419         }
9420         return ret;
9421 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9422     case TARGET_NR_clock_adjtime:
9423         {
9424             struct timex htx, *phtx = &htx;
9425 
9426             if (target_to_host_timex(phtx, arg2) != 0) {
9427                 return -TARGET_EFAULT;
9428             }
9429             ret = get_errno(clock_adjtime(arg1, phtx));
9430             if (!is_error(ret) && phtx) {
9431                 if (host_to_target_timex(arg2, phtx) != 0) {
9432                     return -TARGET_EFAULT;
9433                 }
9434             }
9435         }
9436         return ret;
9437 #endif
9438     case TARGET_NR_getpgid:
9439         return get_errno(getpgid(arg1));
9440     case TARGET_NR_fchdir:
9441         return get_errno(fchdir(arg1));
9442     case TARGET_NR_personality:
9443         return get_errno(personality(arg1));
9444 #ifdef TARGET_NR__llseek /* Not on alpha */
9445     case TARGET_NR__llseek:
9446         {
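            /*
             * The 64-bit offset is passed as two 32-bit halves in arg2
             * (high) and arg3 (low); the resulting offset is written back
             * to the guest at arg4.
             */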
9447             int64_t res;
9448 #if !defined(__NR_llseek)
9449             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9450             if (res == -1) {
9451                 ret = get_errno(res);
9452             } else {
9453                 ret = 0;
9454             }
9455 #else
9456             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9457 #endif
9458             if ((ret == 0) && put_user_s64(res, arg4)) {
9459                 return -TARGET_EFAULT;
9460             }
9461         }
9462         return ret;
9463 #endif
9464 #ifdef TARGET_NR_getdents
9465     case TARGET_NR_getdents:
9466 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9467 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9468         {
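            /*
             * 32-bit target on a 64-bit host: the host linux_dirent has
             * 64-bit d_ino/d_off fields, so each entry is repacked into
             * the smaller target_dirent layout in the guest buffer.
             */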
9469             struct target_dirent *target_dirp;
9470             struct linux_dirent *dirp;
9471             abi_long count = arg3;
9472 
9473             dirp = g_try_malloc(count);
9474             if (!dirp) {
9475                 return -TARGET_ENOMEM;
9476             }
9477 
9478             ret = get_errno(sys_getdents(arg1, dirp, count));
9479             if (!is_error(ret)) {
9480                 struct linux_dirent *de;
9481                 struct target_dirent *tde;
9482                 int len = ret;
9483                 int reclen, treclen;
9484                 int count1, tnamelen;
9485 
9486                 count1 = 0;
9487                 de = dirp;
9488                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9489                     return -TARGET_EFAULT;
9490                 tde = target_dirp;
9491                 while (len > 0) {
9492                     reclen = de->d_reclen;
9493                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9494                     assert(tnamelen >= 0);
9495                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9496                     assert(count1 + treclen <= count);
9497                     tde->d_reclen = tswap16(treclen);
9498                     tde->d_ino = tswapal(de->d_ino);
9499                     tde->d_off = tswapal(de->d_off);
9500                     memcpy(tde->d_name, de->d_name, tnamelen);
9501                     de = (struct linux_dirent *)((char *)de + reclen);
9502                     len -= reclen;
9503                     tde = (struct target_dirent *)((char *)tde + treclen);
9504                     count1 += treclen;
9505                 }
9506                 ret = count1;
9507                 unlock_user(target_dirp, arg2, ret);
9508             }
9509             g_free(dirp);
9510         }
9511 #else
9512         {
9513             struct linux_dirent *dirp;
9514             abi_long count = arg3;
9515 
9516             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9517                 return -TARGET_EFAULT;
9518             ret = get_errno(sys_getdents(arg1, dirp, count));
9519             if (!is_error(ret)) {
9520                 struct linux_dirent *de;
9521                 int len = ret;
9522                 int reclen;
9523                 de = dirp;
9524                 while (len > 0) {
9525                     reclen = de->d_reclen;
9526                     if (reclen > len)
9527                         break;
9528                     de->d_reclen = tswap16(reclen);
9529                     tswapls(&de->d_ino);
9530                     tswapls(&de->d_off);
9531                     de = (struct linux_dirent *)((char *)de + reclen);
9532                     len -= reclen;
9533                 }
9534             }
9535             unlock_user(dirp, arg2, ret);
9536         }
9537 #endif
9538 #else
9539         /* Implement getdents in terms of getdents64 */
9540         {
9541             struct linux_dirent64 *dirp;
9542             abi_long count = arg3;
9543 
9544             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9545             if (!dirp) {
9546                 return -TARGET_EFAULT;
9547             }
9548             ret = get_errno(sys_getdents64(arg1, dirp, count));
9549             if (!is_error(ret)) {
9550                 /* Convert the dirent64 structs to target dirent.  We do this
9551                  * in-place, since we can guarantee that a target_dirent is no
9552                  * larger than a dirent64; however this means we have to be
9553                  * careful to read everything before writing in the new format.
9554                  */
9555                 struct linux_dirent64 *de;
9556                 struct target_dirent *tde;
9557                 int len = ret;
9558                 int tlen = 0;
9559 
9560                 de = dirp;
9561                 tde = (struct target_dirent *)dirp;
9562                 while (len > 0) {
9563                     int namelen, treclen;
9564                     int reclen = de->d_reclen;
9565                     uint64_t ino = de->d_ino;
9566                     int64_t off = de->d_off;
9567                     uint8_t type = de->d_type;
9568 
9569                     namelen = strlen(de->d_name);
9570                     treclen = offsetof(struct target_dirent, d_name)
9571                         + namelen + 2;
9572                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9573 
9574                     memmove(tde->d_name, de->d_name, namelen + 1);
9575                     tde->d_ino = tswapal(ino);
9576                     tde->d_off = tswapal(off);
9577                     tde->d_reclen = tswap16(treclen);
9578                     /* The target_dirent type is in what was formerly a padding
9579                      * byte at the end of the structure:
9580                      */
9581                     *(((char *)tde) + treclen - 1) = type;
9582 
9583                     de = (struct linux_dirent64 *)((char *)de + reclen);
9584                     tde = (struct target_dirent *)((char *)tde + treclen);
9585                     len -= reclen;
9586                     tlen += treclen;
9587                 }
9588                 ret = tlen;
9589             }
9590             unlock_user(dirp, arg2, ret);
9591         }
9592 #endif
9593         return ret;
9594 #endif /* TARGET_NR_getdents */
9595 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9596     case TARGET_NR_getdents64:
9597         {
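            /*
             * linux_dirent64 has the same layout on host and target, so
             * the entries only need their d_ino/d_off/d_reclen fields
             * byteswapped in place.
             */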
9598             struct linux_dirent64 *dirp;
9599             abi_long count = arg3;
9600             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9601                 return -TARGET_EFAULT;
9602             ret = get_errno(sys_getdents64(arg1, dirp, count));
9603             if (!is_error(ret)) {
9604                 struct linux_dirent64 *de;
9605                 int len = ret;
9606                 int reclen;
9607                 de = dirp;
9608                 while (len > 0) {
9609                     reclen = de->d_reclen;
9610                     if (reclen > len)
9611                         break;
9612                     de->d_reclen = tswap16(reclen);
9613                     tswap64s((uint64_t *)&de->d_ino);
9614                     tswap64s((uint64_t *)&de->d_off);
9615                     de = (struct linux_dirent64 *)((char *)de + reclen);
9616                     len -= reclen;
9617                 }
9618             }
9619             unlock_user(dirp, arg2, ret);
9620         }
9621         return ret;
9622 #endif /* TARGET_NR_getdents64 */
9623 #if defined(TARGET_NR__newselect)
9624     case TARGET_NR__newselect:
9625         return do_select(arg1, arg2, arg3, arg4, arg5);
9626 #endif
9627 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9628 # ifdef TARGET_NR_poll
9629     case TARGET_NR_poll:
9630 # endif
9631 # ifdef TARGET_NR_ppoll
9632     case TARGET_NR_ppoll:
9633 # endif
9634         {
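            /*
             * The guest pollfd array is byteswapped into a host copy and
             * dispatched through safe_ppoll(): ppoll converts its guest
             * timespec and sigset, while poll builds a timespec from the
             * millisecond timeout.  On success the revents fields are
             * copied back to the guest array.
             */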
9635             struct target_pollfd *target_pfd;
9636             unsigned int nfds = arg2;
9637             struct pollfd *pfd;
9638             unsigned int i;
9639 
9640             pfd = NULL;
9641             target_pfd = NULL;
9642             if (nfds) {
9643                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9644                     return -TARGET_EINVAL;
9645                 }
9646 
9647                 target_pfd = lock_user(VERIFY_WRITE, arg1,
9648                                        sizeof(struct target_pollfd) * nfds, 1);
9649                 if (!target_pfd) {
9650                     return -TARGET_EFAULT;
9651                 }
9652 
9653                 pfd = alloca(sizeof(struct pollfd) * nfds);
9654                 for (i = 0; i < nfds; i++) {
9655                     pfd[i].fd = tswap32(target_pfd[i].fd);
9656                     pfd[i].events = tswap16(target_pfd[i].events);
9657                 }
9658             }
9659 
9660             switch (num) {
9661 # ifdef TARGET_NR_ppoll
9662             case TARGET_NR_ppoll:
9663             {
9664                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9665                 target_sigset_t *target_set;
9666                 sigset_t _set, *set = &_set;
9667 
9668                 if (arg3) {
9669                     if (target_to_host_timespec(timeout_ts, arg3)) {
9670                         unlock_user(target_pfd, arg1, 0);
9671                         return -TARGET_EFAULT;
9672                     }
9673                 } else {
9674                     timeout_ts = NULL;
9675                 }
9676 
9677                 if (arg4) {
9678                     if (arg5 != sizeof(target_sigset_t)) {
9679                         unlock_user(target_pfd, arg1, 0);
9680                         return -TARGET_EINVAL;
9681                     }
9682 
9683                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9684                     if (!target_set) {
9685                         unlock_user(target_pfd, arg1, 0);
9686                         return -TARGET_EFAULT;
9687                     }
9688                     target_to_host_sigset(set, target_set);
9689                 } else {
9690                     set = NULL;
9691                 }
9692 
9693                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9694                                            set, SIGSET_T_SIZE));
9695 
9696                 if (!is_error(ret) && arg3) {
9697                     host_to_target_timespec(arg3, timeout_ts);
9698                 }
9699                 if (arg4) {
9700                     unlock_user(target_set, arg4, 0);
9701                 }
9702                 break;
9703             }
9704 # endif
9705 # ifdef TARGET_NR_poll
9706             case TARGET_NR_poll:
9707             {
9708                 struct timespec ts, *pts;
9709 
9710                 if (arg3 >= 0) {
9711                     /* Convert ms to secs, ns */
9712                     ts.tv_sec = arg3 / 1000;
9713                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9714                     pts = &ts;
9715                 } else {
9716                     /* A negative poll() timeout means "infinite" */
9717                     pts = NULL;
9718                 }
9719                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9720                 break;
9721             }
9722 # endif
9723             default:
9724                 g_assert_not_reached();
9725             }
9726 
9727             if (!is_error(ret)) {
9728                 for(i = 0; i < nfds; i++) {
9729                     target_pfd[i].revents = tswap16(pfd[i].revents);
9730                 }
9731             }
9732             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9733         }
9734         return ret;
9735 #endif
9736     case TARGET_NR_flock:
9737         /* NOTE: the flock constants are the same on every Linux
9738            platform, so no translation of arg2 is needed. */
9739         return get_errno(safe_flock(arg1, arg2));
9740     case TARGET_NR_readv:
9741         {
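            /*
             * lock_iovec() translates and validates the guest iovec array;
             * if it fails, the errno it set is converted to a target errno.
             */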
9742             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9743             if (vec != NULL) {
9744                 ret = get_errno(safe_readv(arg1, vec, arg3));
9745                 unlock_iovec(vec, arg2, arg3, 1);
9746             } else {
9747                 ret = -host_to_target_errno(errno);
9748             }
9749         }
9750         return ret;
9751     case TARGET_NR_writev:
9752         {
9753             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9754             if (vec != NULL) {
9755                 ret = get_errno(safe_writev(arg1, vec, arg3));
9756                 unlock_iovec(vec, arg2, arg3, 0);
9757             } else {
9758                 ret = -host_to_target_errno(errno);
9759             }
9760         }
9761         return ret;
9762 #if defined(TARGET_NR_preadv)
9763     case TARGET_NR_preadv:
9764         {
9765             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9766             if (vec != NULL) {
9767                 unsigned long low, high;
9768 
9769                 target_to_host_low_high(arg4, arg5, &low, &high);
9770                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9771                 unlock_iovec(vec, arg2, arg3, 1);
9772             } else {
9773                 ret = -host_to_target_errno(errno);
9774            }
9775         }
9776         return ret;
9777 #endif
9778 #if defined(TARGET_NR_pwritev)
9779     case TARGET_NR_pwritev:
9780         {
9781             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9782             if (vec != NULL) {
9783                 unsigned long low, high;
9784 
9785                 target_to_host_low_high(arg4, arg5, &low, &high);
9786                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9787                 unlock_iovec(vec, arg2, arg3, 0);
9788             } else {
9789                 ret = -host_to_target_errno(errno);
9790            }
9791         }
9792         return ret;
9793 #endif
9794     case TARGET_NR_getsid:
9795         return get_errno(getsid(arg1));
9796 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9797     case TARGET_NR_fdatasync:
9798         return get_errno(fdatasync(arg1));
9799 #endif
9800 #ifdef TARGET_NR__sysctl
9801     case TARGET_NR__sysctl:
9802         /* We don't implement this, but ENOTDIR is always a safe
9803            return value. */
9804         return -TARGET_ENOTDIR;
9805 #endif
9806     case TARGET_NR_sched_getaffinity:
9807         {
9808             unsigned int mask_size;
9809             unsigned long *mask;
9810 
9811             /*
9812              * sched_getaffinity needs multiples of ulong, so need to take
9813              * care of mismatches between target ulong and host ulong sizes.
9814              */
9815             if (arg2 & (sizeof(abi_ulong) - 1)) {
9816                 return -TARGET_EINVAL;
9817             }
9818             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9819 
9820             mask = alloca(mask_size);
9821             memset(mask, 0, mask_size);
9822             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9823 
9824             if (!is_error(ret)) {
9825                 if (ret > arg2) {
9826                     /* More data was returned than will fit in the caller's buffer.
9827                      * This only happens if sizeof(abi_long) < sizeof(long)
9828                      * and the caller passed us a buffer holding an odd number
9829                      * of abi_longs. If the host kernel is actually using the
9830                      * extra 4 bytes then fail EINVAL; otherwise we can just
9831                      * ignore them and only copy the interesting part.
9832                      */
9833                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9834                     if (numcpus > arg2 * 8) {
9835                         return -TARGET_EINVAL;
9836                     }
9837                     ret = arg2;
9838                 }
9839 
9840                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9841                     return -TARGET_EFAULT;
9842                 }
9843             }
9844         }
9845         return ret;
9846     case TARGET_NR_sched_setaffinity:
9847         {
9848             unsigned int mask_size;
9849             unsigned long *mask;
9850 
9851             /*
9852              * sched_setaffinity needs multiples of ulong, so need to take
9853              * care of mismatches between target ulong and host ulong sizes.
9854              */
9855             if (arg2 & (sizeof(abi_ulong) - 1)) {
9856                 return -TARGET_EINVAL;
9857             }
9858             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9859             mask = alloca(mask_size);
9860 
9861             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9862             if (ret) {
9863                 return ret;
9864             }
9865 
9866             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9867         }
9868     case TARGET_NR_getcpu:
9869         {
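            /*
             * Only the pointers the guest actually passed are filled in;
             * the third (tcache) argument is unused by modern kernels and
             * is always passed as NULL here.
             */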
9870             unsigned cpu, node;
9871             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9872                                        arg2 ? &node : NULL,
9873                                        NULL));
9874             if (is_error(ret)) {
9875                 return ret;
9876             }
9877             if (arg1 && put_user_u32(cpu, arg1)) {
9878                 return -TARGET_EFAULT;
9879             }
9880             if (arg2 && put_user_u32(node, arg2)) {
9881                 return -TARGET_EFAULT;
9882             }
9883         }
9884         return ret;
9885     case TARGET_NR_sched_setparam:
9886         {
9887             struct sched_param *target_schp;
9888             struct sched_param schp;
9889 
9890             if (arg2 == 0) {
9891                 return -TARGET_EINVAL;
9892             }
9893             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9894                 return -TARGET_EFAULT;
9895             schp.sched_priority = tswap32(target_schp->sched_priority);
9896             unlock_user_struct(target_schp, arg2, 0);
9897             return get_errno(sched_setparam(arg1, &schp));
9898         }
9899     case TARGET_NR_sched_getparam:
9900         {
9901             struct sched_param *target_schp;
9902             struct sched_param schp;
9903 
9904             if (arg2 == 0) {
9905                 return -TARGET_EINVAL;
9906             }
9907             ret = get_errno(sched_getparam(arg1, &schp));
9908             if (!is_error(ret)) {
9909                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9910                     return -TARGET_EFAULT;
9911                 target_schp->sched_priority = tswap32(schp.sched_priority);
9912                 unlock_user_struct(target_schp, arg2, 1);
9913             }
9914         }
9915         return ret;
9916     case TARGET_NR_sched_setscheduler:
9917         {
9918             struct sched_param *target_schp;
9919             struct sched_param schp;
9920             if (arg3 == 0) {
9921                 return -TARGET_EINVAL;
9922             }
9923             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9924                 return -TARGET_EFAULT;
9925             schp.sched_priority = tswap32(target_schp->sched_priority);
9926             unlock_user_struct(target_schp, arg3, 0);
9927             return get_errno(sched_setscheduler(arg1, arg2, &schp));
9928         }
9929     case TARGET_NR_sched_getscheduler:
9930         return get_errno(sched_getscheduler(arg1));
9931     case TARGET_NR_sched_yield:
9932         return get_errno(sched_yield());
9933     case TARGET_NR_sched_get_priority_max:
9934         return get_errno(sched_get_priority_max(arg1));
9935     case TARGET_NR_sched_get_priority_min:
9936         return get_errno(sched_get_priority_min(arg1));
9937     case TARGET_NR_sched_rr_get_interval:
9938         {
9939             struct timespec ts;
9940             ret = get_errno(sched_rr_get_interval(arg1, &ts));
9941             if (!is_error(ret)) {
9942                 ret = host_to_target_timespec(arg2, &ts);
9943             }
9944         }
9945         return ret;
9946     case TARGET_NR_nanosleep:
9947         {
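            /*
             * The remaining time is copied back only when the call failed
             * (in practice when it was interrupted), which is when the
             * host kernel fills in rem.
             */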
9948             struct timespec req, rem;
9949             target_to_host_timespec(&req, arg1);
9950             ret = get_errno(safe_nanosleep(&req, &rem));
9951             if (is_error(ret) && arg2) {
9952                 host_to_target_timespec(arg2, &rem);
9953             }
9954         }
9955         return ret;
9956     case TARGET_NR_prctl:
9957         switch (arg1) {
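        /*
         * PR_GET_PDEATHSIG writes the signal number through arg2, so it
         * is bounced through a host int and copied out to the guest.
         */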
9958         case PR_GET_PDEATHSIG:
9959         {
9960             int deathsig;
9961             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9962             if (!is_error(ret) && arg2
9963                 && put_user_ual(deathsig, arg2)) {
9964                 return -TARGET_EFAULT;
9965             }
9966             return ret;
9967         }
9968 #ifdef PR_GET_NAME
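        /*
         * The 16-byte buffer matches the kernel's TASK_COMM_LEN, which is
         * the size PR_GET_NAME and PR_SET_NAME operate on.
         */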
9969         case PR_GET_NAME:
9970         {
9971             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9972             if (!name) {
9973                 return -TARGET_EFAULT;
9974             }
9975             ret = get_errno(prctl(arg1, (unsigned long)name,
9976                                   arg3, arg4, arg5));
9977             unlock_user(name, arg2, 16);
9978             return ret;
9979         }
9980         case PR_SET_NAME:
9981         {
9982             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9983             if (!name) {
9984                 return -TARGET_EFAULT;
9985             }
9986             ret = get_errno(prctl(arg1, (unsigned long)name,
9987                                   arg3, arg4, arg5));
9988             unlock_user(name, arg2, 0);
9989             return ret;
9990         }
9991 #endif
9992 #ifdef TARGET_MIPS
9993         case TARGET_PR_GET_FP_MODE:
9994         {
9995             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9996             ret = 0;
9997             if (env->CP0_Status & (1 << CP0St_FR)) {
9998                 ret |= TARGET_PR_FP_MODE_FR;
9999             }
10000             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10001                 ret |= TARGET_PR_FP_MODE_FRE;
10002             }
10003             return ret;
10004         }
10005         case TARGET_PR_SET_FP_MODE:
10006         {
10007             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10008             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10009             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10010             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10011             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10012 
10013             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10014                                             TARGET_PR_FP_MODE_FRE;
10015 
10016             /* If nothing to change, return right away, successfully.  */
10017             if (old_fr == new_fr && old_fre == new_fre) {
10018                 return 0;
10019             }
10020             /* Check the value is valid */
10021             if (arg2 & ~known_bits) {
10022                 return -TARGET_EOPNOTSUPP;
10023             }
10024             /* Setting FRE without FR is not supported.  */
10025             if (new_fre && !new_fr) {
10026                 return -TARGET_EOPNOTSUPP;
10027             }
10028             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10029                 /* FR1 is not supported */
10030                 return -TARGET_EOPNOTSUPP;
10031             }
10032             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10033                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10034                 /* cannot set FR=0 */
10035                 return -TARGET_EOPNOTSUPP;
10036             }
10037             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10038                 /* Cannot set FRE=1 */
10039                 return -TARGET_EOPNOTSUPP;
10040             }
10041 
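            /*
             * Changing FR repacks the FPU registers: the odd-numbered
             * singles move between their own register slot and the upper
             * half of the preceding even register, so existing values
             * follow the new register model.
             */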
10042             int i;
10043             fpr_t *fpr = env->active_fpu.fpr;
10044             for (i = 0; i < 32 ; i += 2) {
10045                 if (!old_fr && new_fr) {
10046                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10047                 } else if (old_fr && !new_fr) {
10048                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10049                 }
10050             }
10051 
10052             if (new_fr) {
10053                 env->CP0_Status |= (1 << CP0St_FR);
10054                 env->hflags |= MIPS_HFLAG_F64;
10055             } else {
10056                 env->CP0_Status &= ~(1 << CP0St_FR);
10057                 env->hflags &= ~MIPS_HFLAG_F64;
10058             }
10059             if (new_fre) {
10060                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10061                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10062                     env->hflags |= MIPS_HFLAG_FRE;
10063                 }
10064             } else {
10065                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10066                 env->hflags &= ~MIPS_HFLAG_FRE;
10067             }
10068 
10069             return 0;
10070         }
10071 #endif /* MIPS */
10072 #ifdef TARGET_AARCH64
10073         case TARGET_PR_SVE_SET_VL:
10074             /*
10075              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10076              * PR_SVE_VL_INHERIT.  Note the kernel definition
10077              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10078              * even though the current architectural maximum is VQ=16.
10079              */
10080             ret = -TARGET_EINVAL;
10081             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10082                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10083                 CPUARMState *env = cpu_env;
10084                 ARMCPU *cpu = env_archcpu(env);
10085                 uint32_t vq, old_vq;
10086 
10087                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10088                 vq = MAX(arg2 / 16, 1);
10089                 vq = MIN(vq, cpu->sve_max_vq);
10090 
10091                 if (vq < old_vq) {
10092                     aarch64_sve_narrow_vq(env, vq);
10093                 }
10094                 env->vfp.zcr_el[1] = vq - 1;
10095                 arm_rebuild_hflags(env);
10096                 ret = vq * 16;
10097             }
10098             return ret;
10099         case TARGET_PR_SVE_GET_VL:
10100             ret = -TARGET_EINVAL;
10101             {
10102                 ARMCPU *cpu = env_archcpu(cpu_env);
10103                 if (cpu_isar_feature(aa64_sve, cpu)) {
10104                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10105                 }
10106             }
10107             return ret;
10108         case TARGET_PR_PAC_RESET_KEYS:
10109             {
10110                 CPUARMState *env = cpu_env;
10111                 ARMCPU *cpu = env_archcpu(env);
10112 
10113                 if (arg3 || arg4 || arg5) {
10114                     return -TARGET_EINVAL;
10115                 }
10116                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10117                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10118                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10119                                TARGET_PR_PAC_APGAKEY);
10120                     int ret = 0;
10121                     Error *err = NULL;
10122 
10123                     if (arg2 == 0) {
10124                         arg2 = all;
10125                     } else if (arg2 & ~all) {
10126                         return -TARGET_EINVAL;
10127                     }
10128                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10129                         ret |= qemu_guest_getrandom(&env->keys.apia,
10130                                                     sizeof(ARMPACKey), &err);
10131                     }
10132                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10133                         ret |= qemu_guest_getrandom(&env->keys.apib,
10134                                                     sizeof(ARMPACKey), &err);
10135                     }
10136                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10137                         ret |= qemu_guest_getrandom(&env->keys.apda,
10138                                                     sizeof(ARMPACKey), &err);
10139                     }
10140                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10141                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10142                                                     sizeof(ARMPACKey), &err);
10143                     }
10144                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10145                         ret |= qemu_guest_getrandom(&env->keys.apga,
10146                                                     sizeof(ARMPACKey), &err);
10147                     }
10148                     if (ret != 0) {
10149                         /*
10150                          * Some unknown failure in the crypto.  The best
10151                          * we can do is log it and fail the syscall.
10152                          * The real syscall cannot fail this way.
10153                          */
10154                         qemu_log_mask(LOG_UNIMP,
10155                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10156                                       error_get_pretty(err));
10157                         error_free(err);
10158                         return -TARGET_EIO;
10159                     }
10160                     return 0;
10161                 }
10162             }
10163             return -TARGET_EINVAL;
10164 #endif /* AARCH64 */
10165         case PR_GET_SECCOMP:
10166         case PR_SET_SECCOMP:
10167             /* Disable seccomp to prevent the target from disabling
10168              * syscalls that we need. */
10169             return -TARGET_EINVAL;
10170         default:
10171             /* Most prctl options have no pointer arguments */
10172             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10173         }
10174         break;
10175 #ifdef TARGET_NR_arch_prctl
10176     case TARGET_NR_arch_prctl:
10177 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10178         return do_arch_prctl(cpu_env, arg1, arg2);
10179 #else
10180 #error unreachable
10181 #endif
10182 #endif
10183 #ifdef TARGET_NR_pread64
10184     case TARGET_NR_pread64:
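        /*
         * Some ABIs pass 64-bit values in aligned register pairs, which
         * shifts the offset halves up by one argument slot.
         */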
10185         if (regpairs_aligned(cpu_env, num)) {
10186             arg4 = arg5;
10187             arg5 = arg6;
10188         }
10189         if (arg2 == 0 && arg3 == 0) {
10190             /* Special-case NULL buffer and zero length, which should succeed */
10191             p = 0;
10192         } else {
10193             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10194             if (!p) {
10195                 return -TARGET_EFAULT;
10196             }
10197         }
10198         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10199         unlock_user(p, arg2, ret);
10200         return ret;
10201     case TARGET_NR_pwrite64:
10202         if (regpairs_aligned(cpu_env, num)) {
10203             arg4 = arg5;
10204             arg5 = arg6;
10205         }
10206         if (arg2 == 0 && arg3 == 0) {
10207             /* Special-case NULL buffer and zero length, which should succeed */
10208             p = 0;
10209         } else {
10210             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10211             if (!p) {
10212                 return -TARGET_EFAULT;
10213             }
10214         }
10215         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10216         unlock_user(p, arg2, 0);
10217         return ret;
10218 #endif
10219     case TARGET_NR_getcwd:
10220         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10221             return -TARGET_EFAULT;
10222         ret = get_errno(sys_getcwd1(p, arg2));
10223         unlock_user(p, arg1, ret);
10224         return ret;
10225     case TARGET_NR_capget:
10226     case TARGET_NR_capset:
10227     {
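        /*
         * capget and capset share this code: capability data is copied in
         * from the guest only for capset and copied back only for capget,
         * while the kernel-updated version field is written back to the
         * guest header in both cases.
         */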
10228         struct target_user_cap_header *target_header;
10229         struct target_user_cap_data *target_data = NULL;
10230         struct __user_cap_header_struct header;
10231         struct __user_cap_data_struct data[2];
10232         struct __user_cap_data_struct *dataptr = NULL;
10233         int i, target_datalen;
10234         int data_items = 1;
10235 
10236         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10237             return -TARGET_EFAULT;
10238         }
10239         header.version = tswap32(target_header->version);
10240         header.pid = tswap32(target_header->pid);
10241 
10242         if (header.version != _LINUX_CAPABILITY_VERSION) {
10243             /* Version 2 and up takes pointer to two user_data structs */
10244             data_items = 2;
10245         }
10246 
10247         target_datalen = sizeof(*target_data) * data_items;
10248 
10249         if (arg2) {
10250             if (num == TARGET_NR_capget) {
10251                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10252             } else {
10253                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10254             }
10255             if (!target_data) {
10256                 unlock_user_struct(target_header, arg1, 0);
10257                 return -TARGET_EFAULT;
10258             }
10259 
10260             if (num == TARGET_NR_capset) {
10261                 for (i = 0; i < data_items; i++) {
10262                     data[i].effective = tswap32(target_data[i].effective);
10263                     data[i].permitted = tswap32(target_data[i].permitted);
10264                     data[i].inheritable = tswap32(target_data[i].inheritable);
10265                 }
10266             }
10267 
10268             dataptr = data;
10269         }
10270 
10271         if (num == TARGET_NR_capget) {
10272             ret = get_errno(capget(&header, dataptr));
10273         } else {
10274             ret = get_errno(capset(&header, dataptr));
10275         }
10276 
10277         /* The kernel always updates version for both capget and capset */
10278         target_header->version = tswap32(header.version);
10279         unlock_user_struct(target_header, arg1, 1);
10280 
10281         if (arg2) {
10282             if (num == TARGET_NR_capget) {
10283                 for (i = 0; i < data_items; i++) {
10284                     target_data[i].effective = tswap32(data[i].effective);
10285                     target_data[i].permitted = tswap32(data[i].permitted);
10286                     target_data[i].inheritable = tswap32(data[i].inheritable);
10287                 }
10288                 unlock_user(target_data, arg2, target_datalen);
10289             } else {
10290                 unlock_user(target_data, arg2, 0);
10291             }
10292         }
10293         return ret;
10294     }
10295     case TARGET_NR_sigaltstack:
10296         return do_sigaltstack(arg1, arg2,
10297                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10298 
10299 #ifdef CONFIG_SENDFILE
10300 #ifdef TARGET_NR_sendfile
10301     case TARGET_NR_sendfile:
10302     {
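        /*
         * The offset, if supplied, is copied in from the guest and written
         * back afterwards because the host sendfile() updates it in place.
         */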
10303         off_t *offp = NULL;
10304         off_t off;
10305         if (arg3) {
10306             ret = get_user_sal(off, arg3);
10307             if (is_error(ret)) {
10308                 return ret;
10309             }
10310             offp = &off;
10311         }
10312         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10313         if (!is_error(ret) && arg3) {
10314             abi_long ret2 = put_user_sal(off, arg3);
10315             if (is_error(ret2)) {
10316                 ret = ret2;
10317             }
10318         }
10319         return ret;
10320     }
10321 #endif
10322 #ifdef TARGET_NR_sendfile64
10323     case TARGET_NR_sendfile64:
10324     {
10325         off_t *offp = NULL;
10326         off_t off;
10327         if (arg3) {
10328             ret = get_user_s64(off, arg3);
10329             if (is_error(ret)) {
10330                 return ret;
10331             }
10332             offp = &off;
10333         }
10334         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10335         if (!is_error(ret) && arg3) {
10336             abi_long ret2 = put_user_s64(off, arg3);
10337             if (is_error(ret2)) {
10338                 ret = ret2;
10339             }
10340         }
10341         return ret;
10342     }
10343 #endif
10344 #endif
10345 #ifdef TARGET_NR_vfork
10346     case TARGET_NR_vfork:
10347         return get_errno(do_fork(cpu_env,
10348                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10349                          0, 0, 0, 0));
10350 #endif
10351 #ifdef TARGET_NR_ugetrlimit
10352     case TARGET_NR_ugetrlimit:
10353     {
10354         struct rlimit rlim;
10355         int resource = target_to_host_resource(arg1);
10356         ret = get_errno(getrlimit(resource, &rlim));
10357         if (!is_error(ret)) {
10358             struct target_rlimit *target_rlim;
10359             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10360                 return -TARGET_EFAULT;
10361             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10362             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10363             unlock_user_struct(target_rlim, arg2, 1);
10364         }
10365         return ret;
10366     }
10367 #endif
10368 #ifdef TARGET_NR_truncate64
10369     case TARGET_NR_truncate64:
10370         if (!(p = lock_user_string(arg1)))
10371             return -TARGET_EFAULT;
10372         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10373         unlock_user(p, arg1, 0);
10374         return ret;
10375 #endif
10376 #ifdef TARGET_NR_ftruncate64
10377     case TARGET_NR_ftruncate64:
10378         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10379 #endif
10380 #ifdef TARGET_NR_stat64
10381     case TARGET_NR_stat64:
10382         if (!(p = lock_user_string(arg1))) {
10383             return -TARGET_EFAULT;
10384         }
10385         ret = get_errno(stat(path(p), &st));
10386         unlock_user(p, arg1, 0);
10387         if (!is_error(ret))
10388             ret = host_to_target_stat64(cpu_env, arg2, &st);
10389         return ret;
10390 #endif
10391 #ifdef TARGET_NR_lstat64
10392     case TARGET_NR_lstat64:
10393         if (!(p = lock_user_string(arg1))) {
10394             return -TARGET_EFAULT;
10395         }
10396         ret = get_errno(lstat(path(p), &st));
10397         unlock_user(p, arg1, 0);
10398         if (!is_error(ret))
10399             ret = host_to_target_stat64(cpu_env, arg2, &st);
10400         return ret;
10401 #endif
10402 #ifdef TARGET_NR_fstat64
10403     case TARGET_NR_fstat64:
10404         ret = get_errno(fstat(arg1, &st));
10405         if (!is_error(ret))
10406             ret = host_to_target_stat64(cpu_env, arg2, &st);
10407         return ret;
10408 #endif
10409 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10410 #ifdef TARGET_NR_fstatat64
10411     case TARGET_NR_fstatat64:
10412 #endif
10413 #ifdef TARGET_NR_newfstatat
10414     case TARGET_NR_newfstatat:
10415 #endif
10416         if (!(p = lock_user_string(arg2))) {
10417             return -TARGET_EFAULT;
10418         }
10419         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10420         unlock_user(p, arg2, 0);
10421         if (!is_error(ret))
10422             ret = host_to_target_stat64(cpu_env, arg3, &st);
10423         return ret;
10424 #endif
10425 #if defined(TARGET_NR_statx)
10426     case TARGET_NR_statx:
10427         {
10428             struct target_statx *target_stx;
10429             int dirfd = arg1;
10430             int flags = arg3;
10431 
10432             p = lock_user_string(arg2);
10433             if (p == NULL) {
10434                 return -TARGET_EFAULT;
10435             }
10436 #if defined(__NR_statx)
10437             {
10438                 /*
10439                  * It is assumed that struct statx is architecture independent.
10440                  */
10441                 struct target_statx host_stx;
10442                 int mask = arg4;
10443 
10444                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10445                 if (!is_error(ret)) {
10446                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10447                         unlock_user(p, arg2, 0);
10448                         return -TARGET_EFAULT;
10449                     }
10450                 }
10451 
10452                 if (ret != -TARGET_ENOSYS) {
10453                     unlock_user(p, arg2, 0);
10454                     return ret;
10455                 }
10456             }
10457 #endif
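            /*
             * Fall back to fstatat() when the host statx syscall is not
             * available (or returned ENOSYS) and synthesize the
             * target_statx fields from the resulting struct stat.
             */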
10458             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10459             unlock_user(p, arg2, 0);
10460 
10461             if (!is_error(ret)) {
10462                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10463                     return -TARGET_EFAULT;
10464                 }
10465                 memset(target_stx, 0, sizeof(*target_stx));
10466                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10467                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10468                 __put_user(st.st_ino, &target_stx->stx_ino);
10469                 __put_user(st.st_mode, &target_stx->stx_mode);
10470                 __put_user(st.st_uid, &target_stx->stx_uid);
10471                 __put_user(st.st_gid, &target_stx->stx_gid);
10472                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10473                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10474                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10475                 __put_user(st.st_size, &target_stx->stx_size);
10476                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10477                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10478                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10479                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10480                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10481                 unlock_user_struct(target_stx, arg5, 1);
10482             }
10483         }
10484         return ret;
10485 #endif
10486 #ifdef TARGET_NR_lchown
10487     case TARGET_NR_lchown:
10488         if (!(p = lock_user_string(arg1)))
10489             return -TARGET_EFAULT;
10490         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10491         unlock_user(p, arg1, 0);
10492         return ret;
10493 #endif
10494 #ifdef TARGET_NR_getuid
10495     case TARGET_NR_getuid:
10496         return get_errno(high2lowuid(getuid()));
10497 #endif
10498 #ifdef TARGET_NR_getgid
10499     case TARGET_NR_getgid:
10500         return get_errno(high2lowgid(getgid()));
10501 #endif
10502 #ifdef TARGET_NR_geteuid
10503     case TARGET_NR_geteuid:
10504         return get_errno(high2lowuid(geteuid()));
10505 #endif
10506 #ifdef TARGET_NR_getegid
10507     case TARGET_NR_getegid:
10508         return get_errno(high2lowgid(getegid()));
10509 #endif
10510     case TARGET_NR_setreuid:
10511         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10512     case TARGET_NR_setregid:
10513         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10514     case TARGET_NR_getgroups:
10515         {
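            /*
             * The group list is converted element by element between the
             * host gid_t and the (possibly 16-bit) target_id
             * representation.
             */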
10516             int gidsetsize = arg1;
10517             target_id *target_grouplist;
10518             gid_t *grouplist;
10519             int i;
10520 
10521             grouplist = alloca(gidsetsize * sizeof(gid_t));
10522             ret = get_errno(getgroups(gidsetsize, grouplist));
10523             if (gidsetsize == 0)
10524                 return ret;
10525             if (!is_error(ret)) {
10526                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10527                 if (!target_grouplist)
10528                     return -TARGET_EFAULT;
10529                 for(i = 0;i < ret; i++)
10530                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10531                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10532             }
10533         }
10534         return ret;
10535     case TARGET_NR_setgroups:
10536         {
10537             int gidsetsize = arg1;
10538             target_id *target_grouplist;
10539             gid_t *grouplist = NULL;
10540             int i;
10541             if (gidsetsize) {
10542                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10543                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10544                 if (!target_grouplist) {
10545                     return -TARGET_EFAULT;
10546                 }
10547                 for (i = 0; i < gidsetsize; i++) {
10548                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10549                 }
10550                 unlock_user(target_grouplist, arg2, 0);
10551             }
10552             return get_errno(setgroups(gidsetsize, grouplist));
10553         }
10554     case TARGET_NR_fchown:
10555         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10556 #if defined(TARGET_NR_fchownat)
10557     case TARGET_NR_fchownat:
10558         if (!(p = lock_user_string(arg2)))
10559             return -TARGET_EFAULT;
10560         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10561                                  low2highgid(arg4), arg5));
10562         unlock_user(p, arg2, 0);
10563         return ret;
10564 #endif
10565 #ifdef TARGET_NR_setresuid
10566     case TARGET_NR_setresuid:
10567         return get_errno(sys_setresuid(low2highuid(arg1),
10568                                        low2highuid(arg2),
10569                                        low2highuid(arg3)));
10570 #endif
10571 #ifdef TARGET_NR_getresuid
10572     case TARGET_NR_getresuid:
10573         {
10574             uid_t ruid, euid, suid;
10575             ret = get_errno(getresuid(&ruid, &euid, &suid));
10576             if (!is_error(ret)) {
10577                 if (put_user_id(high2lowuid(ruid), arg1)
10578                     || put_user_id(high2lowuid(euid), arg2)
10579                     || put_user_id(high2lowuid(suid), arg3))
10580                     return -TARGET_EFAULT;
10581             }
10582         }
10583         return ret;
10584 #endif
10585 #ifdef TARGET_NR_setresgid
10586     case TARGET_NR_setresgid:
10587         return get_errno(sys_setresgid(low2highgid(arg1),
10588                                        low2highgid(arg2),
10589                                        low2highgid(arg3)));
10590 #endif
10591 #ifdef TARGET_NR_getresgid
10592     case TARGET_NR_getresgid:
10593         {
10594             gid_t rgid, egid, sgid;
10595             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10596             if (!is_error(ret)) {
10597                 if (put_user_id(high2lowgid(rgid), arg1)
10598                     || put_user_id(high2lowgid(egid), arg2)
10599                     || put_user_id(high2lowgid(sgid), arg3))
10600                     return -TARGET_EFAULT;
10601             }
10602         }
10603         return ret;
10604 #endif
10605 #ifdef TARGET_NR_chown
10606     case TARGET_NR_chown:
10607         if (!(p = lock_user_string(arg1)))
10608             return -TARGET_EFAULT;
10609         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10610         unlock_user(p, arg1, 0);
10611         return ret;
10612 #endif
10613     case TARGET_NR_setuid:
10614         return get_errno(sys_setuid(low2highuid(arg1)));
10615     case TARGET_NR_setgid:
10616         return get_errno(sys_setgid(low2highgid(arg1)));
10617     case TARGET_NR_setfsuid:
10618         return get_errno(setfsuid(arg1));
10619     case TARGET_NR_setfsgid:
10620         return get_errno(setfsgid(arg1));
10621 
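    /*
     * The *32 variants of the ID syscalls always carry full 32-bit
     * uid_t/gid_t values, so from here on the arguments go straight to the
     * host calls without the 16-bit conversions used above.
     */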
10622 #ifdef TARGET_NR_lchown32
10623     case TARGET_NR_lchown32:
10624         if (!(p = lock_user_string(arg1)))
10625             return -TARGET_EFAULT;
10626         ret = get_errno(lchown(p, arg2, arg3));
10627         unlock_user(p, arg1, 0);
10628         return ret;
10629 #endif
10630 #ifdef TARGET_NR_getuid32
10631     case TARGET_NR_getuid32:
10632         return get_errno(getuid());
10633 #endif
10634 
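    /*
     * The Alpha getxuid/getxgid syscalls return a pair of values: the real
     * ID as the ordinary syscall result and the effective ID in register
     * a4, which is why the effective ID is stored into ir[IR_A4] before the
     * real ID is returned.
     */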
10635 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10636    /* Alpha specific */
10637     case TARGET_NR_getxuid:
10638          {
10639             uid_t euid;
10640             euid = geteuid();
10641             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10642          }
10643         return get_errno(getuid());
10644 #endif
10645 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10646    /* Alpha specific */
10647     case TARGET_NR_getxgid:
10648          {
10649             gid_t egid;
10650             egid = getegid();
10651             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10652          }
10653         return get_errno(getgid());
10654 #endif
10655 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10656     /* Alpha specific */
10657     case TARGET_NR_osf_getsysinfo:
10658         ret = -TARGET_EOPNOTSUPP;
10659         switch (arg1) {
10660           case TARGET_GSI_IEEE_FP_CONTROL:
10661             {
10662                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10663                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10664 
10665                 swcr &= ~SWCR_STATUS_MASK;
10666                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10667 
10668                 if (put_user_u64 (swcr, arg2))
10669                         return -TARGET_EFAULT;
10670                 ret = 0;
10671             }
10672             break;
10673 
10674           /* case GSI_IEEE_STATE_AT_SIGNAL:
10675              -- Not implemented in linux kernel.
10676              case GSI_UACPROC:
10677              -- Retrieves current unaligned access state; not much used.
10678              case GSI_PROC_TYPE:
10679              -- Retrieves implver information; surely not used.
10680              case GSI_GET_HWRPB:
10681              -- Grabs a copy of the HWRPB; surely not used.
10682           */
10683         }
10684         return ret;
10685 #endif
10686 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10687     /* Alpha specific */
10688     case TARGET_NR_osf_setsysinfo:
10689         ret = -TARGET_EOPNOTSUPP;
10690         switch (arg1) {
10691           case TARGET_SSI_IEEE_FP_CONTROL:
10692             {
10693                 uint64_t swcr, fpcr;
10694 
10695                 if (get_user_u64 (swcr, arg2)) {
10696                     return -TARGET_EFAULT;
10697                 }
10698 
10699                 /*
10700                  * The kernel calls swcr_update_status to update the
10701                  * status bits from the fpcr at every point that it
10702                  * could be queried.  Therefore, we store the status
10703                  * bits only in FPCR.
10704                  */
10705                 ((CPUAlphaState *)cpu_env)->swcr
10706                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10707 
10708                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10709                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10710                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10711                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10712                 ret = 0;
10713             }
10714             break;
10715 
10716           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10717             {
10718                 uint64_t exc, fpcr, fex;
10719 
10720                 if (get_user_u64(exc, arg2)) {
10721                     return -TARGET_EFAULT;
10722                 }
10723                 exc &= SWCR_STATUS_MASK;
10724                 fpcr = cpu_alpha_load_fpcr(cpu_env);
10725 
10726                 /* Old exceptions are not signaled.  */
10727                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10728                 fex = exc & ~fex;
10729                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10730                 fex &= ((CPUArchState *)cpu_env)->swcr;
10731 
10732                 /* Update the hardware fpcr.  */
10733                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10734                 cpu_alpha_store_fpcr(cpu_env, fpcr);
10735 
10736                 if (fex) {
10737                     int si_code = TARGET_FPE_FLTUNK;
10738                     target_siginfo_t info;
10739 
10740                     if (fex & SWCR_TRAP_ENABLE_DNO) {
10741                         si_code = TARGET_FPE_FLTUND;
10742                     }
10743                     if (fex & SWCR_TRAP_ENABLE_INE) {
10744                         si_code = TARGET_FPE_FLTRES;
10745                     }
10746                     if (fex & SWCR_TRAP_ENABLE_UNF) {
10747                         si_code = TARGET_FPE_FLTUND;
10748                     }
10749                     if (fex & SWCR_TRAP_ENABLE_OVF) {
10750                         si_code = TARGET_FPE_FLTOVF;
10751                     }
10752                     if (fex & SWCR_TRAP_ENABLE_DZE) {
10753                         si_code = TARGET_FPE_FLTDIV;
10754                     }
10755                     if (fex & SWCR_TRAP_ENABLE_INV) {
10756                         si_code = TARGET_FPE_FLTINV;
10757                     }
10758 
10759                     info.si_signo = SIGFPE;
10760                     info.si_errno = 0;
10761                     info.si_code = si_code;
10762                     info._sifields._sigfault._addr
10763                         = ((CPUArchState *)cpu_env)->pc;
10764                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
10765                                  QEMU_SI_FAULT, &info);
10766                 }
10767                 ret = 0;
10768             }
10769             break;
10770 
10771           /* case SSI_NVPAIRS:
10772              -- Used with SSIN_UACPROC to enable unaligned accesses.
10773              case SSI_IEEE_STATE_AT_SIGNAL:
10774              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10775              -- Not implemented in linux kernel
10776           */
10777         }
10778         return ret;
10779 #endif
10780 #ifdef TARGET_NR_osf_sigprocmask
10781     /* Alpha specific.  */
10782     case TARGET_NR_osf_sigprocmask:
10783         {
10784             abi_ulong mask;
10785             int how;
10786             sigset_t set, oldset;
10787 
10788             switch(arg1) {
10789             case TARGET_SIG_BLOCK:
10790                 how = SIG_BLOCK;
10791                 break;
10792             case TARGET_SIG_UNBLOCK:
10793                 how = SIG_UNBLOCK;
10794                 break;
10795             case TARGET_SIG_SETMASK:
10796                 how = SIG_SETMASK;
10797                 break;
10798             default:
10799                 return -TARGET_EINVAL;
10800             }
10801             mask = arg2;
10802             target_to_host_old_sigset(&set, &mask);
10803             ret = do_sigprocmask(how, &set, &oldset);
10804             if (!ret) {
10805                 host_to_target_old_sigset(&mask, &oldset);
10806                 ret = mask;
10807             }
10808         }
10809         return ret;
10810 #endif
10811 
10812 #ifdef TARGET_NR_getgid32
10813     case TARGET_NR_getgid32:
10814         return get_errno(getgid());
10815 #endif
10816 #ifdef TARGET_NR_geteuid32
10817     case TARGET_NR_geteuid32:
10818         return get_errno(geteuid());
10819 #endif
10820 #ifdef TARGET_NR_getegid32
10821     case TARGET_NR_getegid32:
10822         return get_errno(getegid());
10823 #endif
10824 #ifdef TARGET_NR_setreuid32
10825     case TARGET_NR_setreuid32:
10826         return get_errno(setreuid(arg1, arg2));
10827 #endif
10828 #ifdef TARGET_NR_setregid32
10829     case TARGET_NR_setregid32:
10830         return get_errno(setregid(arg1, arg2));
10831 #endif
10832 #ifdef TARGET_NR_getgroups32
10833     case TARGET_NR_getgroups32:
10834         {
10835             int gidsetsize = arg1;
10836             uint32_t *target_grouplist;
10837             gid_t *grouplist;
10838             int i;
10839 
10840             grouplist = alloca(gidsetsize * sizeof(gid_t));
10841             ret = get_errno(getgroups(gidsetsize, grouplist));
10842             if (gidsetsize == 0)
10843                 return ret;
10844             if (!is_error(ret)) {
10845                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10846                 if (!target_grouplist) {
10847                     return -TARGET_EFAULT;
10848                 }
10849                 for (i = 0; i < ret; i++)
10850                     target_grouplist[i] = tswap32(grouplist[i]);
10851                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10852             }
10853         }
10854         return ret;
10855 #endif
10856 #ifdef TARGET_NR_setgroups32
10857     case TARGET_NR_setgroups32:
10858         {
10859             int gidsetsize = arg1;
10860             uint32_t *target_grouplist;
10861             gid_t *grouplist;
10862             int i;
10863 
10864             grouplist = alloca(gidsetsize * sizeof(gid_t));
10865             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10866             if (!target_grouplist) {
10867                 return -TARGET_EFAULT;
10868             }
10869             for (i = 0; i < gidsetsize; i++)
10870                 grouplist[i] = tswap32(target_grouplist[i]);
10871             unlock_user(target_grouplist, arg2, 0);
10872             return get_errno(setgroups(gidsetsize, grouplist));
10873         }
10874 #endif
10875 #ifdef TARGET_NR_fchown32
10876     case TARGET_NR_fchown32:
10877         return get_errno(fchown(arg1, arg2, arg3));
10878 #endif
10879 #ifdef TARGET_NR_setresuid32
10880     case TARGET_NR_setresuid32:
10881         return get_errno(sys_setresuid(arg1, arg2, arg3));
10882 #endif
10883 #ifdef TARGET_NR_getresuid32
10884     case TARGET_NR_getresuid32:
10885         {
10886             uid_t ruid, euid, suid;
10887             ret = get_errno(getresuid(&ruid, &euid, &suid));
10888             if (!is_error(ret)) {
10889                 if (put_user_u32(ruid, arg1)
10890                     || put_user_u32(euid, arg2)
10891                     || put_user_u32(suid, arg3))
10892                     return -TARGET_EFAULT;
10893             }
10894         }
10895         return ret;
10896 #endif
10897 #ifdef TARGET_NR_setresgid32
10898     case TARGET_NR_setresgid32:
10899         return get_errno(sys_setresgid(arg1, arg2, arg3));
10900 #endif
10901 #ifdef TARGET_NR_getresgid32
10902     case TARGET_NR_getresgid32:
10903         {
10904             gid_t rgid, egid, sgid;
10905             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10906             if (!is_error(ret)) {
10907                 if (put_user_u32(rgid, arg1)
10908                     || put_user_u32(egid, arg2)
10909                     || put_user_u32(sgid, arg3))
10910                     return -TARGET_EFAULT;
10911             }
10912         }
10913         return ret;
10914 #endif
10915 #ifdef TARGET_NR_chown32
10916     case TARGET_NR_chown32:
10917         if (!(p = lock_user_string(arg1)))
10918             return -TARGET_EFAULT;
10919         ret = get_errno(chown(p, arg2, arg3));
10920         unlock_user(p, arg1, 0);
10921         return ret;
10922 #endif
10923 #ifdef TARGET_NR_setuid32
10924     case TARGET_NR_setuid32:
10925         return get_errno(sys_setuid(arg1));
10926 #endif
10927 #ifdef TARGET_NR_setgid32
10928     case TARGET_NR_setgid32:
10929         return get_errno(sys_setgid(arg1));
10930 #endif
10931 #ifdef TARGET_NR_setfsuid32
10932     case TARGET_NR_setfsuid32:
10933         return get_errno(setfsuid(arg1));
10934 #endif
10935 #ifdef TARGET_NR_setfsgid32
10936     case TARGET_NR_setfsgid32:
10937         return get_errno(setfsgid(arg1));
10938 #endif
10939 #ifdef TARGET_NR_mincore
10940     case TARGET_NR_mincore:
10941         {
10942             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10943             if (!a) {
10944                 return -TARGET_ENOMEM;
10945             }
10946             p = lock_user_string(arg3);
10947             if (!p) {
10948                 ret = -TARGET_EFAULT;
10949             } else {
10950                 ret = get_errno(mincore(a, arg2, p));
10951                 unlock_user(p, arg3, ret);
10952             }
10953             unlock_user(a, arg1, 0);
10954         }
10955         return ret;
10956 #endif
10957 #ifdef TARGET_NR_arm_fadvise64_64
10958     case TARGET_NR_arm_fadvise64_64:
10959         /* arm_fadvise64_64 looks like fadvise64_64 but
10960          * with different argument order: fd, advice, offset, len
10961          * rather than the usual fd, offset, len, advice.
10962          * Note that offset and len are both 64-bit so appear as
10963          * pairs of 32-bit registers.
10964          */
10965         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10966                             target_offset64(arg5, arg6), arg2);
10967         return -host_to_target_errno(ret);
10968 #endif
10969 
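    /*
     * On 32-bit ABIs a 64-bit file offset arrives split across two syscall
     * registers and is reassembled with target_offset64().  ABIs that
     * require 64-bit arguments to sit in aligned register pairs (reported
     * by regpairs_aligned()) insert a padding register first, so the cases
     * below shift the remaining arguments down by one before rebuilding the
     * offset.  With aligned pairs, for example, fadvise64_64 effectively
     * arrives as: arg1 = fd, arg2 = padding, arg3/arg4 = offset,
     * arg5/arg6 = len, arg7 = advice, which matches the in-case shuffling.
     */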
10970 #if TARGET_ABI_BITS == 32
10971 
10972 #ifdef TARGET_NR_fadvise64_64
10973     case TARGET_NR_fadvise64_64:
10974 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10975         /* 6 args: fd, advice, offset (high, low), len (high, low) */
10976         ret = arg2;
10977         arg2 = arg3;
10978         arg3 = arg4;
10979         arg4 = arg5;
10980         arg5 = arg6;
10981         arg6 = ret;
10982 #else
10983         /* 6 args: fd, offset (high, low), len (high, low), advice */
10984         if (regpairs_aligned(cpu_env, num)) {
10985             /* offset is in (3,4), len in (5,6) and advice in 7 */
10986             arg2 = arg3;
10987             arg3 = arg4;
10988             arg4 = arg5;
10989             arg5 = arg6;
10990             arg6 = arg7;
10991         }
10992 #endif
10993         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10994                             target_offset64(arg4, arg5), arg6);
10995         return -host_to_target_errno(ret);
10996 #endif
10997 
10998 #ifdef TARGET_NR_fadvise64
10999     case TARGET_NR_fadvise64:
11000         /* 5 args: fd, offset (high, low), len, advice */
11001         if (regpairs_aligned(cpu_env, num)) {
11002             /* offset is in (3,4), len in 5 and advice in 6 */
11003             arg2 = arg3;
11004             arg3 = arg4;
11005             arg4 = arg5;
11006             arg5 = arg6;
11007         }
11008         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11009         return -host_to_target_errno(ret);
11010 #endif
11011 
11012 #else /* not a 32-bit ABI */
11013 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11014 #ifdef TARGET_NR_fadvise64_64
11015     case TARGET_NR_fadvise64_64:
11016 #endif
11017 #ifdef TARGET_NR_fadvise64
11018     case TARGET_NR_fadvise64:
11019 #endif
11020 #ifdef TARGET_S390X
11021         switch (arg4) {
11022         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11023         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11024         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11025         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11026         default: break;
11027         }
11028 #endif
11029         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11030 #endif
11031 #endif /* end of 64-bit ABI fadvise handling */
11032 
11033 #ifdef TARGET_NR_madvise
11034     case TARGET_NR_madvise:
11035         /* A straight passthrough may not be safe because qemu sometimes
11036            turns private file-backed mappings into anonymous mappings.
11037            This will break MADV_DONTNEED.
11038            This is a hint, so ignoring and returning success is ok.  */
11039         return 0;
11040 #endif
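    /*
     * On 32-bit ABIs the fcntl64 locking commands operate on the 64-bit
     * struct flock64.  The copy-in/copy-out helpers are chosen up front so
     * that old-ABI ARM, whose flock64 layout differs from the EABI one, can
     * substitute its own converters; any non-locking command simply falls
     * through to the common do_fcntl() path.
     */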
11041 #if TARGET_ABI_BITS == 32
11042     case TARGET_NR_fcntl64:
11043     {
11044         int cmd;
11045         struct flock64 fl;
11046         from_flock64_fn *copyfrom = copy_from_user_flock64;
11047         to_flock64_fn *copyto = copy_to_user_flock64;
11048 
11049 #ifdef TARGET_ARM
11050         if (!((CPUARMState *)cpu_env)->eabi) {
11051             copyfrom = copy_from_user_oabi_flock64;
11052             copyto = copy_to_user_oabi_flock64;
11053         }
11054 #endif
11055 
11056         cmd = target_to_host_fcntl_cmd(arg2);
11057         if (cmd == -TARGET_EINVAL) {
11058             return cmd;
11059         }
11060 
11061         switch(arg2) {
11062         case TARGET_F_GETLK64:
11063             ret = copyfrom(&fl, arg3);
11064             if (ret) {
11065                 break;
11066             }
11067             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11068             if (ret == 0) {
11069                 ret = copyto(arg3, &fl);
11070             }
11071             break;
11072 
11073         case TARGET_F_SETLK64:
11074         case TARGET_F_SETLKW64:
11075             ret = copyfrom(&fl, arg3);
11076             if (ret) {
11077                 break;
11078             }
11079             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11080             break;
11081         default:
11082             ret = do_fcntl(arg1, arg2, arg3);
11083             break;
11084         }
11085         return ret;
11086     }
11087 #endif
11088 #ifdef TARGET_NR_cacheflush
11089     case TARGET_NR_cacheflush:
11090         /* self-modifying code is handled automatically, so nothing needed */
11091         return 0;
11092 #endif
11093 #ifdef TARGET_NR_getpagesize
11094     case TARGET_NR_getpagesize:
11095         return TARGET_PAGE_SIZE;
11096 #endif
11097     case TARGET_NR_gettid:
11098         return get_errno(sys_gettid());
11099 #ifdef TARGET_NR_readahead
11100     case TARGET_NR_readahead:
11101 #if TARGET_ABI_BITS == 32
11102         if (regpairs_aligned(cpu_env, num)) {
11103             arg2 = arg3;
11104             arg3 = arg4;
11105             arg4 = arg5;
11106         }
11107         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11108 #else
11109         ret = get_errno(readahead(arg1, arg2, arg3));
11110 #endif
11111         return ret;
11112 #endif
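    /*
     * The extended-attribute syscalls below all follow the same pattern:
     * lock the guest's path/name strings and the optional value buffer,
     * forward the request to the corresponding host call, then unlock the
     * buffers again, copying back only the buffers that the host call was
     * asked to fill in.
     */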
11113 #ifdef CONFIG_ATTR
11114 #ifdef TARGET_NR_setxattr
11115     case TARGET_NR_listxattr:
11116     case TARGET_NR_llistxattr:
11117     {
11118         void *p, *b = 0;
11119         if (arg2) {
11120             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11121             if (!b) {
11122                 return -TARGET_EFAULT;
11123             }
11124         }
11125         p = lock_user_string(arg1);
11126         if (p) {
11127             if (num == TARGET_NR_listxattr) {
11128                 ret = get_errno(listxattr(p, b, arg3));
11129             } else {
11130                 ret = get_errno(llistxattr(p, b, arg3));
11131             }
11132         } else {
11133             ret = -TARGET_EFAULT;
11134         }
11135         unlock_user(p, arg1, 0);
11136         unlock_user(b, arg2, arg3);
11137         return ret;
11138     }
11139     case TARGET_NR_flistxattr:
11140     {
11141         void *b = 0;
11142         if (arg2) {
11143             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11144             if (!b) {
11145                 return -TARGET_EFAULT;
11146             }
11147         }
11148         ret = get_errno(flistxattr(arg1, b, arg3));
11149         unlock_user(b, arg2, arg3);
11150         return ret;
11151     }
11152     case TARGET_NR_setxattr:
11153     case TARGET_NR_lsetxattr:
11154         {
11155             void *p, *n, *v = 0;
11156             if (arg3) {
11157                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11158                 if (!v) {
11159                     return -TARGET_EFAULT;
11160                 }
11161             }
11162             p = lock_user_string(arg1);
11163             n = lock_user_string(arg2);
11164             if (p && n) {
11165                 if (num == TARGET_NR_setxattr) {
11166                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11167                 } else {
11168                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11169                 }
11170             } else {
11171                 ret = -TARGET_EFAULT;
11172             }
11173             unlock_user(p, arg1, 0);
11174             unlock_user(n, arg2, 0);
11175             unlock_user(v, arg3, 0);
11176         }
11177         return ret;
11178     case TARGET_NR_fsetxattr:
11179         {
11180             void *n, *v = 0;
11181             if (arg3) {
11182                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11183                 if (!v) {
11184                     return -TARGET_EFAULT;
11185                 }
11186             }
11187             n = lock_user_string(arg2);
11188             if (n) {
11189                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11190             } else {
11191                 ret = -TARGET_EFAULT;
11192             }
11193             unlock_user(n, arg2, 0);
11194             unlock_user(v, arg3, 0);
11195         }
11196         return ret;
11197     case TARGET_NR_getxattr:
11198     case TARGET_NR_lgetxattr:
11199         {
11200             void *p, *n, *v = 0;
11201             if (arg3) {
11202                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11203                 if (!v) {
11204                     return -TARGET_EFAULT;
11205                 }
11206             }
11207             p = lock_user_string(arg1);
11208             n = lock_user_string(arg2);
11209             if (p && n) {
11210                 if (num == TARGET_NR_getxattr) {
11211                     ret = get_errno(getxattr(p, n, v, arg4));
11212                 } else {
11213                     ret = get_errno(lgetxattr(p, n, v, arg4));
11214                 }
11215             } else {
11216                 ret = -TARGET_EFAULT;
11217             }
11218             unlock_user(p, arg1, 0);
11219             unlock_user(n, arg2, 0);
11220             unlock_user(v, arg3, arg4);
11221         }
11222         return ret;
11223     case TARGET_NR_fgetxattr:
11224         {
11225             void *n, *v = 0;
11226             if (arg3) {
11227                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11228                 if (!v) {
11229                     return -TARGET_EFAULT;
11230                 }
11231             }
11232             n = lock_user_string(arg2);
11233             if (n) {
11234                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11235             } else {
11236                 ret = -TARGET_EFAULT;
11237             }
11238             unlock_user(n, arg2, 0);
11239             unlock_user(v, arg3, arg4);
11240         }
11241         return ret;
11242     case TARGET_NR_removexattr:
11243     case TARGET_NR_lremovexattr:
11244         {
11245             void *p, *n;
11246             p = lock_user_string(arg1);
11247             n = lock_user_string(arg2);
11248             if (p && n) {
11249                 if (num == TARGET_NR_removexattr) {
11250                     ret = get_errno(removexattr(p, n));
11251                 } else {
11252                     ret = get_errno(lremovexattr(p, n));
11253                 }
11254             } else {
11255                 ret = -TARGET_EFAULT;
11256             }
11257             unlock_user(p, arg1, 0);
11258             unlock_user(n, arg2, 0);
11259         }
11260         return ret;
11261     case TARGET_NR_fremovexattr:
11262         {
11263             void *n;
11264             n = lock_user_string(arg2);
11265             if (n) {
11266                 ret = get_errno(fremovexattr(arg1, n));
11267             } else {
11268                 ret = -TARGET_EFAULT;
11269             }
11270             unlock_user(n, arg2, 0);
11271         }
11272         return ret;
11273 #endif
11274 #endif /* CONFIG_ATTR */
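    /*
     * set_thread_area/get_thread_area manage the guest's TLS pointer and
     * are handled per architecture: MIPS stores it in CP0_UserLocal, CRIS
     * in the PID special register, 32-bit x86 goes through the GDT
     * emulation in do_set_thread_area(), and m68k keeps it in the
     * per-thread TaskState; everything else reports ENOSYS.
     */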
11275 #ifdef TARGET_NR_set_thread_area
11276     case TARGET_NR_set_thread_area:
11277 #if defined(TARGET_MIPS)
11278       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11279       return 0;
11280 #elif defined(TARGET_CRIS)
11281       if (arg1 & 0xff)
11282           ret = -TARGET_EINVAL;
11283       else {
11284           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11285           ret = 0;
11286       }
11287       return ret;
11288 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11289       return do_set_thread_area(cpu_env, arg1);
11290 #elif defined(TARGET_M68K)
11291       {
11292           TaskState *ts = cpu->opaque;
11293           ts->tp_value = arg1;
11294           return 0;
11295       }
11296 #else
11297       return -TARGET_ENOSYS;
11298 #endif
11299 #endif
11300 #ifdef TARGET_NR_get_thread_area
11301     case TARGET_NR_get_thread_area:
11302 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11303         return do_get_thread_area(cpu_env, arg1);
11304 #elif defined(TARGET_M68K)
11305         {
11306             TaskState *ts = cpu->opaque;
11307             return ts->tp_value;
11308         }
11309 #else
11310         return -TARGET_ENOSYS;
11311 #endif
11312 #endif
11313 #ifdef TARGET_NR_getdomainname
11314     case TARGET_NR_getdomainname:
11315         return -TARGET_ENOSYS;
11316 #endif
11317 
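    /*
     * The clock_* syscalls are thin wrappers around the host functions of
     * the same name; the only work done here is converting struct timespec
     * between guest and host layouts with target_to_host_timespec() and
     * host_to_target_timespec().
     */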
11318 #ifdef TARGET_NR_clock_settime
11319     case TARGET_NR_clock_settime:
11320     {
11321         struct timespec ts;
11322 
11323         ret = target_to_host_timespec(&ts, arg2);
11324         if (!is_error(ret)) {
11325             ret = get_errno(clock_settime(arg1, &ts));
11326         }
11327         return ret;
11328     }
11329 #endif
11330 #ifdef TARGET_NR_clock_gettime
11331     case TARGET_NR_clock_gettime:
11332     {
11333         struct timespec ts;
11334         ret = get_errno(clock_gettime(arg1, &ts));
11335         if (!is_error(ret)) {
11336             ret = host_to_target_timespec(arg2, &ts);
11337         }
11338         return ret;
11339     }
11340 #endif
11341 #ifdef TARGET_NR_clock_getres
11342     case TARGET_NR_clock_getres:
11343     {
11344         struct timespec ts;
11345         ret = get_errno(clock_getres(arg1, &ts));
11346         if (!is_error(ret)) {
11347             host_to_target_timespec(arg2, &ts);
11348         }
11349         return ret;
11350     }
11351 #endif
11352 #ifdef TARGET_NR_clock_nanosleep
11353     case TARGET_NR_clock_nanosleep:
11354     {
11355         struct timespec ts;
11356         if (target_to_host_timespec(&ts, arg3)) {
                  return -TARGET_EFAULT;
              }
11357         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11358                                              &ts, arg4 ? &ts : NULL));
11359         if (arg4)
11360             host_to_target_timespec(arg4, &ts);
11361 
11362 #if defined(TARGET_PPC)
11363         /* clock_nanosleep is odd in that it returns positive errno values.
11364          * On PPC, CR0 bit 3 should be set in such a situation. */
11365         if (ret && ret != -TARGET_ERESTARTSYS) {
11366             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11367         }
11368 #endif
11369         return ret;
11370     }
11371 #endif
11372 
11373 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11374     case TARGET_NR_set_tid_address:
11375         return get_errno(set_tid_address((int *)g2h(arg1)));
11376 #endif
11377 
11378     case TARGET_NR_tkill:
11379         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11380 
11381     case TARGET_NR_tgkill:
11382         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11383                          target_to_host_signal(arg3)));
11384 
11385 #ifdef TARGET_NR_set_robust_list
11386     case TARGET_NR_set_robust_list:
11387     case TARGET_NR_get_robust_list:
11388         /* The ABI for supporting robust futexes has userspace pass
11389          * the kernel a pointer to a linked list which is updated by
11390          * userspace after the syscall; the list is walked by the kernel
11391          * when the thread exits. Since the linked list in QEMU guest
11392          * memory isn't a valid linked list for the host and we have
11393          * no way to reliably intercept the thread-death event, we can't
11394          * support these. Silently return ENOSYS so that guest userspace
11395          * falls back to a non-robust futex implementation (which should
11396          * be OK except in the corner case of the guest crashing while
11397          * holding a mutex that is shared with another process via
11398          * shared memory).
11399          */
11400         return -TARGET_ENOSYS;
11401 #endif
11402 
11403 #if defined(TARGET_NR_utimensat)
11404     case TARGET_NR_utimensat:
11405         {
11406             struct timespec *tsp, ts[2];
11407             if (!arg3) {
11408                 tsp = NULL;
11409             } else {
11410                 if (target_to_host_timespec(ts, arg3)) {
                          return -TARGET_EFAULT;
                      }
11411                 if (target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec))) {
                          return -TARGET_EFAULT;
                      }
11412                 tsp = ts;
11413             }
11414             if (!arg2)
11415                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11416             else {
11417                 if (!(p = lock_user_string(arg2))) {
11418                     return -TARGET_EFAULT;
11419                 }
11420                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11421                 unlock_user(p, arg2, 0);
11422             }
11423         }
11424         return ret;
11425 #endif
11426     case TARGET_NR_futex:
11427         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11428 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11429     case TARGET_NR_inotify_init:
11430         ret = get_errno(sys_inotify_init());
11431         if (ret >= 0) {
11432             fd_trans_register(ret, &target_inotify_trans);
11433         }
11434         return ret;
11435 #endif
11436 #ifdef CONFIG_INOTIFY1
11437 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11438     case TARGET_NR_inotify_init1:
11439         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11440                                           fcntl_flags_tbl)));
11441         if (ret >= 0) {
11442             fd_trans_register(ret, &target_inotify_trans);
11443         }
11444         return ret;
11445 #endif
11446 #endif
11447 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11448     case TARGET_NR_inotify_add_watch:
11449         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
11450         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11451         unlock_user(p, arg2, 0);
11452         return ret;
11453 #endif
11454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11455     case TARGET_NR_inotify_rm_watch:
11456         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11457 #endif
11458 
11459 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11460     case TARGET_NR_mq_open:
11461         {
11462             struct mq_attr posix_mq_attr;
11463             struct mq_attr *pposix_mq_attr;
11464             int host_flags;
11465 
11466             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11467             pposix_mq_attr = NULL;
11468             if (arg4) {
11469                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11470                     return -TARGET_EFAULT;
11471                 }
11472                 pposix_mq_attr = &posix_mq_attr;
11473             }
11474             p = lock_user_string(arg1 - 1);
11475             if (!p) {
11476                 return -TARGET_EFAULT;
11477             }
11478             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11479             unlock_user (p, arg1, 0);
11480         }
11481         return ret;
11482 
11483     case TARGET_NR_mq_unlink:
11484         p = lock_user_string(arg1 - 1);
11485         if (!p) {
11486             return -TARGET_EFAULT;
11487         }
11488         ret = get_errno(mq_unlink(p));
11489         unlock_user (p, arg1, 0);
11490         return ret;
11491 
11492     case TARGET_NR_mq_timedsend:
11493         {
11494             struct timespec ts;
11495 
11496             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
11497             if (arg5 != 0) {
11498                 target_to_host_timespec(&ts, arg5);
11499                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11500                 host_to_target_timespec(arg5, &ts);
11501             } else {
11502                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11503             }
11504             unlock_user (p, arg2, arg3);
11505         }
11506         return ret;
11507 
11508     case TARGET_NR_mq_timedreceive:
11509         {
11510             struct timespec ts;
11511             unsigned int prio;
11512 
11513             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
11514             if (arg5 != 0) {
11515                 target_to_host_timespec(&ts, arg5);
11516                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11517                                                      &prio, &ts));
11518                 host_to_target_timespec(arg5, &ts);
11519             } else {
11520                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11521                                                      &prio, NULL));
11522             }
11523             unlock_user (p, arg2, arg3);
11524             if (arg4 != 0)
11525                 put_user_u32(prio, arg4);
11526         }
11527         return ret;
11528 
11529     /* Not implemented for now... */
11530 /*     case TARGET_NR_mq_notify: */
11531 /*         break; */
11532 
11533     case TARGET_NR_mq_getsetattr:
11534         {
11535             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11536             ret = 0;
11537             if (arg2 != 0) {
11538                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11539                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11540                                            &posix_mq_attr_out));
11541             } else if (arg3 != 0) {
11542                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11543             }
11544             if (ret == 0 && arg3 != 0) {
11545                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11546             }
11547         }
11548         return ret;
11549 #endif
11550 
11551 #ifdef CONFIG_SPLICE
11552 #ifdef TARGET_NR_tee
11553     case TARGET_NR_tee:
11554         {
11555             ret = get_errno(tee(arg1,arg2,arg3,arg4));
11556         }
11557         return ret;
11558 #endif
11559 #ifdef TARGET_NR_splice
11560     case TARGET_NR_splice:
11561         {
11562             loff_t loff_in, loff_out;
11563             loff_t *ploff_in = NULL, *ploff_out = NULL;
11564             if (arg2) {
11565                 if (get_user_u64(loff_in, arg2)) {
11566                     return -TARGET_EFAULT;
11567                 }
11568                 ploff_in = &loff_in;
11569             }
11570             if (arg4) {
11571                 if (get_user_u64(loff_out, arg4)) {
11572                     return -TARGET_EFAULT;
11573                 }
11574                 ploff_out = &loff_out;
11575             }
11576             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11577             if (arg2) {
11578                 if (put_user_u64(loff_in, arg2)) {
11579                     return -TARGET_EFAULT;
11580                 }
11581             }
11582             if (arg4) {
11583                 if (put_user_u64(loff_out, arg4)) {
11584                     return -TARGET_EFAULT;
11585                 }
11586             }
11587         }
11588         return ret;
11589 #endif
11590 #ifdef TARGET_NR_vmsplice
11591     case TARGET_NR_vmsplice:
11592         {
11593             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11594             if (vec != NULL) {
11595                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11596                 unlock_iovec(vec, arg2, arg3, 0);
11597             } else {
11598                 ret = -host_to_target_errno(errno);
11599             }
11600         }
11601         return ret;
11602 #endif
11603 #endif /* CONFIG_SPLICE */
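    /*
     * As with inotify above, the descriptor returned by eventfd is
     * registered with fd_trans_register() so that later reads and writes
     * on it can convert the kernel's native-endian payload into the layout
     * the guest expects.
     */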
11604 #ifdef CONFIG_EVENTFD
11605 #if defined(TARGET_NR_eventfd)
11606     case TARGET_NR_eventfd:
11607         ret = get_errno(eventfd(arg1, 0));
11608         if (ret >= 0) {
11609             fd_trans_register(ret, &target_eventfd_trans);
11610         }
11611         return ret;
11612 #endif
11613 #if defined(TARGET_NR_eventfd2)
11614     case TARGET_NR_eventfd2:
11615     {
11616         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11617         if (arg2 & TARGET_O_NONBLOCK) {
11618             host_flags |= O_NONBLOCK;
11619         }
11620         if (arg2 & TARGET_O_CLOEXEC) {
11621             host_flags |= O_CLOEXEC;
11622         }
11623         ret = get_errno(eventfd(arg1, host_flags));
11624         if (ret >= 0) {
11625             fd_trans_register(ret, &target_eventfd_trans);
11626         }
11627         return ret;
11628     }
11629 #endif
11630 #endif /* CONFIG_EVENTFD  */
11631 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11632     case TARGET_NR_fallocate:
11633 #if TARGET_ABI_BITS == 32
11634         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11635                                   target_offset64(arg5, arg6)));
11636 #else
11637         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11638 #endif
11639         return ret;
11640 #endif
11641 #if defined(CONFIG_SYNC_FILE_RANGE)
11642 #if defined(TARGET_NR_sync_file_range)
11643     case TARGET_NR_sync_file_range:
11644 #if TARGET_ABI_BITS == 32
11645 #if defined(TARGET_MIPS)
11646         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11647                                         target_offset64(arg5, arg6), arg7));
11648 #else
11649         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11650                                         target_offset64(arg4, arg5), arg6));
11651 #endif /* !TARGET_MIPS */
11652 #else
11653         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11654 #endif
11655         return ret;
11656 #endif
11657 #if defined(TARGET_NR_sync_file_range2)
11658     case TARGET_NR_sync_file_range2:
11659         /* This is like sync_file_range but the arguments are reordered */
11660 #if TARGET_ABI_BITS == 32
11661         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11662                                         target_offset64(arg5, arg6), arg2));
11663 #else
11664         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11665 #endif
11666         return ret;
11667 #endif
11668 #endif
11669 #if defined(TARGET_NR_signalfd4)
11670     case TARGET_NR_signalfd4:
11671         return do_signalfd4(arg1, arg2, arg4);
11672 #endif
11673 #if defined(TARGET_NR_signalfd)
11674     case TARGET_NR_signalfd:
11675         return do_signalfd4(arg1, arg2, 0);
11676 #endif
11677 #if defined(CONFIG_EPOLL)
11678 #if defined(TARGET_NR_epoll_create)
11679     case TARGET_NR_epoll_create:
11680         return get_errno(epoll_create(arg1));
11681 #endif
11682 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11683     case TARGET_NR_epoll_create1:
11684         return get_errno(epoll_create1(arg1));
11685 #endif
11686 #if defined(TARGET_NR_epoll_ctl)
11687     case TARGET_NR_epoll_ctl:
11688     {
11689         struct epoll_event ep;
11690         struct epoll_event *epp = 0;
11691         if (arg4) {
11692             struct target_epoll_event *target_ep;
11693             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11694                 return -TARGET_EFAULT;
11695             }
11696             ep.events = tswap32(target_ep->events);
11697             /* The epoll_data_t union is just opaque data to the kernel,
11698              * so we transfer all 64 bits across and need not worry what
11699              * actual data type it is.
11700              */
11701             ep.data.u64 = tswap64(target_ep->data.u64);
11702             unlock_user_struct(target_ep, arg4, 0);
11703             epp = &ep;
11704         }
11705         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11706     }
11707 #endif
11708 
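    /*
     * epoll_wait and epoll_pwait share one implementation: both end up in
     * the host epoll_pwait, with plain epoll_wait passing a NULL signal
     * mask.  Successful results are converted back into the guest's
     * target_epoll_event layout before the events buffer is unlocked.
     */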
11709 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11710 #if defined(TARGET_NR_epoll_wait)
11711     case TARGET_NR_epoll_wait:
11712 #endif
11713 #if defined(TARGET_NR_epoll_pwait)
11714     case TARGET_NR_epoll_pwait:
11715 #endif
11716     {
11717         struct target_epoll_event *target_ep;
11718         struct epoll_event *ep;
11719         int epfd = arg1;
11720         int maxevents = arg3;
11721         int timeout = arg4;
11722 
11723         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11724             return -TARGET_EINVAL;
11725         }
11726 
11727         target_ep = lock_user(VERIFY_WRITE, arg2,
11728                               maxevents * sizeof(struct target_epoll_event), 1);
11729         if (!target_ep) {
11730             return -TARGET_EFAULT;
11731         }
11732 
11733         ep = g_try_new(struct epoll_event, maxevents);
11734         if (!ep) {
11735             unlock_user(target_ep, arg2, 0);
11736             return -TARGET_ENOMEM;
11737         }
11738 
11739         switch (num) {
11740 #if defined(TARGET_NR_epoll_pwait)
11741         case TARGET_NR_epoll_pwait:
11742         {
11743             target_sigset_t *target_set;
11744             sigset_t _set, *set = &_set;
11745 
11746             if (arg5) {
11747                 if (arg6 != sizeof(target_sigset_t)) {
11748                     ret = -TARGET_EINVAL;
11749                     break;
11750                 }
11751 
11752                 target_set = lock_user(VERIFY_READ, arg5,
11753                                        sizeof(target_sigset_t), 1);
11754                 if (!target_set) {
11755                     ret = -TARGET_EFAULT;
11756                     break;
11757                 }
11758                 target_to_host_sigset(set, target_set);
11759                 unlock_user(target_set, arg5, 0);
11760             } else {
11761                 set = NULL;
11762             }
11763 
11764             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11765                                              set, SIGSET_T_SIZE));
11766             break;
11767         }
11768 #endif
11769 #if defined(TARGET_NR_epoll_wait)
11770         case TARGET_NR_epoll_wait:
11771             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11772                                              NULL, 0));
11773             break;
11774 #endif
11775         default:
11776             ret = -TARGET_ENOSYS;
11777         }
11778         if (!is_error(ret)) {
11779             int i;
11780             for (i = 0; i < ret; i++) {
11781                 target_ep[i].events = tswap32(ep[i].events);
11782                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11783             }
11784             unlock_user(target_ep, arg2,
11785                         ret * sizeof(struct target_epoll_event));
11786         } else {
11787             unlock_user(target_ep, arg2, 0);
11788         }
11789         g_free(ep);
11790         return ret;
11791     }
11792 #endif
11793 #endif
11794 #ifdef TARGET_NR_prlimit64
11795     case TARGET_NR_prlimit64:
11796     {
11797         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11798         struct target_rlimit64 *target_rnew, *target_rold;
11799         struct host_rlimit64 rnew, rold, *rnewp = 0;
11800         int resource = target_to_host_resource(arg2);
11801         if (arg3) {
11802             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11803                 return -TARGET_EFAULT;
11804             }
11805             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11806             rnew.rlim_max = tswap64(target_rnew->rlim_max);
11807             unlock_user_struct(target_rnew, arg3, 0);
11808             rnewp = &rnew;
11809         }
11810 
11811         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11812         if (!is_error(ret) && arg4) {
11813             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11814                 return -TARGET_EFAULT;
11815             }
11816             target_rold->rlim_cur = tswap64(rold.rlim_cur);
11817             target_rold->rlim_max = tswap64(rold.rlim_max);
11818             unlock_user_struct(target_rold, arg4, 1);
11819         }
11820         return ret;
11821     }
11822 #endif
11823 #ifdef TARGET_NR_gethostname
11824     case TARGET_NR_gethostname:
11825     {
11826         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11827         if (name) {
11828             ret = get_errno(gethostname(name, arg2));
11829             unlock_user(name, arg1, arg2);
11830         } else {
11831             ret = -TARGET_EFAULT;
11832         }
11833         return ret;
11834     }
11835 #endif
11836 #ifdef TARGET_NR_atomic_cmpxchg_32
11837     case TARGET_NR_atomic_cmpxchg_32:
11838     {
11839         /* should use start_exclusive from main.c */
11840         abi_ulong mem_value;
11841         if (get_user_u32(mem_value, arg6)) {
11842             target_siginfo_t info;
11843             info.si_signo = SIGSEGV;
11844             info.si_errno = 0;
11845             info.si_code = TARGET_SEGV_MAPERR;
11846             info._sifields._sigfault._addr = arg6;
11847             queue_signal((CPUArchState *)cpu_env, info.si_signo,
11848                          QEMU_SI_FAULT, &info);
11849             ret = 0xdeadbeef;
                  return ret;
11850 
11851         }
11852         if (mem_value == arg2)
11853             put_user_u32(arg1, arg6);
11854         return mem_value;
11855     }
11856 #endif
11857 #ifdef TARGET_NR_atomic_barrier
11858     case TARGET_NR_atomic_barrier:
11859         /* Like the kernel implementation and the
11860            qemu arm barrier, no-op this? */
11861         return 0;
11862 #endif
11863 
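    /*
     * Host POSIX timers are tracked in the g_posix_timers[] table.  The
     * guest is handed TIMER_MAGIC | index as its timer_t, and
     * get_timer_id() validates that magic and recovers the table slot for
     * the timer_settime/gettime/getoverrun/delete cases that follow.
     */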
11864 #ifdef TARGET_NR_timer_create
11865     case TARGET_NR_timer_create:
11866     {
11867         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11868 
11869         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11870 
11871         int clkid = arg1;
11872         int timer_index = next_free_host_timer();
11873 
11874         if (timer_index < 0) {
11875             ret = -TARGET_EAGAIN;
11876         } else {
11877             timer_t *phtimer = g_posix_timers  + timer_index;
11878 
11879             if (arg2) {
11880                 phost_sevp = &host_sevp;
11881                 ret = target_to_host_sigevent(phost_sevp, arg2);
11882                 if (ret != 0) {
11883                     return ret;
11884                 }
11885             }
11886 
11887             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11888             if (ret) {
11889                 phtimer = NULL;
11890             } else {
11891                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11892                     return -TARGET_EFAULT;
11893                 }
11894             }
11895         }
11896         return ret;
11897     }
11898 #endif
11899 
11900 #ifdef TARGET_NR_timer_settime
11901     case TARGET_NR_timer_settime:
11902     {
11903         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11904          * struct itimerspec * old_value */
11905         target_timer_t timerid = get_timer_id(arg1);
11906 
11907         if (timerid < 0) {
11908             ret = timerid;
11909         } else if (arg3 == 0) {
11910             ret = -TARGET_EINVAL;
11911         } else {
11912             timer_t htimer = g_posix_timers[timerid];
11913             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11914 
11915             if (target_to_host_itimerspec(&hspec_new, arg3)) {
11916                 return -TARGET_EFAULT;
11917             }
11918             ret = get_errno(
11919                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11920             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11921                 return -TARGET_EFAULT;
11922             }
11923         }
11924         return ret;
11925     }
11926 #endif
11927 
11928 #ifdef TARGET_NR_timer_gettime
11929     case TARGET_NR_timer_gettime:
11930     {
11931         /* args: timer_t timerid, struct itimerspec *curr_value */
11932         target_timer_t timerid = get_timer_id(arg1);
11933 
11934         if (timerid < 0) {
11935             ret = timerid;
11936         } else if (!arg2) {
11937             ret = -TARGET_EFAULT;
11938         } else {
11939             timer_t htimer = g_posix_timers[timerid];
11940             struct itimerspec hspec;
11941             ret = get_errno(timer_gettime(htimer, &hspec));
11942 
11943             if (host_to_target_itimerspec(arg2, &hspec)) {
11944                 ret = -TARGET_EFAULT;
11945             }
11946         }
11947         return ret;
11948     }
11949 #endif
11950 
11951 #ifdef TARGET_NR_timer_getoverrun
11952     case TARGET_NR_timer_getoverrun:
11953     {
11954         /* args: timer_t timerid */
11955         target_timer_t timerid = get_timer_id(arg1);
11956 
11957         if (timerid < 0) {
11958             ret = timerid;
11959         } else {
11960             timer_t htimer = g_posix_timers[timerid];
11961             ret = get_errno(timer_getoverrun(htimer));
11962         }
11963         return ret;
11964     }
11965 #endif
11966 
11967 #ifdef TARGET_NR_timer_delete
11968     case TARGET_NR_timer_delete:
11969     {
11970         /* args: timer_t timerid */
11971         target_timer_t timerid = get_timer_id(arg1);
11972 
11973         if (timerid < 0) {
11974             ret = timerid;
11975         } else {
11976             timer_t htimer = g_posix_timers[timerid];
11977             ret = get_errno(timer_delete(htimer));
11978             g_posix_timers[timerid] = 0;
11979         }
11980         return ret;
11981     }
11982 #endif
11983 
11984 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11985     case TARGET_NR_timerfd_create:
11986         return get_errno(timerfd_create(arg1,
11987                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11988 #endif
11989 
11990 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11991     case TARGET_NR_timerfd_gettime:
11992         {
11993             struct itimerspec its_curr;
11994 
11995             ret = get_errno(timerfd_gettime(arg1, &its_curr));
11996 
11997             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11998                 return -TARGET_EFAULT;
11999             }
12000         }
12001         return ret;
12002 #endif
12003 
12004 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12005     case TARGET_NR_timerfd_settime:
12006         {
12007             struct itimerspec its_new, its_old, *p_new;
12008 
12009             if (arg3) {
12010                 if (target_to_host_itimerspec(&its_new, arg3)) {
12011                     return -TARGET_EFAULT;
12012                 }
12013                 p_new = &its_new;
12014             } else {
12015                 p_new = NULL;
12016             }
12017 
12018             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12019 
12020             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12021                 return -TARGET_EFAULT;
12022             }
12023         }
12024         return ret;
12025 #endif
12026 
12027 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12028     case TARGET_NR_ioprio_get:
12029         return get_errno(ioprio_get(arg1, arg2));
12030 #endif
12031 
12032 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12033     case TARGET_NR_ioprio_set:
12034         return get_errno(ioprio_set(arg1, arg2, arg3));
12035 #endif
12036 
12037 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12038     case TARGET_NR_setns:
12039         return get_errno(setns(arg1, arg2));
12040 #endif
12041 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12042     case TARGET_NR_unshare:
12043         return get_errno(unshare(arg1));
12044 #endif
12045 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12046     case TARGET_NR_kcmp:
12047         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12048 #endif
12049 #ifdef TARGET_NR_swapcontext
12050     case TARGET_NR_swapcontext:
12051         /* PowerPC specific.  */
12052         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12053 #endif
12054 #ifdef TARGET_NR_memfd_create
12055     case TARGET_NR_memfd_create:
12056         p = lock_user_string(arg1);
12057         if (!p) {
12058             return -TARGET_EFAULT;
12059         }
12060         ret = get_errno(memfd_create(p, arg2));
12061         fd_trans_unregister(ret);
12062         unlock_user(p, arg1, 0);
12063         return ret;
12064 #endif
12065 
12066     default:
12067         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12068         return -TARGET_ENOSYS;
12069     }
12070     return ret;
12071 }
12072 
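/*
 * do_syscall() is the wrapper every guest syscall passes through: it
 * records the call for the tracing infrastructure, optionally prints an
 * strace-style line when -strace is in effect, and delegates the actual
 * emulation to do_syscall1() above.
 */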
12073 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12074                     abi_long arg2, abi_long arg3, abi_long arg4,
12075                     abi_long arg5, abi_long arg6, abi_long arg7,
12076                     abi_long arg8)
12077 {
12078     CPUState *cpu = env_cpu(cpu_env);
12079     abi_long ret;
12080 
12081 #ifdef DEBUG_ERESTARTSYS
12082     /* Debug-only code for exercising the syscall-restart code paths
12083      * in the per-architecture cpu main loops: restart every syscall
12084      * the guest makes once before letting it through.
12085      */
12086     {
12087         static bool flag;
12088         flag = !flag;
12089         if (flag) {
12090             return -TARGET_ERESTARTSYS;
12091         }
12092     }
12093 #endif
12094 
12095     record_syscall_start(cpu, num, arg1,
12096                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12097 
12098     if (unlikely(do_strace)) {
12099         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12100         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12101                           arg5, arg6, arg7, arg8);
12102         print_syscall_ret(num, ret);
12103     } else {
12104         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12105                           arg5, arg6, arg7, arg8);
12106     }
12107 
12108     record_syscall_return(cpu, num, ret);
12109     return ret;
12110 }
12111 }