xref: /openbmc/qemu/linux-user/syscall.c (revision f4d92c5e)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #endif
118 #include "linux_loop.h"
119 #include "uname.h"
120 
121 #include "qemu.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
127 #include "tcg/tcg.h"
128 
129 #ifndef CLONE_IO
130 #define CLONE_IO                0x80000000      /* Clone io context */
131 #endif
132 
133 /* We can't directly call the host clone syscall, because this will
134  * badly confuse libc (breaking mutexes, for example). So we must
135  * divide clone flags into:
136  *  * flag combinations that look like pthread_create()
137  *  * flag combinations that look like fork()
138  *  * flags we can implement within QEMU itself
139  *  * flags we can't support and will return an error for
140  */
141 /* For thread creation, all these flags must be present; for
142  * fork, none must be present.
143  */
144 #define CLONE_THREAD_FLAGS                              \
145     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
146      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
147 
148 /* These flags are ignored:
149  * CLONE_DETACHED is now ignored by the kernel;
150  * CLONE_IO is just an optimisation hint to the I/O scheduler
151  */
152 #define CLONE_IGNORED_FLAGS                     \
153     (CLONE_DETACHED | CLONE_IO)
154 
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS               \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
159 
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
162     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
163      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
164 
165 #define CLONE_INVALID_FORK_FLAGS                                        \
166     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
167 
168 #define CLONE_INVALID_THREAD_FLAGS                                      \
169     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
170        CLONE_IGNORED_FLAGS))
171 
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173  * have almost all been allocated. We cannot support any of
174  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176  * The checks against the invalid thread masks above will catch these.
177  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
178  */
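
/*
 * Illustrative sketch of how the masks above classify a clone call
 * (not part of the build; the real checks live in do_fork()).  A
 * typical glibc pthread_create() passes roughly
 *
 *     flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *             CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *             CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 *
 * which satisfies (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS
 * and (flags & CLONE_INVALID_THREAD_FLAGS) == 0, so it is treated as
 * thread creation.  A plain fork() passes little more than SIGCHLD
 * (glibc typically adds CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID),
 * which leaves CLONE_THREAD_FLAGS clear and matches no bit in
 * CLONE_INVALID_FORK_FLAGS, so it is treated as a fork.
 */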
179 
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181  * once. This exercises the codepaths for restart.
182  */
183 //#define DEBUG_ERESTARTSYS
184 
185 //#include <linux/msdos_fs.h>
186 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
187 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
188 
189 #undef _syscall0
190 #undef _syscall1
191 #undef _syscall2
192 #undef _syscall3
193 #undef _syscall4
194 #undef _syscall5
195 #undef _syscall6
196 
197 #define _syscall0(type,name)		\
198 static type name (void)			\
199 {					\
200 	return syscall(__NR_##name);	\
201 }
202 
203 #define _syscall1(type,name,type1,arg1)		\
204 static type name (type1 arg1)			\
205 {						\
206 	return syscall(__NR_##name, arg1);	\
207 }
208 
209 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
210 static type name (type1 arg1,type2 arg2)		\
211 {							\
212 	return syscall(__NR_##name, arg1, arg2);	\
213 }
214 
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
216 static type name (type1 arg1,type2 arg2,type3 arg3)		\
217 {								\
218 	return syscall(__NR_##name, arg1, arg2, arg3);		\
219 }
220 
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
225 }
226 
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
228 		  type5,arg5)							\
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
230 {										\
231 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
232 }
233 
234 
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5,type6,arg6)					\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
238                   type6 arg6)							\
239 {										\
240 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
241 }
242 
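/*
 * For reference, a sketch of what these macros expand to (the names
 * here are made up for illustration):
 *
 *     _syscall2(int, sys_foo, int, a, long, b)
 *
 * defines
 *
 *     static int sys_foo(int a, long b)
 *     {
 *         return syscall(__NR_sys_foo, a, b);
 *     }
 *
 * i.e. a thin wrapper that issues the raw host syscall directly and
 * bypasses whatever wrapper (if any) the host libc provides, which is
 * why each use below pairs a __NR_sys_xxx define with the macro.
 */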
243 
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
254 #endif
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
257 #endif
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
262 
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
265 #endif
266 
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
270 #endif
271 
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid)
274 
275 /* For the 64-bit guest on 32-bit host case we must emulate
276  * getdents using getdents64, because otherwise the host
277  * might hand us back more dirent records than we can fit
278  * into the guest buffer after structure format conversion.
279  * Otherwise we emulate getdents with getdents if the host has it.
280  */
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
283 #endif
284 
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
287 #endif
288 #if (defined(TARGET_NR_getdents) && \
289       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
292 #endif
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
295           loff_t *, res, uint, wh);
296 #endif
297 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
298 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
299           siginfo_t *, uinfo)
300 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group,int,error_code)
303 #endif
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address,int *,tidptr)
306 #endif
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
309           const struct timespec *,timeout,int *,uaddr2,int,val3)
310 #endif
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
313           const struct timespec *,timeout,int *,uaddr2,int,val3)
314 #endif
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
317           unsigned long *, user_mask_ptr);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
320           unsigned long *, user_mask_ptr);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
323 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
324           void *, arg);
325 _syscall2(int, capget, struct __user_cap_header_struct *, header,
326           struct __user_cap_data_struct *, data);
327 _syscall2(int, capset, struct __user_cap_header_struct *, header,
328           struct __user_cap_data_struct *, data);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get, int, which, int, who)
331 #endif
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
334 #endif
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
337 #endif
338 
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
341           unsigned long, idx1, unsigned long, idx2)
342 #endif
343 
344 /*
345  * It is assumed that struct statx is architecture independent.
346  */
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
349           unsigned int, mask, struct target_statx *, statxbuf)
350 #endif
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier, int, cmd, int, flags)
353 #endif
354 
355 static bitmask_transtbl fcntl_flags_tbl[] = {
356   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
357   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
358   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
359   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
360   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
361   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
362   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
363   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
364   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
365   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
366   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
367   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
368   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
369 #if defined(O_DIRECT)
370   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
371 #endif
372 #if defined(O_NOATIME)
373   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
374 #endif
375 #if defined(O_CLOEXEC)
376   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
377 #endif
378 #if defined(O_PATH)
379   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
380 #endif
381 #if defined(O_TMPFILE)
382   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
383 #endif
384   /* Don't terminate the list prematurely on 64-bit host+guest.  */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
387 #endif
388   { 0, 0, 0, 0 }
389 };
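
/*
 * Each row above is { target_mask, target_bits, host_mask, host_bits }:
 * when converting, the value is AND-ed with the mask and, if the result
 * equals the "bits" field, the corresponding bits on the other side are
 * OR-ed into the output.  A rough sketch of the target-to-host direction
 * (the real helper is target_to_host_bitmask(); this is only the assumed
 * shape of that loop):
 *
 *     host_flags = 0;
 *     for (e = fcntl_flags_tbl; e->target_mask; e++) {
 *         if ((target_flags & e->target_mask) == e->target_bits) {
 *             host_flags |= e->host_bits;
 *         }
 *     }
 *
 * so a guest open() flag word of TARGET_O_WRONLY | TARGET_O_CREAT comes
 * out as the host's O_WRONLY | O_CREAT even if the numeric values differ.
 */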
390 
391 static int sys_getcwd1(char *buf, size_t size)
392 {
393   if (getcwd(buf, size) == NULL) {
394       /* getcwd() sets errno */
395       return (-1);
396   }
397   return strlen(buf)+1;
398 }
399 
400 #ifdef TARGET_NR_utimensat
401 #if defined(__NR_utimensat)
402 #define __NR_sys_utimensat __NR_utimensat
403 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
404           const struct timespec *,tsp,int,flags)
405 #else
406 static int sys_utimensat(int dirfd, const char *pathname,
407                          const struct timespec times[2], int flags)
408 {
409     errno = ENOSYS;
410     return -1;
411 }
412 #endif
413 #endif /* TARGET_NR_utimensat */
414 
415 #ifdef TARGET_NR_renameat2
416 #if defined(__NR_renameat2)
417 #define __NR_sys_renameat2 __NR_renameat2
418 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
419           const char *, new, unsigned int, flags)
420 #else
421 static int sys_renameat2(int oldfd, const char *old,
422                          int newfd, const char *new, int flags)
423 {
424     if (flags == 0) {
425         return renameat(oldfd, old, newfd, new);
426     }
427     errno = ENOSYS;
428     return -1;
429 }
430 #endif
431 #endif /* TARGET_NR_renameat2 */
432 
433 #ifdef CONFIG_INOTIFY
434 #include <sys/inotify.h>
435 
436 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
437 static int sys_inotify_init(void)
438 {
439   return (inotify_init());
440 }
441 #endif
442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
443 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
444 {
445   return (inotify_add_watch(fd, pathname, mask));
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
449 static int sys_inotify_rm_watch(int fd, int32_t wd)
450 {
451   return (inotify_rm_watch(fd, wd));
452 }
453 #endif
454 #ifdef CONFIG_INOTIFY1
455 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
456 static int sys_inotify_init1(int flags)
457 {
458   return (inotify_init1(flags));
459 }
460 #endif
461 #endif
462 #else
463 /* Userspace can usually survive runtime without inotify */
464 #undef TARGET_NR_inotify_init
465 #undef TARGET_NR_inotify_init1
466 #undef TARGET_NR_inotify_add_watch
467 #undef TARGET_NR_inotify_rm_watch
468 #endif /* CONFIG_INOTIFY  */
469 
470 #if defined(TARGET_NR_prlimit64)
471 #ifndef __NR_prlimit64
472 # define __NR_prlimit64 -1
473 #endif
474 #define __NR_sys_prlimit64 __NR_prlimit64
475 /* The glibc rlimit structure may not be that used by the underlying syscall */
476 struct host_rlimit64 {
477     uint64_t rlim_cur;
478     uint64_t rlim_max;
479 };
480 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
481           const struct host_rlimit64 *, new_limit,
482           struct host_rlimit64 *, old_limit)
483 #endif
484 
485 
486 #if defined(TARGET_NR_timer_create)
487 /* Maximum of 32 active POSIX timers allowed at any one time. */
488 static timer_t g_posix_timers[32] = { 0, };
489 
490 static inline int next_free_host_timer(void)
491 {
492     int k;
493     /* FIXME: Does finding the next free slot require a lock? */
494     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
495         if (g_posix_timers[k] == 0) {
496             g_posix_timers[k] = (timer_t) 1;
497             return k;
498         }
499     }
500     return -1;
501 }
502 #endif
503 
504 /* ARM EABI and MIPS expect 64-bit types aligned even on pairs of registers */
505 #ifdef TARGET_ARM
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     return ((((CPUARMState *)cpu_env)->eabi) == 1);
509 }
510 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
512 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
513 /* SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
514  * of registers, which translates to the same as ARM/MIPS, because we start with
515  * r3 as arg1 */
516 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
517 #elif defined(TARGET_SH4)
518 /* SH4 doesn't align register pairs, except for p{read,write}64 */
519 static inline int regpairs_aligned(void *cpu_env, int num)
520 {
521     switch (num) {
522     case TARGET_NR_pread64:
523     case TARGET_NR_pwrite64:
524         return 1;
525 
526     default:
527         return 0;
528     }
529 }
530 #elif defined(TARGET_XTENSA)
531 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
532 #else
533 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
534 #endif
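
/*
 * Concrete case (illustrative): ARM EABI pread64(fd, buf, count, pos)
 * passes the 64-bit pos in the aligned r4/r5 pair and leaves r3 unused,
 * so when regpairs_aligned() returns 1 the syscall dispatch below has to
 * skip that padding argument before reassembling the 64-bit value from
 * the two halves.
 */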
535 
536 #define ERRNO_TABLE_SIZE 1200
537 
538 /* target_to_host_errno_table[] is initialized from
539  * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
541 };
542 
543 /*
544  * This list is the union of errno values overridden in asm-<arch>/errno.h
545  * minus the errnos that are not actually generic to all archs.
546  */
547 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
548     [EAGAIN]		= TARGET_EAGAIN,
549     [EIDRM]		= TARGET_EIDRM,
550     [ECHRNG]		= TARGET_ECHRNG,
551     [EL2NSYNC]		= TARGET_EL2NSYNC,
552     [EL3HLT]		= TARGET_EL3HLT,
553     [EL3RST]		= TARGET_EL3RST,
554     [ELNRNG]		= TARGET_ELNRNG,
555     [EUNATCH]		= TARGET_EUNATCH,
556     [ENOCSI]		= TARGET_ENOCSI,
557     [EL2HLT]		= TARGET_EL2HLT,
558     [EDEADLK]		= TARGET_EDEADLK,
559     [ENOLCK]		= TARGET_ENOLCK,
560     [EBADE]		= TARGET_EBADE,
561     [EBADR]		= TARGET_EBADR,
562     [EXFULL]		= TARGET_EXFULL,
563     [ENOANO]		= TARGET_ENOANO,
564     [EBADRQC]		= TARGET_EBADRQC,
565     [EBADSLT]		= TARGET_EBADSLT,
566     [EBFONT]		= TARGET_EBFONT,
567     [ENOSTR]		= TARGET_ENOSTR,
568     [ENODATA]		= TARGET_ENODATA,
569     [ETIME]		= TARGET_ETIME,
570     [ENOSR]		= TARGET_ENOSR,
571     [ENONET]		= TARGET_ENONET,
572     [ENOPKG]		= TARGET_ENOPKG,
573     [EREMOTE]		= TARGET_EREMOTE,
574     [ENOLINK]		= TARGET_ENOLINK,
575     [EADV]		= TARGET_EADV,
576     [ESRMNT]		= TARGET_ESRMNT,
577     [ECOMM]		= TARGET_ECOMM,
578     [EPROTO]		= TARGET_EPROTO,
579     [EDOTDOT]		= TARGET_EDOTDOT,
580     [EMULTIHOP]		= TARGET_EMULTIHOP,
581     [EBADMSG]		= TARGET_EBADMSG,
582     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
583     [EOVERFLOW]		= TARGET_EOVERFLOW,
584     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
585     [EBADFD]		= TARGET_EBADFD,
586     [EREMCHG]		= TARGET_EREMCHG,
587     [ELIBACC]		= TARGET_ELIBACC,
588     [ELIBBAD]		= TARGET_ELIBBAD,
589     [ELIBSCN]		= TARGET_ELIBSCN,
590     [ELIBMAX]		= TARGET_ELIBMAX,
591     [ELIBEXEC]		= TARGET_ELIBEXEC,
592     [EILSEQ]		= TARGET_EILSEQ,
593     [ENOSYS]		= TARGET_ENOSYS,
594     [ELOOP]		= TARGET_ELOOP,
595     [ERESTART]		= TARGET_ERESTART,
596     [ESTRPIPE]		= TARGET_ESTRPIPE,
597     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
598     [EUSERS]		= TARGET_EUSERS,
599     [ENOTSOCK]		= TARGET_ENOTSOCK,
600     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
601     [EMSGSIZE]		= TARGET_EMSGSIZE,
602     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
603     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
604     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
605     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
606     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
607     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
608     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
609     [EADDRINUSE]	= TARGET_EADDRINUSE,
610     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
611     [ENETDOWN]		= TARGET_ENETDOWN,
612     [ENETUNREACH]	= TARGET_ENETUNREACH,
613     [ENETRESET]		= TARGET_ENETRESET,
614     [ECONNABORTED]	= TARGET_ECONNABORTED,
615     [ECONNRESET]	= TARGET_ECONNRESET,
616     [ENOBUFS]		= TARGET_ENOBUFS,
617     [EISCONN]		= TARGET_EISCONN,
618     [ENOTCONN]		= TARGET_ENOTCONN,
619     [EUCLEAN]		= TARGET_EUCLEAN,
620     [ENOTNAM]		= TARGET_ENOTNAM,
621     [ENAVAIL]		= TARGET_ENAVAIL,
622     [EISNAM]		= TARGET_EISNAM,
623     [EREMOTEIO]		= TARGET_EREMOTEIO,
624     [EDQUOT]            = TARGET_EDQUOT,
625     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
626     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
627     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
628     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
629     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
630     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
631     [EALREADY]		= TARGET_EALREADY,
632     [EINPROGRESS]	= TARGET_EINPROGRESS,
633     [ESTALE]		= TARGET_ESTALE,
634     [ECANCELED]		= TARGET_ECANCELED,
635     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
636     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
637 #ifdef ENOKEY
638     [ENOKEY]		= TARGET_ENOKEY,
639 #endif
640 #ifdef EKEYEXPIRED
641     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
642 #endif
643 #ifdef EKEYREVOKED
644     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
645 #endif
646 #ifdef EKEYREJECTED
647     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
648 #endif
649 #ifdef EOWNERDEAD
650     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
651 #endif
652 #ifdef ENOTRECOVERABLE
653     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
654 #endif
655 #ifdef ENOMSG
656     [ENOMSG]            = TARGET_ENOMSG,
657 #endif
658 #ifdef ERFKILL
659     [ERFKILL]           = TARGET_ERFKILL,
660 #endif
661 #ifdef EHWPOISON
662     [EHWPOISON]         = TARGET_EHWPOISON,
663 #endif
664 };
665 
666 static inline int host_to_target_errno(int err)
667 {
668     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
669         host_to_target_errno_table[err]) {
670         return host_to_target_errno_table[err];
671     }
672     return err;
673 }
674 
675 static inline int target_to_host_errno(int err)
676 {
677     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
678         target_to_host_errno_table[err]) {
679         return target_to_host_errno_table[err];
680     }
681     return err;
682 }
683 
684 static inline abi_long get_errno(abi_long ret)
685 {
686     if (ret == -1)
687         return -host_to_target_errno(errno);
688     else
689         return ret;
690 }
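
/*
 * Example (illustrative): if a host call fails with errno == ENOSYS,
 * get_errno() returns -host_to_target_errno(ENOSYS), i.e. minus the
 * guest's TARGET_ENOSYS value, and that negative target errno is what
 * eventually gets handed back to the guest.  Values missing from the
 * table above are assumed to be numerically identical on host and
 * target and are passed through unchanged.
 */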
691 
692 const char *target_strerror(int err)
693 {
694     if (err == TARGET_ERESTARTSYS) {
695         return "To be restarted";
696     }
697     if (err == TARGET_QEMU_ESIGRETURN) {
698         return "Successful exit from sigreturn";
699     }
700 
701     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
702         return NULL;
703     }
704     return strerror(target_to_host_errno(err));
705 }
706 
707 #define safe_syscall0(type, name) \
708 static type safe_##name(void) \
709 { \
710     return safe_syscall(__NR_##name); \
711 }
712 
713 #define safe_syscall1(type, name, type1, arg1) \
714 static type safe_##name(type1 arg1) \
715 { \
716     return safe_syscall(__NR_##name, arg1); \
717 }
718 
719 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
720 static type safe_##name(type1 arg1, type2 arg2) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2); \
723 }
724 
725 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
727 { \
728     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
729 }
730 
731 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
732     type4, arg4) \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
734 { \
735     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
736 }
737 
738 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
739     type4, arg4, type5, arg5) \
740 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
741     type5 arg5) \
742 { \
743     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
744 }
745 
746 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
747     type4, arg4, type5, arg5, type6, arg6) \
748 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
749     type5 arg5, type6 arg6) \
750 { \
751     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
752 }
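
/*
 * These wrappers behave like the raw syscall (result, or -1 with errno
 * set), so a typical call site in the big do_syscall() switch looks
 * roughly like (illustrative, simplified):
 *
 *     ret = get_errno(safe_read(fd, host_buf, count));
 *
 * The "safe_" part is that the syscall is entered in a way that lets a
 * pending guest signal interrupt it cleanly, so the emulation can report
 * TARGET_ERESTARTSYS and restart the syscall instead of losing it.
 */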
753 
754 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
755 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
756 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
757               int, flags, mode_t, mode)
758 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
759 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
760               struct rusage *, rusage)
761 #endif
762 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
763               int, options, struct rusage *, rusage)
764 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
765 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
766     defined(TARGET_NR_pselect6)
767 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
768               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
769 #endif
770 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
771 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
772               struct timespec *, tsp, const sigset_t *, sigmask,
773               size_t, sigsetsize)
774 #endif
775 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
776               int, maxevents, int, timeout, const sigset_t *, sigmask,
777               size_t, sigsetsize)
778 #if defined(__NR_futex)
779 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
780               const struct timespec *,timeout,int *,uaddr2,int,val3)
781 #endif
782 #if defined(__NR_futex_time64)
783 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
784               const struct timespec *,timeout,int *,uaddr2,int,val3)
785 #endif
786 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
787 safe_syscall2(int, kill, pid_t, pid, int, sig)
788 safe_syscall2(int, tkill, int, tid, int, sig)
789 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
790 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
791 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
792 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
793               unsigned long, pos_l, unsigned long, pos_h)
794 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
795               unsigned long, pos_l, unsigned long, pos_h)
796 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
797               socklen_t, addrlen)
798 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
799               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
800 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
801               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
802 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
803 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
804 safe_syscall2(int, flock, int, fd, int, operation)
805 #ifdef TARGET_NR_rt_sigtimedwait
806 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
807               const struct timespec *, uts, size_t, sigsetsize)
808 #endif
809 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
810               int, flags)
811 #if defined(TARGET_NR_nanosleep)
812 safe_syscall2(int, nanosleep, const struct timespec *, req,
813               struct timespec *, rem)
814 #endif
815 #ifdef TARGET_NR_clock_nanosleep
816 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
817               const struct timespec *, req, struct timespec *, rem)
818 #endif
819 #ifdef __NR_ipc
820 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
821               void *, ptr, long, fifth)
822 #endif
823 #ifdef __NR_msgsnd
824 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
825               int, flags)
826 #endif
827 #ifdef __NR_msgrcv
828 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
829               long, msgtype, int, flags)
830 #endif
831 #ifdef __NR_semtimedop
832 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
833               unsigned, nsops, const struct timespec *, timeout)
834 #endif
835 #ifdef TARGET_NR_mq_timedsend
836 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
837               size_t, len, unsigned, prio, const struct timespec *, timeout)
838 #endif
839 #ifdef TARGET_NR_mq_timedreceive
840 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
841               size_t, len, unsigned *, prio, const struct timespec *, timeout)
842 #endif
843 /* We do ioctl like this rather than via safe_syscall3 to preserve the
844  * "third argument might be integer or pointer or not present" behaviour of
845  * the libc function.
846  */
847 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
848 /* Similarly for fcntl. Note that callers must always:
849  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
850  *  use the flock64 struct rather than unsuffixed flock
851  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
852  */
853 #ifdef __NR_fcntl64
854 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
855 #else
856 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
857 #endif
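
/*
 * Illustrative use, following the rules in the comment above (simplified
 * from the fcntl emulation further down):
 *
 *     struct flock64 fl64;
 *     ...fill fl64 from the guest...
 *     ret = get_errno(safe_fcntl(fd, F_SETLK64, &fl64));
 *
 * i.e. always the 64-suffixed command constants and struct flock64, so
 * the same code path gets 64-bit file offsets on every host.
 */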
858 
859 static inline int host_to_target_sock_type(int host_type)
860 {
861     int target_type;
862 
863     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
864     case SOCK_DGRAM:
865         target_type = TARGET_SOCK_DGRAM;
866         break;
867     case SOCK_STREAM:
868         target_type = TARGET_SOCK_STREAM;
869         break;
870     default:
871         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
872         break;
873     }
874 
875 #if defined(SOCK_CLOEXEC)
876     if (host_type & SOCK_CLOEXEC) {
877         target_type |= TARGET_SOCK_CLOEXEC;
878     }
879 #endif
880 
881 #if defined(SOCK_NONBLOCK)
882     if (host_type & SOCK_NONBLOCK) {
883         target_type |= TARGET_SOCK_NONBLOCK;
884     }
885 #endif
886 
887     return target_type;
888 }
889 
890 static abi_ulong target_brk;
891 static abi_ulong target_original_brk;
892 static abi_ulong brk_page;
893 
894 void target_set_brk(abi_ulong new_brk)
895 {
896     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
897     brk_page = HOST_PAGE_ALIGN(target_brk);
898 }
899 
900 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
901 #define DEBUGF_BRK(message, args...)
902 
903 /* do_brk() must return target values and target errnos. */
904 abi_long do_brk(abi_ulong new_brk)
905 {
906     abi_long mapped_addr;
907     abi_ulong new_alloc_size;
908 
909     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
910 
911     if (!new_brk) {
912         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
913         return target_brk;
914     }
915     if (new_brk < target_original_brk) {
916         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
917                    target_brk);
918         return target_brk;
919     }
920 
921     /* If the new brk is less than the highest page reserved to the
922      * target heap allocation, set it and we're almost done...  */
923     if (new_brk <= brk_page) {
924         /* Heap contents are initialized to zero, as for anonymous
925          * mapped pages.  */
926         if (new_brk > target_brk) {
927             memset(g2h(target_brk), 0, new_brk - target_brk);
928         }
929         target_brk = new_brk;
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
931         return target_brk;
932     }
933 
934     /* We need to allocate more memory after the brk... Note that
935      * we don't use MAP_FIXED because that will map over the top of
936      * any existing mapping (like the one with the host libc or qemu
937      * itself); instead we treat "mapped but at wrong address" as
938      * a failure and unmap again.
939      */
940     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
941     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
942                                         PROT_READ|PROT_WRITE,
943                                         MAP_ANON|MAP_PRIVATE, 0, 0));
944 
945     if (mapped_addr == brk_page) {
946         /* Heap contents are initialized to zero, as for anonymous
947          * mapped pages.  Technically the new pages are already
948          * initialized to zero since they *are* anonymous mapped
949          * pages; however, we have to take care with the contents that
950          * come from the remaining part of the previous page: it may
951          * contain garbage data due to a previous heap usage (grown
952          * then shrunk).  */
953         memset(g2h(target_brk), 0, brk_page - target_brk);
954 
955         target_brk = new_brk;
956         brk_page = HOST_PAGE_ALIGN(target_brk);
957         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
958             target_brk);
959         return target_brk;
960     } else if (mapped_addr != -1) {
961         /* Mapped but at wrong address, meaning there wasn't actually
962          * enough space for this brk.
963          */
964         target_munmap(mapped_addr, new_alloc_size);
965         mapped_addr = -1;
966         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
967     }
968     else {
969         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
970     }
971 
972 #if defined(TARGET_ALPHA)
973     /* We (partially) emulate OSF/1 on Alpha, which requires we
974        return a proper errno, not an unchanged brk value.  */
975     return -TARGET_ENOMEM;
976 #endif
977     /* For everything else, return the previous break. */
978     return target_brk;
979 }
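
/*
 * Worked example of the flow above (illustrative numbers): with
 * target_brk = 0x500800 and brk_page = 0x501000 on a 4 KiB-page host,
 * a guest brk(0x500c00) stays within the already-mapped page, so only
 * the new bytes are zeroed and target_brk is updated; a guest
 * brk(0x503000) instead mmaps two more pages at 0x501000 and, if the
 * kernel places the mapping exactly there, advances target_brk to
 * 0x503000 and brk_page to 0x503000.  A mapping that lands anywhere
 * else is unmapped again and treated as "out of memory".
 */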
980 
981 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
982     defined(TARGET_NR_pselect6)
983 static inline abi_long copy_from_user_fdset(fd_set *fds,
984                                             abi_ulong target_fds_addr,
985                                             int n)
986 {
987     int i, nw, j, k;
988     abi_ulong b, *target_fds;
989 
990     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
991     if (!(target_fds = lock_user(VERIFY_READ,
992                                  target_fds_addr,
993                                  sizeof(abi_ulong) * nw,
994                                  1)))
995         return -TARGET_EFAULT;
996 
997     FD_ZERO(fds);
998     k = 0;
999     for (i = 0; i < nw; i++) {
1000         /* grab the abi_ulong */
1001         __get_user(b, &target_fds[i]);
1002         for (j = 0; j < TARGET_ABI_BITS; j++) {
1003             /* check the bit inside the abi_ulong */
1004             if ((b >> j) & 1)
1005                 FD_SET(k, fds);
1006             k++;
1007         }
1008     }
1009 
1010     unlock_user(target_fds, target_fds_addr, 0);
1011 
1012     return 0;
1013 }
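
/*
 * Worked example (illustrative): for n = 70 with TARGET_ABI_BITS = 32,
 * nw = DIV_ROUND_UP(70, 32) = 3 abi_ulong words are locked and scanned.
 * Guest fd 35 sits in word i = 1 at bit j = 3 (since 35 == 1 * 32 + 3),
 * so after __get_user() has byte-swapped that word, (b >> 3) & 1 decides
 * whether FD_SET(35, fds) is called on the host-side set.
 */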
1014 
1015 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1016                                                  abi_ulong target_fds_addr,
1017                                                  int n)
1018 {
1019     if (target_fds_addr) {
1020         if (copy_from_user_fdset(fds, target_fds_addr, n))
1021             return -TARGET_EFAULT;
1022         *fds_ptr = fds;
1023     } else {
1024         *fds_ptr = NULL;
1025     }
1026     return 0;
1027 }
1028 
1029 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1030                                           const fd_set *fds,
1031                                           int n)
1032 {
1033     int i, nw, j, k;
1034     abi_long v;
1035     abi_ulong *target_fds;
1036 
1037     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1038     if (!(target_fds = lock_user(VERIFY_WRITE,
1039                                  target_fds_addr,
1040                                  sizeof(abi_ulong) * nw,
1041                                  0)))
1042         return -TARGET_EFAULT;
1043 
1044     k = 0;
1045     for (i = 0; i < nw; i++) {
1046         v = 0;
1047         for (j = 0; j < TARGET_ABI_BITS; j++) {
1048             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1049             k++;
1050         }
1051         __put_user(v, &target_fds[i]);
1052     }
1053 
1054     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1055 
1056     return 0;
1057 }
1058 #endif
1059 
1060 #if defined(__alpha__)
1061 #define HOST_HZ 1024
1062 #else
1063 #define HOST_HZ 100
1064 #endif
1065 
1066 static inline abi_long host_to_target_clock_t(long ticks)
1067 {
1068 #if HOST_HZ == TARGET_HZ
1069     return ticks;
1070 #else
1071     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1072 #endif
1073 }
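
/*
 * Worked example (illustrative): an Alpha host reports clock ticks at
 * HOST_HZ = 1024, so 2048 host ticks handed to a guest whose TARGET_HZ
 * is 100 become (2048 * 100) / 1024 = 200 guest ticks; both values
 * describe the same 2 seconds of CPU time, just in different units.
 */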
1074 
1075 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1076                                              const struct rusage *rusage)
1077 {
1078     struct target_rusage *target_rusage;
1079 
1080     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1081         return -TARGET_EFAULT;
1082     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1083     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1084     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1085     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1086     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1087     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1088     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1089     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1090     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1091     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1092     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1093     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1094     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1095     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1096     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1097     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1098     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1099     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1100     unlock_user_struct(target_rusage, target_addr, 1);
1101 
1102     return 0;
1103 }
1104 
1105 #ifdef TARGET_NR_setrlimit
1106 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1107 {
1108     abi_ulong target_rlim_swap;
1109     rlim_t result;
1110 
1111     target_rlim_swap = tswapal(target_rlim);
1112     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1113         return RLIM_INFINITY;
1114 
1115     result = target_rlim_swap;
1116     if (target_rlim_swap != (rlim_t)result)
1117         return RLIM_INFINITY;
1118 
1119     return result;
1120 }
1121 #endif
1122 
1123 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1124 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1125 {
1126     abi_ulong target_rlim_swap;
1127     abi_ulong result;
1128 
1129     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1130         target_rlim_swap = TARGET_RLIM_INFINITY;
1131     else
1132         target_rlim_swap = rlim;
1133     result = tswapal(target_rlim_swap);
1134 
1135     return result;
1136 }
1137 #endif
1138 
1139 static inline int target_to_host_resource(int code)
1140 {
1141     switch (code) {
1142     case TARGET_RLIMIT_AS:
1143         return RLIMIT_AS;
1144     case TARGET_RLIMIT_CORE:
1145         return RLIMIT_CORE;
1146     case TARGET_RLIMIT_CPU:
1147         return RLIMIT_CPU;
1148     case TARGET_RLIMIT_DATA:
1149         return RLIMIT_DATA;
1150     case TARGET_RLIMIT_FSIZE:
1151         return RLIMIT_FSIZE;
1152     case TARGET_RLIMIT_LOCKS:
1153         return RLIMIT_LOCKS;
1154     case TARGET_RLIMIT_MEMLOCK:
1155         return RLIMIT_MEMLOCK;
1156     case TARGET_RLIMIT_MSGQUEUE:
1157         return RLIMIT_MSGQUEUE;
1158     case TARGET_RLIMIT_NICE:
1159         return RLIMIT_NICE;
1160     case TARGET_RLIMIT_NOFILE:
1161         return RLIMIT_NOFILE;
1162     case TARGET_RLIMIT_NPROC:
1163         return RLIMIT_NPROC;
1164     case TARGET_RLIMIT_RSS:
1165         return RLIMIT_RSS;
1166     case TARGET_RLIMIT_RTPRIO:
1167         return RLIMIT_RTPRIO;
1168     case TARGET_RLIMIT_SIGPENDING:
1169         return RLIMIT_SIGPENDING;
1170     case TARGET_RLIMIT_STACK:
1171         return RLIMIT_STACK;
1172     default:
1173         return code;
1174     }
1175 }
1176 
1177 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1178                                               abi_ulong target_tv_addr)
1179 {
1180     struct target_timeval *target_tv;
1181 
1182     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1183         return -TARGET_EFAULT;
1184     }
1185 
1186     __get_user(tv->tv_sec, &target_tv->tv_sec);
1187     __get_user(tv->tv_usec, &target_tv->tv_usec);
1188 
1189     unlock_user_struct(target_tv, target_tv_addr, 0);
1190 
1191     return 0;
1192 }
1193 
1194 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1195                                             const struct timeval *tv)
1196 {
1197     struct target_timeval *target_tv;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tv->tv_sec, &target_tv->tv_sec);
1204     __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 
1206     unlock_user_struct(target_tv, target_tv_addr, 1);
1207 
1208     return 0;
1209 }
1210 
1211 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1212                                              const struct timeval *tv)
1213 {
1214     struct target__kernel_sock_timeval *target_tv;
1215 
1216     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1217         return -TARGET_EFAULT;
1218     }
1219 
1220     __put_user(tv->tv_sec, &target_tv->tv_sec);
1221     __put_user(tv->tv_usec, &target_tv->tv_usec);
1222 
1223     unlock_user_struct(target_tv, target_tv_addr, 1);
1224 
1225     return 0;
1226 }
1227 
1228 #if defined(TARGET_NR_futex) || \
1229     defined(TARGET_NR_rt_sigtimedwait) || \
1230     defined(TARGET_NR_pselect6) || \
1231     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1232     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1233     defined(TARGET_NR_mq_timedreceive)
1234 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1235                                                abi_ulong target_addr)
1236 {
1237     struct target_timespec *target_ts;
1238 
1239     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1240         return -TARGET_EFAULT;
1241     }
1242     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1243     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1244     unlock_user_struct(target_ts, target_addr, 0);
1245     return 0;
1246 }
1247 #endif
1248 
1249 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1250 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1251                                                  abi_ulong target_addr)
1252 {
1253     struct target__kernel_timespec *target_ts;
1254 
1255     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1256         return -TARGET_EFAULT;
1257     }
1258     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1259     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1260     unlock_user_struct(target_ts, target_addr, 0);
1261     return 0;
1262 }
1263 #endif
1264 
1265 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1266                                                struct timespec *host_ts)
1267 {
1268     struct target_timespec *target_ts;
1269 
1270     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1271         return -TARGET_EFAULT;
1272     }
1273     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1274     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1275     unlock_user_struct(target_ts, target_addr, 1);
1276     return 0;
1277 }
1278 
1279 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1280                                                  struct timespec *host_ts)
1281 {
1282     struct target__kernel_timespec *target_ts;
1283 
1284     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1285         return -TARGET_EFAULT;
1286     }
1287     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1288     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1289     unlock_user_struct(target_ts, target_addr, 1);
1290     return 0;
1291 }
1292 
1293 #if defined(TARGET_NR_gettimeofday)
1294 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1295                                              struct timezone *tz)
1296 {
1297     struct target_timezone *target_tz;
1298 
1299     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1300         return -TARGET_EFAULT;
1301     }
1302 
1303     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1304     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1305 
1306     unlock_user_struct(target_tz, target_tz_addr, 1);
1307 
1308     return 0;
1309 }
1310 #endif
1311 
1312 #if defined(TARGET_NR_settimeofday)
1313 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1314                                                abi_ulong target_tz_addr)
1315 {
1316     struct target_timezone *target_tz;
1317 
1318     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1319         return -TARGET_EFAULT;
1320     }
1321 
1322     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1323     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1324 
1325     unlock_user_struct(target_tz, target_tz_addr, 0);
1326 
1327     return 0;
1328 }
1329 #endif
1330 
1331 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1332 #include <mqueue.h>
1333 
1334 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1335                                               abi_ulong target_mq_attr_addr)
1336 {
1337     struct target_mq_attr *target_mq_attr;
1338 
1339     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1340                           target_mq_attr_addr, 1))
1341         return -TARGET_EFAULT;
1342 
1343     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1344     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1345     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1346     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1347 
1348     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1349 
1350     return 0;
1351 }
1352 
1353 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1354                                             const struct mq_attr *attr)
1355 {
1356     struct target_mq_attr *target_mq_attr;
1357 
1358     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1359                           target_mq_attr_addr, 0))
1360         return -TARGET_EFAULT;
1361 
1362     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1363     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1364     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1365     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1366 
1367     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1368 
1369     return 0;
1370 }
1371 #endif
1372 
1373 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1374 /* do_select() must return target values and target errnos. */
1375 static abi_long do_select(int n,
1376                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1377                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1378 {
1379     fd_set rfds, wfds, efds;
1380     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1381     struct timeval tv;
1382     struct timespec ts, *ts_ptr;
1383     abi_long ret;
1384 
1385     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1386     if (ret) {
1387         return ret;
1388     }
1389     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397 
1398     if (target_tv_addr) {
1399         if (copy_from_user_timeval(&tv, target_tv_addr))
1400             return -TARGET_EFAULT;
1401         ts.tv_sec = tv.tv_sec;
1402         ts.tv_nsec = tv.tv_usec * 1000;
1403         ts_ptr = &ts;
1404     } else {
1405         ts_ptr = NULL;
1406     }
1407 
1408     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1409                                   ts_ptr, NULL));
1410 
1411     if (!is_error(ret)) {
1412         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1413             return -TARGET_EFAULT;
1414         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1415             return -TARGET_EFAULT;
1416         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1417             return -TARGET_EFAULT;
1418 
1419         if (target_tv_addr) {
1420             tv.tv_sec = ts.tv_sec;
1421             tv.tv_usec = ts.tv_nsec / 1000;
1422             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1423                 return -TARGET_EFAULT;
1424             }
1425         }
1426     }
1427 
1428     return ret;
1429 }
1430 
1431 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1432 static abi_long do_old_select(abi_ulong arg1)
1433 {
1434     struct target_sel_arg_struct *sel;
1435     abi_ulong inp, outp, exp, tvp;
1436     long nsel;
1437 
1438     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1439         return -TARGET_EFAULT;
1440     }
1441 
1442     nsel = tswapal(sel->n);
1443     inp = tswapal(sel->inp);
1444     outp = tswapal(sel->outp);
1445     exp = tswapal(sel->exp);
1446     tvp = tswapal(sel->tvp);
1447 
1448     unlock_user_struct(sel, arg1, 0);
1449 
1450     return do_select(nsel, inp, outp, exp, tvp);
1451 }
1452 #endif
1453 #endif
1454 
1455 static abi_long do_pipe2(int host_pipe[], int flags)
1456 {
1457 #ifdef CONFIG_PIPE2
1458     return pipe2(host_pipe, flags);
1459 #else
1460     return -ENOSYS;
1461 #endif
1462 }
1463 
1464 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1465                         int flags, int is_pipe2)
1466 {
1467     int host_pipe[2];
1468     abi_long ret;
1469     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1470 
1471     if (is_error(ret))
1472         return get_errno(ret);
1473 
1474     /* Several targets have special calling conventions for the original
1475        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1476     if (!is_pipe2) {
1477 #if defined(TARGET_ALPHA)
1478         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1479         return host_pipe[0];
1480 #elif defined(TARGET_MIPS)
1481         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1482         return host_pipe[0];
1483 #elif defined(TARGET_SH4)
1484         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1485         return host_pipe[0];
1486 #elif defined(TARGET_SPARC)
1487         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1488         return host_pipe[0];
1489 #endif
1490     }
1491 
1492     if (put_user_s32(host_pipe[0], pipedes)
1493         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1494         return -TARGET_EFAULT;
1495     return get_errno(ret);
1496 }
1497 
1498 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1499                                               abi_ulong target_addr,
1500                                               socklen_t len)
1501 {
1502     struct target_ip_mreqn *target_smreqn;
1503 
1504     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1505     if (!target_smreqn)
1506         return -TARGET_EFAULT;
1507     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1508     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1509     if (len == sizeof(struct target_ip_mreqn))
1510         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1511     unlock_user(target_smreqn, target_addr, 0);
1512 
1513     return 0;
1514 }
1515 
1516 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1517                                                abi_ulong target_addr,
1518                                                socklen_t len)
1519 {
1520     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1521     sa_family_t sa_family;
1522     struct target_sockaddr *target_saddr;
1523 
1524     if (fd_trans_target_to_host_addr(fd)) {
1525         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1526     }
1527 
1528     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1529     if (!target_saddr)
1530         return -TARGET_EFAULT;
1531 
1532     sa_family = tswap16(target_saddr->sa_family);
1533 
1534     /* Oops. The caller might send an incomplete sun_path; sun_path
1535      * must be terminated by \0 (see the manual page), but
1536      * unfortunately it is quite common to specify sockaddr_un
1537      * length as "strlen(x->sun_path)" while it should be
1538      * "strlen(...) + 1". We'll fix that here if needed.
1539      * The Linux kernel has a similar feature.
1540      */
1541 
1542     if (sa_family == AF_UNIX) {
1543         if (len < unix_maxlen && len > 0) {
1544             char *cp = (char*)target_saddr;
1545 
1546             if (cp[len - 1] && !cp[len])
1547                 len++;
1548         }
1549         if (len > unix_maxlen)
1550             len = unix_maxlen;
1551     }
1552 
1553     memcpy(addr, target_saddr, len);
1554     addr->sa_family = sa_family;
1555     if (sa_family == AF_NETLINK) {
1556         struct sockaddr_nl *nladdr;
1557 
1558         nladdr = (struct sockaddr_nl *)addr;
1559         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1560         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1561     } else if (sa_family == AF_PACKET) {
1562         struct target_sockaddr_ll *lladdr;
1563 
1564         lladdr = (struct target_sockaddr_ll *)addr;
1565         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1566         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1567     }
1568     unlock_user(target_saddr, target_addr, 0);
1569 
1570     return 0;
1571 }
1572 
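/*
 * Copy a host sockaddr back to guest memory, byte-swapping sa_family and the
 * multi-byte AF_NETLINK, AF_PACKET and AF_INET6 fields whenever the guest
 * buffer is large enough to hold them.
 */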
1573 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1574                                                struct sockaddr *addr,
1575                                                socklen_t len)
1576 {
1577     struct target_sockaddr *target_saddr;
1578 
1579     if (len == 0) {
1580         return 0;
1581     }
1582     assert(addr);
1583 
1584     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1585     if (!target_saddr)
1586         return -TARGET_EFAULT;
1587     memcpy(target_saddr, addr, len);
1588     if (len >= offsetof(struct target_sockaddr, sa_family) +
1589         sizeof(target_saddr->sa_family)) {
1590         target_saddr->sa_family = tswap16(addr->sa_family);
1591     }
1592     if (addr->sa_family == AF_NETLINK &&
1593         len >= sizeof(struct target_sockaddr_nl)) {
1594         struct target_sockaddr_nl *target_nl =
1595                (struct target_sockaddr_nl *)target_saddr;
1596         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1597         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1598     } else if (addr->sa_family == AF_PACKET) {
1599         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1600         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1601         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1602     } else if (addr->sa_family == AF_INET6 &&
1603                len >= sizeof(struct target_sockaddr_in6)) {
1604         struct target_sockaddr_in6 *target_in6 =
1605                (struct target_sockaddr_in6 *)target_saddr;
1606         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1607     }
1608     unlock_user(target_saddr, target_addr, len);
1609 
1610     return 0;
1611 }
1612 
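/*
 * Convert the guest's ancillary data (msg_control) into the host cmsg buffer
 * the caller has already allocated in msgh.  Only SCM_RIGHTS and
 * SCM_CREDENTIALS get real conversion; anything else is copied as-is with a
 * LOG_UNIMP warning.
 */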
1613 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1614                                            struct target_msghdr *target_msgh)
1615 {
1616     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1617     abi_long msg_controllen;
1618     abi_ulong target_cmsg_addr;
1619     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1620     socklen_t space = 0;
1621 
1622     msg_controllen = tswapal(target_msgh->msg_controllen);
1623     if (msg_controllen < sizeof (struct target_cmsghdr))
1624         goto the_end;
1625     target_cmsg_addr = tswapal(target_msgh->msg_control);
1626     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1627     target_cmsg_start = target_cmsg;
1628     if (!target_cmsg)
1629         return -TARGET_EFAULT;
1630 
1631     while (cmsg && target_cmsg) {
1632         void *data = CMSG_DATA(cmsg);
1633         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1634 
1635         int len = tswapal(target_cmsg->cmsg_len)
1636             - sizeof(struct target_cmsghdr);
1637 
1638         space += CMSG_SPACE(len);
1639         if (space > msgh->msg_controllen) {
1640             space -= CMSG_SPACE(len);
1641             /* This is a QEMU bug, since we allocated the payload
1642              * area ourselves (unlike overflow in host-to-target
1643              * conversion, which is just the guest giving us a buffer
1644              * that's too small). It can't happen for the payload types
1645              * we currently support; if it becomes an issue in future
1646              * we would need to improve our allocation strategy to
1647              * something more intelligent than "twice the size of the
1648              * target buffer we're reading from".
1649              */
1650             qemu_log_mask(LOG_UNIMP,
1651                           ("Unsupported ancillary data %d/%d: "
1652                            "unhandled msg size\n"),
1653                           tswap32(target_cmsg->cmsg_level),
1654                           tswap32(target_cmsg->cmsg_type));
1655             break;
1656         }
1657 
1658         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1659             cmsg->cmsg_level = SOL_SOCKET;
1660         } else {
1661             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1662         }
1663         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1664         cmsg->cmsg_len = CMSG_LEN(len);
1665 
1666         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1667             int *fd = (int *)data;
1668             int *target_fd = (int *)target_data;
1669             int i, numfds = len / sizeof(int);
1670 
1671             for (i = 0; i < numfds; i++) {
1672                 __get_user(fd[i], target_fd + i);
1673             }
1674         } else if (cmsg->cmsg_level == SOL_SOCKET
1675                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1676             struct ucred *cred = (struct ucred *)data;
1677             struct target_ucred *target_cred =
1678                 (struct target_ucred *)target_data;
1679 
1680             __get_user(cred->pid, &target_cred->pid);
1681             __get_user(cred->uid, &target_cred->uid);
1682             __get_user(cred->gid, &target_cred->gid);
1683         } else {
1684             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1685                           cmsg->cmsg_level, cmsg->cmsg_type);
1686             memcpy(data, target_data, len);
1687         }
1688 
1689         cmsg = CMSG_NXTHDR(msgh, cmsg);
1690         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1691                                          target_cmsg_start);
1692     }
1693     unlock_user(target_cmsg, target_cmsg_addr, 0);
1694  the_end:
1695     msgh->msg_controllen = space;
1696     return 0;
1697 }
1698 
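/*
 * Convert host ancillary data received from the kernel back into the guest's
 * msg_control buffer, truncating (and setting MSG_CTRUNC) when the guest
 * buffer is too small, as Linux's put_cmsg() does.
 */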
1699 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1700                                            struct msghdr *msgh)
1701 {
1702     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1703     abi_long msg_controllen;
1704     abi_ulong target_cmsg_addr;
1705     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1706     socklen_t space = 0;
1707 
1708     msg_controllen = tswapal(target_msgh->msg_controllen);
1709     if (msg_controllen < sizeof (struct target_cmsghdr))
1710         goto the_end;
1711     target_cmsg_addr = tswapal(target_msgh->msg_control);
1712     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1713     target_cmsg_start = target_cmsg;
1714     if (!target_cmsg)
1715         return -TARGET_EFAULT;
1716 
1717     while (cmsg && target_cmsg) {
1718         void *data = CMSG_DATA(cmsg);
1719         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1720 
1721         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1722         int tgt_len, tgt_space;
1723 
1724         /* We never copy a half-header but may copy half-data;
1725          * this is Linux's behaviour in put_cmsg(). Note that
1726          * truncation here is a guest problem (which we report
1727          * to the guest via the CTRUNC bit), unlike truncation
1728          * in target_to_host_cmsg, which is a QEMU bug.
1729          */
1730         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1731             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1732             break;
1733         }
1734 
1735         if (cmsg->cmsg_level == SOL_SOCKET) {
1736             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1737         } else {
1738             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1739         }
1740         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1741 
1742         /* Payload types which need a different size of payload on
1743          * the target must adjust tgt_len here.
1744          */
1745         tgt_len = len;
1746         switch (cmsg->cmsg_level) {
1747         case SOL_SOCKET:
1748             switch (cmsg->cmsg_type) {
1749             case SO_TIMESTAMP:
1750                 tgt_len = sizeof(struct target_timeval);
1751                 break;
1752             default:
1753                 break;
1754             }
1755             break;
1756         default:
1757             break;
1758         }
1759 
1760         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1761             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1762             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1763         }
1764 
1765         /* We must now copy-and-convert len bytes of payload
1766          * into tgt_len bytes of destination space. Bear in mind
1767          * that in both source and destination we may be dealing
1768          * with a truncated value!
1769          */
1770         switch (cmsg->cmsg_level) {
1771         case SOL_SOCKET:
1772             switch (cmsg->cmsg_type) {
1773             case SCM_RIGHTS:
1774             {
1775                 int *fd = (int *)data;
1776                 int *target_fd = (int *)target_data;
1777                 int i, numfds = tgt_len / sizeof(int);
1778 
1779                 for (i = 0; i < numfds; i++) {
1780                     __put_user(fd[i], target_fd + i);
1781                 }
1782                 break;
1783             }
1784             case SO_TIMESTAMP:
1785             {
1786                 struct timeval *tv = (struct timeval *)data;
1787                 struct target_timeval *target_tv =
1788                     (struct target_timeval *)target_data;
1789 
1790                 if (len != sizeof(struct timeval) ||
1791                     tgt_len != sizeof(struct target_timeval)) {
1792                     goto unimplemented;
1793                 }
1794 
1795                 /* copy struct timeval to target */
1796                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1797                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1798                 break;
1799             }
1800             case SCM_CREDENTIALS:
1801             {
1802                 struct ucred *cred = (struct ucred *)data;
1803                 struct target_ucred *target_cred =
1804                     (struct target_ucred *)target_data;
1805 
1806                 __put_user(cred->pid, &target_cred->pid);
1807                 __put_user(cred->uid, &target_cred->uid);
1808                 __put_user(cred->gid, &target_cred->gid);
1809                 break;
1810             }
1811             default:
1812                 goto unimplemented;
1813             }
1814             break;
1815 
1816         case SOL_IP:
1817             switch (cmsg->cmsg_type) {
1818             case IP_TTL:
1819             {
1820                 uint32_t *v = (uint32_t *)data;
1821                 uint32_t *t_int = (uint32_t *)target_data;
1822 
1823                 if (len != sizeof(uint32_t) ||
1824                     tgt_len != sizeof(uint32_t)) {
1825                     goto unimplemented;
1826                 }
1827                 __put_user(*v, t_int);
1828                 break;
1829             }
1830             case IP_RECVERR:
1831             {
1832                 struct errhdr_t {
1833                    struct sock_extended_err ee;
1834                    struct sockaddr_in offender;
1835                 };
1836                 struct errhdr_t *errh = (struct errhdr_t *)data;
1837                 struct errhdr_t *target_errh =
1838                     (struct errhdr_t *)target_data;
1839 
1840                 if (len != sizeof(struct errhdr_t) ||
1841                     tgt_len != sizeof(struct errhdr_t)) {
1842                     goto unimplemented;
1843                 }
1844                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1845                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1846                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1847                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1848                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1849                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1850                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1851                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1852                     (void *) &errh->offender, sizeof(errh->offender));
1853                 break;
1854             }
1855             default:
1856                 goto unimplemented;
1857             }
1858             break;
1859 
1860         case SOL_IPV6:
1861             switch (cmsg->cmsg_type) {
1862             case IPV6_HOPLIMIT:
1863             {
1864                 uint32_t *v = (uint32_t *)data;
1865                 uint32_t *t_int = (uint32_t *)target_data;
1866 
1867                 if (len != sizeof(uint32_t) ||
1868                     tgt_len != sizeof(uint32_t)) {
1869                     goto unimplemented;
1870                 }
1871                 __put_user(*v, t_int);
1872                 break;
1873             }
1874             case IPV6_RECVERR:
1875             {
1876                 struct errhdr6_t {
1877                    struct sock_extended_err ee;
1878                    struct sockaddr_in6 offender;
1879                 };
1880                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1881                 struct errhdr6_t *target_errh =
1882                     (struct errhdr6_t *)target_data;
1883 
1884                 if (len != sizeof(struct errhdr6_t) ||
1885                     tgt_len != sizeof(struct errhdr6_t)) {
1886                     goto unimplemented;
1887                 }
1888                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1889                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1890                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1891                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1892                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1893                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1894                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1895                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1896                     (void *) &errh->offender, sizeof(errh->offender));
1897                 break;
1898             }
1899             default:
1900                 goto unimplemented;
1901             }
1902             break;
1903 
1904         default:
1905         unimplemented:
1906             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1907                           cmsg->cmsg_level, cmsg->cmsg_type);
1908             memcpy(target_data, data, MIN(len, tgt_len));
1909             if (tgt_len > len) {
1910                 memset(target_data + len, 0, tgt_len - len);
1911             }
1912         }
1913 
1914         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1915         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1916         if (msg_controllen < tgt_space) {
1917             tgt_space = msg_controllen;
1918         }
1919         msg_controllen -= tgt_space;
1920         space += tgt_space;
1921         cmsg = CMSG_NXTHDR(msgh, cmsg);
1922         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1923                                          target_cmsg_start);
1924     }
1925     unlock_user(target_cmsg, target_cmsg_addr, space);
1926  the_end:
1927     target_msgh->msg_controllen = tswapal(space);
1928     return 0;
1929 }
1930 
1931 /* do_setsockopt() Must return target values and target errnos. */
1932 static abi_long do_setsockopt(int sockfd, int level, int optname,
1933                               abi_ulong optval_addr, socklen_t optlen)
1934 {
1935     abi_long ret;
1936     int val;
1937     struct ip_mreqn *ip_mreq;
1938     struct ip_mreq_source *ip_mreq_source;
1939 
1940     switch(level) {
1941     case SOL_TCP:
1942         /* TCP options all take an 'int' value.  */
1943         if (optlen < sizeof(uint32_t))
1944             return -TARGET_EINVAL;
1945 
1946         if (get_user_u32(val, optval_addr))
1947             return -TARGET_EFAULT;
1948         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1949         break;
1950     case SOL_IP:
1951         switch(optname) {
1952         case IP_TOS:
1953         case IP_TTL:
1954         case IP_HDRINCL:
1955         case IP_ROUTER_ALERT:
1956         case IP_RECVOPTS:
1957         case IP_RETOPTS:
1958         case IP_PKTINFO:
1959         case IP_MTU_DISCOVER:
1960         case IP_RECVERR:
1961         case IP_RECVTTL:
1962         case IP_RECVTOS:
1963 #ifdef IP_FREEBIND
1964         case IP_FREEBIND:
1965 #endif
1966         case IP_MULTICAST_TTL:
1967         case IP_MULTICAST_LOOP:
1968             val = 0;
1969             if (optlen >= sizeof(uint32_t)) {
1970                 if (get_user_u32(val, optval_addr))
1971                     return -TARGET_EFAULT;
1972             } else if (optlen >= 1) {
1973                 if (get_user_u8(val, optval_addr))
1974                     return -TARGET_EFAULT;
1975             }
1976             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1977             break;
1978         case IP_ADD_MEMBERSHIP:
1979         case IP_DROP_MEMBERSHIP:
1980             if (optlen < sizeof (struct target_ip_mreq) ||
1981                 optlen > sizeof (struct target_ip_mreqn))
1982                 return -TARGET_EINVAL;
1983 
1984             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1985             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1986             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1987             break;
1988 
1989         case IP_BLOCK_SOURCE:
1990         case IP_UNBLOCK_SOURCE:
1991         case IP_ADD_SOURCE_MEMBERSHIP:
1992         case IP_DROP_SOURCE_MEMBERSHIP:
1993             if (optlen != sizeof (struct target_ip_mreq_source))
1994                 return -TARGET_EINVAL;
1995 
1996             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1997             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1998             unlock_user(ip_mreq_source, optval_addr, 0);
1999             break;
2000 
2001         default:
2002             goto unimplemented;
2003         }
2004         break;
2005     case SOL_IPV6:
2006         switch (optname) {
2007         case IPV6_MTU_DISCOVER:
2008         case IPV6_MTU:
2009         case IPV6_V6ONLY:
2010         case IPV6_RECVPKTINFO:
2011         case IPV6_UNICAST_HOPS:
2012         case IPV6_MULTICAST_HOPS:
2013         case IPV6_MULTICAST_LOOP:
2014         case IPV6_RECVERR:
2015         case IPV6_RECVHOPLIMIT:
2016         case IPV6_2292HOPLIMIT:
2017         case IPV6_CHECKSUM:
2018         case IPV6_ADDRFORM:
2019         case IPV6_2292PKTINFO:
2020         case IPV6_RECVTCLASS:
2021         case IPV6_RECVRTHDR:
2022         case IPV6_2292RTHDR:
2023         case IPV6_RECVHOPOPTS:
2024         case IPV6_2292HOPOPTS:
2025         case IPV6_RECVDSTOPTS:
2026         case IPV6_2292DSTOPTS:
2027         case IPV6_TCLASS:
2028 #ifdef IPV6_RECVPATHMTU
2029         case IPV6_RECVPATHMTU:
2030 #endif
2031 #ifdef IPV6_TRANSPARENT
2032         case IPV6_TRANSPARENT:
2033 #endif
2034 #ifdef IPV6_FREEBIND
2035         case IPV6_FREEBIND:
2036 #endif
2037 #ifdef IPV6_RECVORIGDSTADDR
2038         case IPV6_RECVORIGDSTADDR:
2039 #endif
2040             val = 0;
2041             if (optlen < sizeof(uint32_t)) {
2042                 return -TARGET_EINVAL;
2043             }
2044             if (get_user_u32(val, optval_addr)) {
2045                 return -TARGET_EFAULT;
2046             }
2047             ret = get_errno(setsockopt(sockfd, level, optname,
2048                                        &val, sizeof(val)));
2049             break;
2050         case IPV6_PKTINFO:
2051         {
2052             struct in6_pktinfo pki;
2053 
2054             if (optlen < sizeof(pki)) {
2055                 return -TARGET_EINVAL;
2056             }
2057 
2058             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2059                 return -TARGET_EFAULT;
2060             }
2061 
2062             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2063 
2064             ret = get_errno(setsockopt(sockfd, level, optname,
2065                                        &pki, sizeof(pki)));
2066             break;
2067         }
2068         case IPV6_ADD_MEMBERSHIP:
2069         case IPV6_DROP_MEMBERSHIP:
2070         {
2071             struct ipv6_mreq ipv6mreq;
2072 
2073             if (optlen < sizeof(ipv6mreq)) {
2074                 return -TARGET_EINVAL;
2075             }
2076 
2077             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2078                 return -TARGET_EFAULT;
2079             }
2080 
2081             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2082 
2083             ret = get_errno(setsockopt(sockfd, level, optname,
2084                                        &ipv6mreq, sizeof(ipv6mreq)));
2085             break;
2086         }
2087         default:
2088             goto unimplemented;
2089         }
2090         break;
2091     case SOL_ICMPV6:
2092         switch (optname) {
2093         case ICMPV6_FILTER:
2094         {
2095             struct icmp6_filter icmp6f;
2096 
2097             if (optlen > sizeof(icmp6f)) {
2098                 optlen = sizeof(icmp6f);
2099             }
2100 
2101             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2102                 return -TARGET_EFAULT;
2103             }
2104 
2105             for (val = 0; val < 8; val++) {
2106                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2107             }
2108 
2109             ret = get_errno(setsockopt(sockfd, level, optname,
2110                                        &icmp6f, optlen));
2111             break;
2112         }
2113         default:
2114             goto unimplemented;
2115         }
2116         break;
2117     case SOL_RAW:
2118         switch (optname) {
2119         case ICMP_FILTER:
2120         case IPV6_CHECKSUM:
2121             /* these take a u32 value */
2122             if (optlen < sizeof(uint32_t)) {
2123                 return -TARGET_EINVAL;
2124             }
2125 
2126             if (get_user_u32(val, optval_addr)) {
2127                 return -TARGET_EFAULT;
2128             }
2129             ret = get_errno(setsockopt(sockfd, level, optname,
2130                                        &val, sizeof(val)));
2131             break;
2132 
2133         default:
2134             goto unimplemented;
2135         }
2136         break;
2137 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2138     case SOL_ALG:
2139         switch (optname) {
2140         case ALG_SET_KEY:
2141         {
2142             char *alg_key = g_malloc(optlen);
2143 
2144             if (!alg_key) {
2145                 return -TARGET_ENOMEM;
2146             }
2147             if (copy_from_user(alg_key, optval_addr, optlen)) {
2148                 g_free(alg_key);
2149                 return -TARGET_EFAULT;
2150             }
2151             ret = get_errno(setsockopt(sockfd, level, optname,
2152                                        alg_key, optlen));
2153             g_free(alg_key);
2154             break;
2155         }
2156         case ALG_SET_AEAD_AUTHSIZE:
2157         {
2158             ret = get_errno(setsockopt(sockfd, level, optname,
2159                                        NULL, optlen));
2160             break;
2161         }
2162         default:
2163             goto unimplemented;
2164         }
2165         break;
2166 #endif
2167     case TARGET_SOL_SOCKET:
2168         switch (optname) {
2169         case TARGET_SO_RCVTIMEO:
2170         {
2171                 struct timeval tv;
2172 
2173                 optname = SO_RCVTIMEO;
2174 
2175 set_timeout:
2176                 if (optlen != sizeof(struct target_timeval)) {
2177                     return -TARGET_EINVAL;
2178                 }
2179 
2180                 if (copy_from_user_timeval(&tv, optval_addr)) {
2181                     return -TARGET_EFAULT;
2182                 }
2183 
2184                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2185                                 &tv, sizeof(tv)));
2186                 return ret;
2187         }
2188         case TARGET_SO_SNDTIMEO:
2189                 optname = SO_SNDTIMEO;
2190                 goto set_timeout;
2191         case TARGET_SO_ATTACH_FILTER:
2192         {
2193                 struct target_sock_fprog *tfprog;
2194                 struct target_sock_filter *tfilter;
2195                 struct sock_fprog fprog;
2196                 struct sock_filter *filter;
2197                 int i;
2198 
2199                 if (optlen != sizeof(*tfprog)) {
2200                     return -TARGET_EINVAL;
2201                 }
2202                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2203                     return -TARGET_EFAULT;
2204                 }
2205                 if (!lock_user_struct(VERIFY_READ, tfilter,
2206                                       tswapal(tfprog->filter), 0)) {
2207                     unlock_user_struct(tfprog, optval_addr, 1);
2208                     return -TARGET_EFAULT;
2209                 }
2210 
2211                 fprog.len = tswap16(tfprog->len);
2212                 filter = g_try_new(struct sock_filter, fprog.len);
2213                 if (filter == NULL) {
2214                     unlock_user_struct(tfilter, tfprog->filter, 1);
2215                     unlock_user_struct(tfprog, optval_addr, 1);
2216                     return -TARGET_ENOMEM;
2217                 }
2218                 for (i = 0; i < fprog.len; i++) {
2219                     filter[i].code = tswap16(tfilter[i].code);
2220                     filter[i].jt = tfilter[i].jt;
2221                     filter[i].jf = tfilter[i].jf;
2222                     filter[i].k = tswap32(tfilter[i].k);
2223                 }
2224                 fprog.filter = filter;
2225 
2226                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2227                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2228                 g_free(filter);
2229 
2230                 unlock_user_struct(tfilter, tfprog->filter, 1);
2231                 unlock_user_struct(tfprog, optval_addr, 1);
2232                 return ret;
2233         }
2234         case TARGET_SO_BINDTODEVICE:
2235         {
2236                 char *dev_ifname, *addr_ifname;
2237 
2238                 if (optlen > IFNAMSIZ - 1) {
2239                     optlen = IFNAMSIZ - 1;
2240                 }
2241                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2242                 if (!dev_ifname) {
2243                     return -TARGET_EFAULT;
2244                 }
2245                 optname = SO_BINDTODEVICE;
2246                 addr_ifname = alloca(IFNAMSIZ);
2247                 memcpy(addr_ifname, dev_ifname, optlen);
2248                 addr_ifname[optlen] = 0;
2249                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2250                                            addr_ifname, optlen));
2251                 unlock_user(dev_ifname, optval_addr, 0);
2252                 return ret;
2253         }
2254         case TARGET_SO_LINGER:
2255         {
2256                 struct linger lg;
2257                 struct target_linger *tlg;
2258 
2259                 if (optlen != sizeof(struct target_linger)) {
2260                     return -TARGET_EINVAL;
2261                 }
2262                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2263                     return -TARGET_EFAULT;
2264                 }
2265                 __get_user(lg.l_onoff, &tlg->l_onoff);
2266                 __get_user(lg.l_linger, &tlg->l_linger);
2267                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2268                                 &lg, sizeof(lg)));
2269                 unlock_user_struct(tlg, optval_addr, 0);
2270                 return ret;
2271         }
2272             /* Options with 'int' argument.  */
2273         case TARGET_SO_DEBUG:
2274                 optname = SO_DEBUG;
2275                 break;
2276         case TARGET_SO_REUSEADDR:
2277                 optname = SO_REUSEADDR;
2278                 break;
2279 #ifdef SO_REUSEPORT
2280         case TARGET_SO_REUSEPORT:
2281                 optname = SO_REUSEPORT;
2282                 break;
2283 #endif
2284         case TARGET_SO_TYPE:
2285                 optname = SO_TYPE;
2286                 break;
2287         case TARGET_SO_ERROR:
2288                 optname = SO_ERROR;
2289                 break;
2290         case TARGET_SO_DONTROUTE:
2291                 optname = SO_DONTROUTE;
2292                 break;
2293         case TARGET_SO_BROADCAST:
2294                 optname = SO_BROADCAST;
2295                 break;
2296         case TARGET_SO_SNDBUF:
2297                 optname = SO_SNDBUF;
2298                 break;
2299         case TARGET_SO_SNDBUFFORCE:
2300                 optname = SO_SNDBUFFORCE;
2301                 break;
2302         case TARGET_SO_RCVBUF:
2303                 optname = SO_RCVBUF;
2304                 break;
2305         case TARGET_SO_RCVBUFFORCE:
2306                 optname = SO_RCVBUFFORCE;
2307                 break;
2308         case TARGET_SO_KEEPALIVE:
2309                 optname = SO_KEEPALIVE;
2310                 break;
2311         case TARGET_SO_OOBINLINE:
2312                 optname = SO_OOBINLINE;
2313                 break;
2314         case TARGET_SO_NO_CHECK:
2315                 optname = SO_NO_CHECK;
2316                 break;
2317         case TARGET_SO_PRIORITY:
2318                 optname = SO_PRIORITY;
2319                 break;
2320 #ifdef SO_BSDCOMPAT
2321         case TARGET_SO_BSDCOMPAT:
2322                 optname = SO_BSDCOMPAT;
2323                 break;
2324 #endif
2325         case TARGET_SO_PASSCRED:
2326                 optname = SO_PASSCRED;
2327                 break;
2328         case TARGET_SO_PASSSEC:
2329                 optname = SO_PASSSEC;
2330                 break;
2331         case TARGET_SO_TIMESTAMP:
2332                 optname = SO_TIMESTAMP;
2333                 break;
2334         case TARGET_SO_RCVLOWAT:
2335                 optname = SO_RCVLOWAT;
2336                 break;
2337         default:
2338             goto unimplemented;
2339         }
2340         if (optlen < sizeof(uint32_t))
2341             return -TARGET_EINVAL;
2342 
2343         if (get_user_u32(val, optval_addr))
2344             return -TARGET_EFAULT;
2345         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2346         break;
2347 #ifdef SOL_NETLINK
2348     case SOL_NETLINK:
2349         switch (optname) {
2350         case NETLINK_PKTINFO:
2351         case NETLINK_ADD_MEMBERSHIP:
2352         case NETLINK_DROP_MEMBERSHIP:
2353         case NETLINK_BROADCAST_ERROR:
2354         case NETLINK_NO_ENOBUFS:
2355 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2356         case NETLINK_LISTEN_ALL_NSID:
2357         case NETLINK_CAP_ACK:
2358 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2360         case NETLINK_EXT_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2363         case NETLINK_GET_STRICT_CHK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2365             break;
2366         default:
2367             goto unimplemented;
2368         }
2369         val = 0;
2370         if (optlen < sizeof(uint32_t)) {
2371             return -TARGET_EINVAL;
2372         }
2373         if (get_user_u32(val, optval_addr)) {
2374             return -TARGET_EFAULT;
2375         }
2376         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2377                                    sizeof(val)));
2378         break;
2379 #endif /* SOL_NETLINK */
2380     default:
2381     unimplemented:
2382         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2383                       level, optname);
2384         ret = -TARGET_ENOPROTOOPT;
2385     }
2386     return ret;
2387 }
2388 
2389 /* do_getsockopt() Must return target values and target errnos. */
2390 static abi_long do_getsockopt(int sockfd, int level, int optname,
2391                               abi_ulong optval_addr, abi_ulong optlen)
2392 {
2393     abi_long ret;
2394     int len, val;
2395     socklen_t lv;
2396 
2397     switch(level) {
2398     case TARGET_SOL_SOCKET:
2399         level = SOL_SOCKET;
2400         switch (optname) {
2401         /* These don't just return a single integer */
2402         case TARGET_SO_PEERNAME:
2403             goto unimplemented;
2404         case TARGET_SO_RCVTIMEO: {
2405             struct timeval tv;
2406             socklen_t tvlen;
2407 
2408             optname = SO_RCVTIMEO;
2409 
2410 get_timeout:
2411             if (get_user_u32(len, optlen)) {
2412                 return -TARGET_EFAULT;
2413             }
2414             if (len < 0) {
2415                 return -TARGET_EINVAL;
2416             }
2417 
2418             tvlen = sizeof(tv);
2419             ret = get_errno(getsockopt(sockfd, level, optname,
2420                                        &tv, &tvlen));
2421             if (ret < 0) {
2422                 return ret;
2423             }
2424             if (len > sizeof(struct target_timeval)) {
2425                 len = sizeof(struct target_timeval);
2426             }
2427             if (copy_to_user_timeval(optval_addr, &tv)) {
2428                 return -TARGET_EFAULT;
2429             }
2430             if (put_user_u32(len, optlen)) {
2431                 return -TARGET_EFAULT;
2432             }
2433             break;
2434         }
2435         case TARGET_SO_SNDTIMEO:
2436             optname = SO_SNDTIMEO;
2437             goto get_timeout;
2438         case TARGET_SO_PEERCRED: {
2439             struct ucred cr;
2440             socklen_t crlen;
2441             struct target_ucred *tcr;
2442 
2443             if (get_user_u32(len, optlen)) {
2444                 return -TARGET_EFAULT;
2445             }
2446             if (len < 0) {
2447                 return -TARGET_EINVAL;
2448             }
2449 
2450             crlen = sizeof(cr);
2451             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2452                                        &cr, &crlen));
2453             if (ret < 0) {
2454                 return ret;
2455             }
2456             if (len > crlen) {
2457                 len = crlen;
2458             }
2459             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2460                 return -TARGET_EFAULT;
2461             }
2462             __put_user(cr.pid, &tcr->pid);
2463             __put_user(cr.uid, &tcr->uid);
2464             __put_user(cr.gid, &tcr->gid);
2465             unlock_user_struct(tcr, optval_addr, 1);
2466             if (put_user_u32(len, optlen)) {
2467                 return -TARGET_EFAULT;
2468             }
2469             break;
2470         }
2471         case TARGET_SO_PEERSEC: {
2472             char *name;
2473 
2474             if (get_user_u32(len, optlen)) {
2475                 return -TARGET_EFAULT;
2476             }
2477             if (len < 0) {
2478                 return -TARGET_EINVAL;
2479             }
2480             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2481             if (!name) {
2482                 return -TARGET_EFAULT;
2483             }
2484             lv = len;
2485             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2486                                        name, &lv));
2487             if (put_user_u32(lv, optlen)) {
2488                 ret = -TARGET_EFAULT;
2489             }
2490             unlock_user(name, optval_addr, lv);
2491             break;
2492         }
2493         case TARGET_SO_LINGER:
2494         {
2495             struct linger lg;
2496             socklen_t lglen;
2497             struct target_linger *tlg;
2498 
2499             if (get_user_u32(len, optlen)) {
2500                 return -TARGET_EFAULT;
2501             }
2502             if (len < 0) {
2503                 return -TARGET_EINVAL;
2504             }
2505 
2506             lglen = sizeof(lg);
2507             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2508                                        &lg, &lglen));
2509             if (ret < 0) {
2510                 return ret;
2511             }
2512             if (len > lglen) {
2513                 len = lglen;
2514             }
2515             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2516                 return -TARGET_EFAULT;
2517             }
2518             __put_user(lg.l_onoff, &tlg->l_onoff);
2519             __put_user(lg.l_linger, &tlg->l_linger);
2520             unlock_user_struct(tlg, optval_addr, 1);
2521             if (put_user_u32(len, optlen)) {
2522                 return -TARGET_EFAULT;
2523             }
2524             break;
2525         }
2526         /* Options with 'int' argument.  */
2527         case TARGET_SO_DEBUG:
2528             optname = SO_DEBUG;
2529             goto int_case;
2530         case TARGET_SO_REUSEADDR:
2531             optname = SO_REUSEADDR;
2532             goto int_case;
2533 #ifdef SO_REUSEPORT
2534         case TARGET_SO_REUSEPORT:
2535             optname = SO_REUSEPORT;
2536             goto int_case;
2537 #endif
2538         case TARGET_SO_TYPE:
2539             optname = SO_TYPE;
2540             goto int_case;
2541         case TARGET_SO_ERROR:
2542             optname = SO_ERROR;
2543             goto int_case;
2544         case TARGET_SO_DONTROUTE:
2545             optname = SO_DONTROUTE;
2546             goto int_case;
2547         case TARGET_SO_BROADCAST:
2548             optname = SO_BROADCAST;
2549             goto int_case;
2550         case TARGET_SO_SNDBUF:
2551             optname = SO_SNDBUF;
2552             goto int_case;
2553         case TARGET_SO_RCVBUF:
2554             optname = SO_RCVBUF;
2555             goto int_case;
2556         case TARGET_SO_KEEPALIVE:
2557             optname = SO_KEEPALIVE;
2558             goto int_case;
2559         case TARGET_SO_OOBINLINE:
2560             optname = SO_OOBINLINE;
2561             goto int_case;
2562         case TARGET_SO_NO_CHECK:
2563             optname = SO_NO_CHECK;
2564             goto int_case;
2565         case TARGET_SO_PRIORITY:
2566             optname = SO_PRIORITY;
2567             goto int_case;
2568 #ifdef SO_BSDCOMPAT
2569         case TARGET_SO_BSDCOMPAT:
2570             optname = SO_BSDCOMPAT;
2571             goto int_case;
2572 #endif
2573         case TARGET_SO_PASSCRED:
2574             optname = SO_PASSCRED;
2575             goto int_case;
2576         case TARGET_SO_TIMESTAMP:
2577             optname = SO_TIMESTAMP;
2578             goto int_case;
2579         case TARGET_SO_RCVLOWAT:
2580             optname = SO_RCVLOWAT;
2581             goto int_case;
2582         case TARGET_SO_ACCEPTCONN:
2583             optname = SO_ACCEPTCONN;
2584             goto int_case;
2585         default:
2586             goto int_case;
2587         }
2588         break;
2589     case SOL_TCP:
2590         /* TCP options all take an 'int' value.  */
2591     int_case:
2592         if (get_user_u32(len, optlen))
2593             return -TARGET_EFAULT;
2594         if (len < 0)
2595             return -TARGET_EINVAL;
2596         lv = sizeof(lv);
2597         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2598         if (ret < 0)
2599             return ret;
2600         if (optname == SO_TYPE) {
2601             val = host_to_target_sock_type(val);
2602         }
2603         if (len > lv)
2604             len = lv;
2605         if (len == 4) {
2606             if (put_user_u32(val, optval_addr))
2607                 return -TARGET_EFAULT;
2608         } else {
2609             if (put_user_u8(val, optval_addr))
2610                 return -TARGET_EFAULT;
2611         }
2612         if (put_user_u32(len, optlen))
2613             return -TARGET_EFAULT;
2614         break;
2615     case SOL_IP:
2616         switch(optname) {
2617         case IP_TOS:
2618         case IP_TTL:
2619         case IP_HDRINCL:
2620         case IP_ROUTER_ALERT:
2621         case IP_RECVOPTS:
2622         case IP_RETOPTS:
2623         case IP_PKTINFO:
2624         case IP_MTU_DISCOVER:
2625         case IP_RECVERR:
2626         case IP_RECVTOS:
2627 #ifdef IP_FREEBIND
2628         case IP_FREEBIND:
2629 #endif
2630         case IP_MULTICAST_TTL:
2631         case IP_MULTICAST_LOOP:
2632             if (get_user_u32(len, optlen))
2633                 return -TARGET_EFAULT;
2634             if (len < 0)
2635                 return -TARGET_EINVAL;
2636             lv = sizeof(lv);
2637             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2638             if (ret < 0)
2639                 return ret;
2640             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2641                 len = 1;
2642                 if (put_user_u32(len, optlen)
2643                     || put_user_u8(val, optval_addr))
2644                     return -TARGET_EFAULT;
2645             } else {
2646                 if (len > sizeof(int))
2647                     len = sizeof(int);
2648                 if (put_user_u32(len, optlen)
2649                     || put_user_u32(val, optval_addr))
2650                     return -TARGET_EFAULT;
2651             }
2652             break;
2653         default:
2654             ret = -TARGET_ENOPROTOOPT;
2655             break;
2656         }
2657         break;
2658     case SOL_IPV6:
2659         switch (optname) {
2660         case IPV6_MTU_DISCOVER:
2661         case IPV6_MTU:
2662         case IPV6_V6ONLY:
2663         case IPV6_RECVPKTINFO:
2664         case IPV6_UNICAST_HOPS:
2665         case IPV6_MULTICAST_HOPS:
2666         case IPV6_MULTICAST_LOOP:
2667         case IPV6_RECVERR:
2668         case IPV6_RECVHOPLIMIT:
2669         case IPV6_2292HOPLIMIT:
2670         case IPV6_CHECKSUM:
2671         case IPV6_ADDRFORM:
2672         case IPV6_2292PKTINFO:
2673         case IPV6_RECVTCLASS:
2674         case IPV6_RECVRTHDR:
2675         case IPV6_2292RTHDR:
2676         case IPV6_RECVHOPOPTS:
2677         case IPV6_2292HOPOPTS:
2678         case IPV6_RECVDSTOPTS:
2679         case IPV6_2292DSTOPTS:
2680         case IPV6_TCLASS:
2681 #ifdef IPV6_RECVPATHMTU
2682         case IPV6_RECVPATHMTU:
2683 #endif
2684 #ifdef IPV6_TRANSPARENT
2685         case IPV6_TRANSPARENT:
2686 #endif
2687 #ifdef IPV6_FREEBIND
2688         case IPV6_FREEBIND:
2689 #endif
2690 #ifdef IPV6_RECVORIGDSTADDR
2691         case IPV6_RECVORIGDSTADDR:
2692 #endif
2693             if (get_user_u32(len, optlen))
2694                 return -TARGET_EFAULT;
2695             if (len < 0)
2696                 return -TARGET_EINVAL;
2697             lv = sizeof(lv);
2698             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2699             if (ret < 0)
2700                 return ret;
2701             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2702                 len = 1;
2703                 if (put_user_u32(len, optlen)
2704                     || put_user_u8(val, optval_addr))
2705                     return -TARGET_EFAULT;
2706             } else {
2707                 if (len > sizeof(int))
2708                     len = sizeof(int);
2709                 if (put_user_u32(len, optlen)
2710                     || put_user_u32(val, optval_addr))
2711                     return -TARGET_EFAULT;
2712             }
2713             break;
2714         default:
2715             ret = -TARGET_ENOPROTOOPT;
2716             break;
2717         }
2718         break;
2719 #ifdef SOL_NETLINK
2720     case SOL_NETLINK:
2721         switch (optname) {
2722         case NETLINK_PKTINFO:
2723         case NETLINK_BROADCAST_ERROR:
2724         case NETLINK_NO_ENOBUFS:
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2726         case NETLINK_LISTEN_ALL_NSID:
2727         case NETLINK_CAP_ACK:
2728 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2730         case NETLINK_EXT_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2733         case NETLINK_GET_STRICT_CHK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2735             if (get_user_u32(len, optlen)) {
2736                 return -TARGET_EFAULT;
2737             }
2738             if (len != sizeof(val)) {
2739                 return -TARGET_EINVAL;
2740             }
2741             lv = len;
2742             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2743             if (ret < 0) {
2744                 return ret;
2745             }
2746             if (put_user_u32(lv, optlen)
2747                 || put_user_u32(val, optval_addr)) {
2748                 return -TARGET_EFAULT;
2749             }
2750             break;
2751 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2752         case NETLINK_LIST_MEMBERSHIPS:
2753         {
2754             uint32_t *results;
2755             int i;
2756             if (get_user_u32(len, optlen)) {
2757                 return -TARGET_EFAULT;
2758             }
2759             if (len < 0) {
2760                 return -TARGET_EINVAL;
2761             }
2762             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2763             if (!results) {
2764                 return -TARGET_EFAULT;
2765             }
2766             lv = len;
2767             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2768             if (ret < 0) {
2769                 unlock_user(results, optval_addr, 0);
2770                 return ret;
2771             }
2772             /* swap host endianness to target endianness. */
2773             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2774                 results[i] = tswap32(results[i]);
2775             }
2776             if (put_user_u32(lv, optlen)) {
2777                 return -TARGET_EFAULT;
2778             }
2779             unlock_user(results, optval_addr, 0);
2780             break;
2781         }
2782 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2783         default:
2784             goto unimplemented;
2785         }
2786         break;
2787 #endif /* SOL_NETLINK */
2788     default:
2789     unimplemented:
2790         qemu_log_mask(LOG_UNIMP,
2791                       "getsockopt level=%d optname=%d not yet supported\n",
2792                       level, optname);
2793         ret = -TARGET_EOPNOTSUPP;
2794         break;
2795     }
2796     return ret;
2797 }
2798 
2799 /* Convert target low/high pair representing file offset into the host
2800  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2801  * as the kernel doesn't handle them either.
2802  */
2803 static void target_to_host_low_high(abi_ulong tlow,
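/*
 * Worked example (hypothetical values, 32-bit target on a 64-bit host):
 * tlow = 0x89abcdef and thigh = 0x00000012 give off = 0x0000001289abcdef,
 * so *hlow = off and *hhigh = 0.  The shift is done in two halves,
 * presumably so it stays well-defined even when TARGET_LONG_BITS (or
 * HOST_LONG_BITS) equals the width of the value being shifted.
 */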
2804                                     abi_ulong thigh,
2805                                     unsigned long *hlow,
2806                                     unsigned long *hhigh)
2807 {
2808     uint64_t off = tlow |
2809         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2810         TARGET_LONG_BITS / 2;
2811 
2812     *hlow = off;
2813     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2814 }
2815 
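/*
 * Lock a guest iovec array into host memory, producing a host struct iovec
 * array.  The total length is clamped to a value the host syscall can
 * handle, and a bad buffer after the first one is turned into a zero-length
 * entry so the syscall can still perform a partial transfer.
 */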
2816 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2817                                 abi_ulong count, int copy)
2818 {
2819     struct target_iovec *target_vec;
2820     struct iovec *vec;
2821     abi_ulong total_len, max_len;
2822     int i;
2823     int err = 0;
2824     bool bad_address = false;
2825 
2826     if (count == 0) {
2827         errno = 0;
2828         return NULL;
2829     }
2830     if (count > IOV_MAX) {
2831         errno = EINVAL;
2832         return NULL;
2833     }
2834 
2835     vec = g_try_new0(struct iovec, count);
2836     if (vec == NULL) {
2837         errno = ENOMEM;
2838         return NULL;
2839     }
2840 
2841     target_vec = lock_user(VERIFY_READ, target_addr,
2842                            count * sizeof(struct target_iovec), 1);
2843     if (target_vec == NULL) {
2844         err = EFAULT;
2845         goto fail2;
2846     }
2847 
2848     /* ??? If host page size > target page size, this will result in a
2849        value larger than what we can actually support.  */
2850     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2851     total_len = 0;
2852 
2853     for (i = 0; i < count; i++) {
2854         abi_ulong base = tswapal(target_vec[i].iov_base);
2855         abi_long len = tswapal(target_vec[i].iov_len);
2856 
2857         if (len < 0) {
2858             err = EINVAL;
2859             goto fail;
2860         } else if (len == 0) {
2861             /* Zero length pointer is ignored.  */
2862             vec[i].iov_base = 0;
2863         } else {
2864             vec[i].iov_base = lock_user(type, base, len, copy);
2865             /* If the first buffer pointer is bad, this is a fault.  But
2866              * subsequent bad buffers will result in a partial write; this
2867              * is realized by filling the vector with null pointers and
2868              * zero lengths. */
2869             if (!vec[i].iov_base) {
2870                 if (i == 0) {
2871                     err = EFAULT;
2872                     goto fail;
2873                 } else {
2874                     bad_address = true;
2875                 }
2876             }
2877             if (bad_address) {
2878                 len = 0;
2879             }
2880             if (len > max_len - total_len) {
2881                 len = max_len - total_len;
2882             }
2883         }
2884         vec[i].iov_len = len;
2885         total_len += len;
2886     }
2887 
2888     unlock_user(target_vec, target_addr, 0);
2889     return vec;
2890 
2891  fail:
2892     while (--i >= 0) {
2893         if (tswapal(target_vec[i].iov_len) > 0) {
2894             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2895         }
2896     }
2897     unlock_user(target_vec, target_addr, 0);
2898  fail2:
2899     g_free(vec);
2900     errno = err;
2901     return NULL;
2902 }
2903 
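/*
 * Undo lock_iovec(): unlock every guest buffer (copying data back to the
 * guest when copy is set) and free the host iovec array.
 */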
2904 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2905                          abi_ulong count, int copy)
2906 {
2907     struct target_iovec *target_vec;
2908     int i;
2909 
2910     target_vec = lock_user(VERIFY_READ, target_addr,
2911                            count * sizeof(struct target_iovec), 1);
2912     if (target_vec) {
2913         for (i = 0; i < count; i++) {
2914             abi_ulong base = tswapal(target_vec[i].iov_base);
2915             abi_long len = tswapal(target_vec[i].iov_len);
2916             if (len < 0) {
2917                 break;
2918             }
2919             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2920         }
2921         unlock_user(target_vec, target_addr, 0);
2922     }
2923 
2924     g_free(vec);
2925 }
2926 
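/*
 * Translate the guest's socket type and SOCK_CLOEXEC/SOCK_NONBLOCK flags
 * into host values, failing with -TARGET_EINVAL when the host has no way to
 * express a requested flag.
 */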
2927 static inline int target_to_host_sock_type(int *type)
2928 {
2929     int host_type = 0;
2930     int target_type = *type;
2931 
2932     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2933     case TARGET_SOCK_DGRAM:
2934         host_type = SOCK_DGRAM;
2935         break;
2936     case TARGET_SOCK_STREAM:
2937         host_type = SOCK_STREAM;
2938         break;
2939     default:
2940         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2941         break;
2942     }
2943     if (target_type & TARGET_SOCK_CLOEXEC) {
2944 #if defined(SOCK_CLOEXEC)
2945         host_type |= SOCK_CLOEXEC;
2946 #else
2947         return -TARGET_EINVAL;
2948 #endif
2949     }
2950     if (target_type & TARGET_SOCK_NONBLOCK) {
2951 #if defined(SOCK_NONBLOCK)
2952         host_type |= SOCK_NONBLOCK;
2953 #elif !defined(O_NONBLOCK)
2954         return -TARGET_EINVAL;
2955 #endif
2956     }
2957     *type = host_type;
2958     return 0;
2959 }
2960 
2961 /* Try to emulate socket type flags after socket creation.  */
2962 static int sock_flags_fixup(int fd, int target_type)
2963 {
2964 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2965     if (target_type & TARGET_SOCK_NONBLOCK) {
2966         int flags = fcntl(fd, F_GETFL);
2967         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2968             close(fd);
2969             return -TARGET_EINVAL;
2970         }
2971     }
2972 #endif
2973     return fd;
2974 }
2975 
2976 /* do_socket() Must return target values and target errnos. */
2977 static abi_long do_socket(int domain, int type, int protocol)
2978 {
2979     int target_type = type;
2980     int ret;
2981 
2982     ret = target_to_host_sock_type(&type);
2983     if (ret) {
2984         return ret;
2985     }
2986 
2987     if (domain == PF_NETLINK && !(
2988 #ifdef CONFIG_RTNETLINK
2989          protocol == NETLINK_ROUTE ||
2990 #endif
2991          protocol == NETLINK_KOBJECT_UEVENT ||
2992          protocol == NETLINK_AUDIT)) {
2993         return -TARGET_EPFNOSUPPORT;
2994     }
2995 
2996     if (domain == AF_PACKET ||
2997         (domain == AF_INET && type == SOCK_PACKET)) {
2998         protocol = tswap16(protocol);
2999     }
3000 
3001     ret = get_errno(socket(domain, type, protocol));
3002     if (ret >= 0) {
3003         ret = sock_flags_fixup(ret, target_type);
3004         if (type == SOCK_PACKET) {
3005             /* Handle an obsolete case:
3006              * if the socket type is SOCK_PACKET, bind by name
3007              */
3008             fd_trans_register(ret, &target_packet_trans);
3009         } else if (domain == PF_NETLINK) {
3010             switch (protocol) {
3011 #ifdef CONFIG_RTNETLINK
3012             case NETLINK_ROUTE:
3013                 fd_trans_register(ret, &target_netlink_route_trans);
3014                 break;
3015 #endif
3016             case NETLINK_KOBJECT_UEVENT:
3017                 /* nothing to do: messages are strings */
3018                 break;
3019             case NETLINK_AUDIT:
3020                 fd_trans_register(ret, &target_netlink_audit_trans);
3021                 break;
3022             default:
3023                 g_assert_not_reached();
3024             }
3025         }
3026     }
3027     return ret;
3028 }
3029 
3030 /* do_bind() Must return target values and target errnos. */
3031 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3032                         socklen_t addrlen)
3033 {
3034     void *addr;
3035     abi_long ret;
3036 
3037     if ((int)addrlen < 0) {
3038         return -TARGET_EINVAL;
3039     }
3040 
3041     addr = alloca(addrlen+1);
3042 
3043     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3044     if (ret)
3045         return ret;
3046 
3047     return get_errno(bind(sockfd, addr, addrlen));
3048 }
3049 
3050 /* do_connect() Must return target values and target errnos. */
3051 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3052                            socklen_t addrlen)
3053 {
3054     void *addr;
3055     abi_long ret;
3056 
3057     if ((int)addrlen < 0) {
3058         return -TARGET_EINVAL;
3059     }
3060 
3061     addr = alloca(addrlen+1);
3062 
3063     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3064     if (ret)
3065         return ret;
3066 
3067     return get_errno(safe_connect(sockfd, addr, addrlen));
3068 }
3069 
3070 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3071 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3072                                       int flags, int send)
3073 {
3074     abi_long ret, len;
3075     struct msghdr msg;
3076     abi_ulong count;
3077     struct iovec *vec;
3078     abi_ulong target_vec;
3079 
3080     if (msgp->msg_name) {
3081         msg.msg_namelen = tswap32(msgp->msg_namelen);
3082         msg.msg_name = alloca(msg.msg_namelen+1);
3083         ret = target_to_host_sockaddr(fd, msg.msg_name,
3084                                       tswapal(msgp->msg_name),
3085                                       msg.msg_namelen);
3086         if (ret == -TARGET_EFAULT) {
3087             /* For connected sockets msg_name and msg_namelen must
3088              * be ignored, so returning EFAULT immediately is wrong.
3089              * Instead, pass a bad msg_name to the host kernel, and
3090              * let it decide whether to return EFAULT or not.
3091              */
3092             msg.msg_name = (void *)-1;
3093         } else if (ret) {
3094             goto out2;
3095         }
3096     } else {
3097         msg.msg_name = NULL;
3098         msg.msg_namelen = 0;
3099     }
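    /* Over-allocate the host control buffer: host cmsg headers and
     * alignment can be larger than the target's, so translated control
     * messages may grow. */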
3100     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3101     msg.msg_control = alloca(msg.msg_controllen);
3102     memset(msg.msg_control, 0, msg.msg_controllen);
3103 
3104     msg.msg_flags = tswap32(msgp->msg_flags);
3105 
3106     count = tswapal(msgp->msg_iovlen);
3107     target_vec = tswapal(msgp->msg_iov);
3108 
3109     if (count > IOV_MAX) {
3110         /* sendmsg/recvmsg return a different errno for this condition than
3111          * readv/writev, so we must catch it here before lock_iovec() does.
3112          */
3113         ret = -TARGET_EMSGSIZE;
3114         goto out2;
3115     }
3116 
3117     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3118                      target_vec, count, send);
3119     if (vec == NULL) {
3120         ret = -host_to_target_errno(errno);
3121         goto out2;
3122     }
3123     msg.msg_iovlen = count;
3124     msg.msg_iov = vec;
3125 
3126     if (send) {
3127         if (fd_trans_target_to_host_data(fd)) {
3128             void *host_msg;
3129 
3130             host_msg = g_malloc(msg.msg_iov->iov_len);
3131             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3132             ret = fd_trans_target_to_host_data(fd)(host_msg,
3133                                                    msg.msg_iov->iov_len);
3134             if (ret >= 0) {
3135                 msg.msg_iov->iov_base = host_msg;
3136                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3137             }
3138             g_free(host_msg);
3139         } else {
3140             ret = target_to_host_cmsg(&msg, msgp);
3141             if (ret == 0) {
3142                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3143             }
3144         }
3145     } else {
3146         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3147         if (!is_error(ret)) {
3148             len = ret;
3149             if (fd_trans_host_to_target_data(fd)) {
3150                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3151                                                MIN(msg.msg_iov->iov_len, len));
3152             } else {
3153                 ret = host_to_target_cmsg(msgp, &msg);
3154             }
3155             if (!is_error(ret)) {
3156                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3157                 msgp->msg_flags = tswap32(msg.msg_flags);
3158                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3159                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3160                                     msg.msg_name, msg.msg_namelen);
3161                     if (ret) {
3162                         goto out;
3163                     }
3164                 }
3165 
3166                 ret = len;
3167             }
3168         }
3169     }
3170 
3171 out:
3172     unlock_iovec(vec, target_vec, count, !send);
3173 out2:
3174     return ret;
3175 }
3176 
3177 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3178                                int flags, int send)
3179 {
3180     abi_long ret;
3181     struct target_msghdr *msgp;
3182 
3183     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3184                           msgp,
3185                           target_msg,
3186                           send ? 1 : 0)) {
3187         return -TARGET_EFAULT;
3188     }
3189     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3190     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3191     return ret;
3192 }
3193 
3194 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3195  * so it might not have this *mmsg-specific flag either.
3196  */
3197 #ifndef MSG_WAITFORONE
3198 #define MSG_WAITFORONE 0x10000
3199 #endif
3200 
3201 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3202                                 unsigned int vlen, unsigned int flags,
3203                                 int send)
3204 {
3205     struct target_mmsghdr *mmsgp;
3206     abi_long ret = 0;
3207     int i;
3208 
3209     if (vlen > UIO_MAXIOV) {
3210         vlen = UIO_MAXIOV;
3211     }
3212 
3213     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3214     if (!mmsgp) {
3215         return -TARGET_EFAULT;
3216     }
3217 
3218     for (i = 0; i < vlen; i++) {
3219         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3220         if (is_error(ret)) {
3221             break;
3222         }
3223         mmsgp[i].msg_len = tswap32(ret);
3224         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3225         if (flags & MSG_WAITFORONE) {
3226             flags |= MSG_DONTWAIT;
3227         }
3228     }
3229 
3230     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3231 
3232     /* Return the number of datagrams sent or received if we transferred
3233      * any at all; otherwise return the error.
3234      */
3235     if (i) {
3236         return i;
3237     }
3238     return ret;
3239 }
3240 
3241 /* do_accept4() Must return target values and target errnos. */
3242 static abi_long do_accept4(int fd, abi_ulong target_addr,
3243                            abi_ulong target_addrlen_addr, int flags)
3244 {
3245     socklen_t addrlen, ret_addrlen;
3246     void *addr;
3247     abi_long ret;
3248     int host_flags;
3249 
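    /* accept4()'s SOCK_NONBLOCK/SOCK_CLOEXEC flags share values with
     * O_NONBLOCK/O_CLOEXEC, so the fcntl flag table can translate them. */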
3250     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3251 
3252     if (target_addr == 0) {
3253         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3254     }
3255 
3256     /* Linux returns EINVAL if the addrlen pointer is invalid */
3257     if (get_user_u32(addrlen, target_addrlen_addr))
3258         return -TARGET_EINVAL;
3259 
3260     if ((int)addrlen < 0) {
3261         return -TARGET_EINVAL;
3262     }
3263 
3264     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3265         return -TARGET_EINVAL;
3266 
3267     addr = alloca(addrlen);
3268 
3269     ret_addrlen = addrlen;
3270     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3271     if (!is_error(ret)) {
3272         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3273         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3274             ret = -TARGET_EFAULT;
3275         }
3276     }
3277     return ret;
3278 }
3279 
3280 /* do_getpeername() Must return target values and target errnos. */
3281 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3282                                abi_ulong target_addrlen_addr)
3283 {
3284     socklen_t addrlen, ret_addrlen;
3285     void *addr;
3286     abi_long ret;
3287 
3288     if (get_user_u32(addrlen, target_addrlen_addr))
3289         return -TARGET_EFAULT;
3290 
3291     if ((int)addrlen < 0) {
3292         return -TARGET_EINVAL;
3293     }
3294 
3295     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3296         return -TARGET_EFAULT;
3297 
3298     addr = alloca(addrlen);
3299 
3300     ret_addrlen = addrlen;
3301     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3302     if (!is_error(ret)) {
3303         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3304         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3305             ret = -TARGET_EFAULT;
3306         }
3307     }
3308     return ret;
3309 }
3310 
3311 /* do_getsockname() Must return target values and target errnos. */
3312 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3313                                abi_ulong target_addrlen_addr)
3314 {
3315     socklen_t addrlen, ret_addrlen;
3316     void *addr;
3317     abi_long ret;
3318 
3319     if (get_user_u32(addrlen, target_addrlen_addr))
3320         return -TARGET_EFAULT;
3321 
3322     if ((int)addrlen < 0) {
3323         return -TARGET_EINVAL;
3324     }
3325 
3326     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3327         return -TARGET_EFAULT;
3328 
3329     addr = alloca(addrlen);
3330 
3331     ret_addrlen = addrlen;
3332     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3333     if (!is_error(ret)) {
3334         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3335         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3336             ret = -TARGET_EFAULT;
3337         }
3338     }
3339     return ret;
3340 }
3341 
3342 /* do_socketpair() Must return target values and target errnos. */
3343 static abi_long do_socketpair(int domain, int type, int protocol,
3344                               abi_ulong target_tab_addr)
3345 {
3346     int tab[2];
3347     abi_long ret;
3348 
3349     target_to_host_sock_type(&type);
3350 
3351     ret = get_errno(socketpair(domain, type, protocol, tab));
3352     if (!is_error(ret)) {
3353         if (put_user_s32(tab[0], target_tab_addr)
3354             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3355             ret = -TARGET_EFAULT;
3356     }
3357     return ret;
3358 }
3359 
3360 /* do_sendto() Must return target values and target errnos. */
3361 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3362                           abi_ulong target_addr, socklen_t addrlen)
3363 {
3364     void *addr;
3365     void *host_msg;
3366     void *copy_msg = NULL;
3367     abi_long ret;
3368 
3369     if ((int)addrlen < 0) {
3370         return -TARGET_EINVAL;
3371     }
3372 
3373     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3374     if (!host_msg)
3375         return -TARGET_EFAULT;
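    /* If this fd has a registered payload translator (e.g. a netlink
     * socket), convert the data to host format before sending. */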
3376     if (fd_trans_target_to_host_data(fd)) {
3377         copy_msg = host_msg;
3378         host_msg = g_malloc(len);
3379         memcpy(host_msg, copy_msg, len);
3380         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3381         if (ret < 0) {
3382             goto fail;
3383         }
3384     }
3385     if (target_addr) {
3386         addr = alloca(addrlen+1);
3387         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3388         if (ret) {
3389             goto fail;
3390         }
3391         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3392     } else {
3393         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3394     }
3395 fail:
3396     if (copy_msg) {
3397         g_free(host_msg);
3398         host_msg = copy_msg;
3399     }
3400     unlock_user(host_msg, msg, 0);
3401     return ret;
3402 }
3403 
3404 /* do_recvfrom() Must return target values and target errnos. */
3405 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3406                             abi_ulong target_addr,
3407                             abi_ulong target_addrlen)
3408 {
3409     socklen_t addrlen, ret_addrlen;
3410     void *addr;
3411     void *host_msg;
3412     abi_long ret;
3413 
3414     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3415     if (!host_msg)
3416         return -TARGET_EFAULT;
3417     if (target_addr) {
3418         if (get_user_u32(addrlen, target_addrlen)) {
3419             ret = -TARGET_EFAULT;
3420             goto fail;
3421         }
3422         if ((int)addrlen < 0) {
3423             ret = -TARGET_EINVAL;
3424             goto fail;
3425         }
3426         addr = alloca(addrlen);
3427         ret_addrlen = addrlen;
3428         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3429                                       addr, &ret_addrlen));
3430     } else {
3431         addr = NULL; /* To keep compiler quiet.  */
3432         addrlen = 0; /* To keep compiler quiet.  */
3433         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3434     }
3435     if (!is_error(ret)) {
3436         if (fd_trans_host_to_target_data(fd)) {
3437             abi_long trans;
3438             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3439             if (is_error(trans)) {
3440                 ret = trans;
3441                 goto fail;
3442             }
3443         }
3444         if (target_addr) {
3445             host_to_target_sockaddr(target_addr, addr,
3446                                     MIN(addrlen, ret_addrlen));
3447             if (put_user_u32(ret_addrlen, target_addrlen)) {
3448                 ret = -TARGET_EFAULT;
3449                 goto fail;
3450             }
3451         }
3452         unlock_user(host_msg, msg, len);
3453     } else {
3454 fail:
3455         unlock_user(host_msg, msg, 0);
3456     }
3457     return ret;
3458 }
3459 
3460 #ifdef TARGET_NR_socketcall
3461 /* do_socketcall() must return target values and target errnos. */
3462 static abi_long do_socketcall(int num, abi_ulong vptr)
3463 {
3464     static const unsigned nargs[] = { /* number of arguments per operation */
3465         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3466         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3467         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3468         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3469         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3470         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3471         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3472         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3473         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3474         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3475         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3476         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3477         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3478         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3479         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3480         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3481         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3482         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3483         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3484         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3485     };
3486     abi_long a[6]; /* max 6 args */
3487     unsigned i;
3488 
3489     /* check the range of the first argument num */
3490     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3491     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3492         return -TARGET_EINVAL;
3493     }
3494     /* ensure we have space for args */
3495     if (nargs[num] > ARRAY_SIZE(a)) {
3496         return -TARGET_EINVAL;
3497     }
3498     /* collect the arguments in a[] according to nargs[] */
3499     for (i = 0; i < nargs[num]; ++i) {
3500         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3501             return -TARGET_EFAULT;
3502         }
3503     }
3504     /* now that we have the args, invoke the appropriate underlying function */
3505     switch (num) {
3506     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3507         return do_socket(a[0], a[1], a[2]);
3508     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3509         return do_bind(a[0], a[1], a[2]);
3510     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3511         return do_connect(a[0], a[1], a[2]);
3512     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3513         return get_errno(listen(a[0], a[1]));
3514     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3515         return do_accept4(a[0], a[1], a[2], 0);
3516     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3517         return do_getsockname(a[0], a[1], a[2]);
3518     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3519         return do_getpeername(a[0], a[1], a[2]);
3520     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3521         return do_socketpair(a[0], a[1], a[2], a[3]);
3522     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3523         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3524     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3525         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3526     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3527         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3528     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3529         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3530     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3531         return get_errno(shutdown(a[0], a[1]));
3532     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3533         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3534     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3535         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3536     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3537         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3538     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3539         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3540     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3541         return do_accept4(a[0], a[1], a[2], a[3]);
3542     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3543         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3544     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3545         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3546     default:
3547         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3548         return -TARGET_EINVAL;
3549     }
3550 }
3551 #endif
3552 
3553 #define N_SHM_REGIONS	32
3554 
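/* Table of guest shmat() attachments, used by do_shmdt() to find the size
 * of a mapping so its guest page flags can be cleared on detach. */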
3555 static struct shm_region {
3556     abi_ulong start;
3557     abi_ulong size;
3558     bool in_use;
3559 } shm_regions[N_SHM_REGIONS];
3560 
3561 #ifndef TARGET_SEMID64_DS
3562 /* asm-generic version of this struct */
3563 struct target_semid64_ds
3564 {
3565   struct target_ipc_perm sem_perm;
3566   abi_ulong sem_otime;
3567 #if TARGET_ABI_BITS == 32
3568   abi_ulong __unused1;
3569 #endif
3570   abi_ulong sem_ctime;
3571 #if TARGET_ABI_BITS == 32
3572   abi_ulong __unused2;
3573 #endif
3574   abi_ulong sem_nsems;
3575   abi_ulong __unused3;
3576   abi_ulong __unused4;
3577 };
3578 #endif
3579 
3580 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3581                                                abi_ulong target_addr)
3582 {
3583     struct target_ipc_perm *target_ip;
3584     struct target_semid64_ds *target_sd;
3585 
3586     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3587         return -TARGET_EFAULT;
3588     target_ip = &(target_sd->sem_perm);
3589     host_ip->__key = tswap32(target_ip->__key);
3590     host_ip->uid = tswap32(target_ip->uid);
3591     host_ip->gid = tswap32(target_ip->gid);
3592     host_ip->cuid = tswap32(target_ip->cuid);
3593     host_ip->cgid = tswap32(target_ip->cgid);
3594 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3595     host_ip->mode = tswap32(target_ip->mode);
3596 #else
3597     host_ip->mode = tswap16(target_ip->mode);
3598 #endif
3599 #if defined(TARGET_PPC)
3600     host_ip->__seq = tswap32(target_ip->__seq);
3601 #else
3602     host_ip->__seq = tswap16(target_ip->__seq);
3603 #endif
3604     unlock_user_struct(target_sd, target_addr, 0);
3605     return 0;
3606 }
3607 
3608 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3609                                                struct ipc_perm *host_ip)
3610 {
3611     struct target_ipc_perm *target_ip;
3612     struct target_semid64_ds *target_sd;
3613 
3614     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3615         return -TARGET_EFAULT;
3616     target_ip = &(target_sd->sem_perm);
3617     target_ip->__key = tswap32(host_ip->__key);
3618     target_ip->uid = tswap32(host_ip->uid);
3619     target_ip->gid = tswap32(host_ip->gid);
3620     target_ip->cuid = tswap32(host_ip->cuid);
3621     target_ip->cgid = tswap32(host_ip->cgid);
3622 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3623     target_ip->mode = tswap32(host_ip->mode);
3624 #else
3625     target_ip->mode = tswap16(host_ip->mode);
3626 #endif
3627 #if defined(TARGET_PPC)
3628     target_ip->__seq = tswap32(host_ip->__seq);
3629 #else
3630     target_ip->__seq = tswap16(host_ip->__seq);
3631 #endif
3632     unlock_user_struct(target_sd, target_addr, 1);
3633     return 0;
3634 }
3635 
3636 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3637                                                abi_ulong target_addr)
3638 {
3639     struct target_semid64_ds *target_sd;
3640 
3641     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3642         return -TARGET_EFAULT;
3643     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3644         return -TARGET_EFAULT;
3645     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3646     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3647     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3648     unlock_user_struct(target_sd, target_addr, 0);
3649     return 0;
3650 }
3651 
3652 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3653                                                struct semid_ds *host_sd)
3654 {
3655     struct target_semid64_ds *target_sd;
3656 
3657     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3658         return -TARGET_EFAULT;
3659     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3660         return -TARGET_EFAULT;
3661     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3662     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3663     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3664     unlock_user_struct(target_sd, target_addr, 1);
3665     return 0;
3666 }
3667 
3668 struct target_seminfo {
3669     int semmap;
3670     int semmni;
3671     int semmns;
3672     int semmnu;
3673     int semmsl;
3674     int semopm;
3675     int semume;
3676     int semusz;
3677     int semvmx;
3678     int semaem;
3679 };
3680 
3681 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3682                                               struct seminfo *host_seminfo)
3683 {
3684     struct target_seminfo *target_seminfo;
3685     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3686         return -TARGET_EFAULT;
3687     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3688     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3689     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3690     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3691     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3692     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3693     __put_user(host_seminfo->semume, &target_seminfo->semume);
3694     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3695     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3696     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3697     unlock_user_struct(target_seminfo, target_addr, 1);
3698     return 0;
3699 }
3700 
3701 union semun {
3702 	int val;
3703 	struct semid_ds *buf;
3704 	unsigned short *array;
3705 	struct seminfo *__buf;
3706 };
3707 
3708 union target_semun {
3709 	int val;
3710 	abi_ulong buf;
3711 	abi_ulong array;
3712 	abi_ulong __buf;
3713 };
3714 
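/* Copy the guest's semaphore value array into a freshly allocated host
 * array. On success the caller owns *host_array; host_to_target_semarray()
 * frees it after copying the values back to the guest. */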
3715 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3716                                                abi_ulong target_addr)
3717 {
3718     int nsems;
3719     unsigned short *array;
3720     union semun semun;
3721     struct semid_ds semid_ds;
3722     int i, ret;
3723 
3724     semun.buf = &semid_ds;
3725 
3726     ret = semctl(semid, 0, IPC_STAT, semun);
3727     if (ret == -1)
3728         return get_errno(ret);
3729 
3730     nsems = semid_ds.sem_nsems;
3731 
3732     *host_array = g_try_new(unsigned short, nsems);
3733     if (!*host_array) {
3734         return -TARGET_ENOMEM;
3735     }
3736     array = lock_user(VERIFY_READ, target_addr,
3737                       nsems*sizeof(unsigned short), 1);
3738     if (!array) {
3739         g_free(*host_array);
3740         return -TARGET_EFAULT;
3741     }
3742 
3743     for(i=0; i<nsems; i++) {
3744         __get_user((*host_array)[i], &array[i]);
3745     }
3746     unlock_user(array, target_addr, 0);
3747 
3748     return 0;
3749 }
3750 
3751 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3752                                                unsigned short **host_array)
3753 {
3754     int nsems;
3755     unsigned short *array;
3756     union semun semun;
3757     struct semid_ds semid_ds;
3758     int i, ret;
3759 
3760     semun.buf = &semid_ds;
3761 
3762     ret = semctl(semid, 0, IPC_STAT, semun);
3763     if (ret == -1)
3764         return get_errno(ret);
3765 
3766     nsems = semid_ds.sem_nsems;
3767 
3768     array = lock_user(VERIFY_WRITE, target_addr,
3769                       nsems*sizeof(unsigned short), 0);
3770     if (!array)
3771         return -TARGET_EFAULT;
3772 
3773     for(i=0; i<nsems; i++) {
3774         __put_user((*host_array)[i], &array[i]);
3775     }
3776     g_free(*host_array);
3777     unlock_user(array, target_addr, 1);
3778 
3779     return 0;
3780 }
3781 
3782 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3783                                  abi_ulong target_arg)
3784 {
3785     union target_semun target_su = { .buf = target_arg };
3786     union semun arg;
3787     struct semid_ds dsarg;
3788     unsigned short *array = NULL;
3789     struct seminfo seminfo;
3790     abi_long ret = -TARGET_EINVAL;
3791     abi_long err;
3792     cmd &= 0xff;
3793 
3794     switch (cmd) {
3795         case GETVAL:
3796         case SETVAL:
3797             /* In 64-bit cross-endian situations, we will erroneously pick up
3798              * the wrong half of the union for the "val" element.  To rectify
3799              * this, the entire 8-byte structure is byteswapped, followed by
3800              * a swap of the 4-byte val field. In other cases, the data is
3801              * already in proper host byte order. */
3802             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3803                 target_su.buf = tswapal(target_su.buf);
3804                 arg.val = tswap32(target_su.val);
3805             } else {
3806                 arg.val = target_su.val;
3807             }
3808             ret = get_errno(semctl(semid, semnum, cmd, arg));
3809             break;
3810         case GETALL:
3811         case SETALL:
3812             err = target_to_host_semarray(semid, &array, target_su.array);
3813             if (err)
3814                 return err;
3815             arg.array = array;
3816             ret = get_errno(semctl(semid, semnum, cmd, arg));
3817             err = host_to_target_semarray(semid, target_su.array, &array);
3818             if (err)
3819                 return err;
3820             break;
3821         case IPC_STAT:
3822         case IPC_SET:
3823         case SEM_STAT:
3824             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3825             if (err)
3826                 return err;
3827             arg.buf = &dsarg;
3828             ret = get_errno(semctl(semid, semnum, cmd, arg));
3829             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3830             if (err)
3831                 return err;
3832             break;
3833         case IPC_INFO:
3834         case SEM_INFO:
3835             arg.__buf = &seminfo;
3836             ret = get_errno(semctl(semid, semnum, cmd, arg));
3837             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3838             if (err)
3839                 return err;
3840             break;
3841         case IPC_RMID:
3842         case GETPID:
3843         case GETNCNT:
3844         case GETZCNT:
3845             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3846             break;
3847     }
3848 
3849     return ret;
3850 }
3851 
3852 struct target_sembuf {
3853     unsigned short sem_num;
3854     short sem_op;
3855     short sem_flg;
3856 };
3857 
3858 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3859                                              abi_ulong target_addr,
3860                                              unsigned nsops)
3861 {
3862     struct target_sembuf *target_sembuf;
3863     int i;
3864 
3865     target_sembuf = lock_user(VERIFY_READ, target_addr,
3866                               nsops*sizeof(struct target_sembuf), 1);
3867     if (!target_sembuf)
3868         return -TARGET_EFAULT;
3869 
3870     for(i=0; i<nsops; i++) {
3871         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3872         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3873         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3874     }
3875 
3876     unlock_user(target_sembuf, target_addr, 0);
3877 
3878     return 0;
3879 }
3880 
3881 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3882 {
3883     struct sembuf sops[nsops];
3884     abi_long ret;
3885 
3886     if (target_to_host_sembuf(sops, ptr, nsops))
3887         return -TARGET_EFAULT;
3888 
3889     ret = -TARGET_ENOSYS;
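    /* Prefer the direct semtimedop syscall; fall back to the ipc()
     * multiplexer on hosts that only provide that interface. */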
3890 #ifdef __NR_semtimedop
3891     ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3892 #endif
3893 #ifdef __NR_ipc
3894     if (ret == -TARGET_ENOSYS) {
3895         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3896     }
3897 #endif
3898     return ret;
3899 }
3900 
3901 struct target_msqid_ds
3902 {
3903     struct target_ipc_perm msg_perm;
3904     abi_ulong msg_stime;
3905 #if TARGET_ABI_BITS == 32
3906     abi_ulong __unused1;
3907 #endif
3908     abi_ulong msg_rtime;
3909 #if TARGET_ABI_BITS == 32
3910     abi_ulong __unused2;
3911 #endif
3912     abi_ulong msg_ctime;
3913 #if TARGET_ABI_BITS == 32
3914     abi_ulong __unused3;
3915 #endif
3916     abi_ulong __msg_cbytes;
3917     abi_ulong msg_qnum;
3918     abi_ulong msg_qbytes;
3919     abi_ulong msg_lspid;
3920     abi_ulong msg_lrpid;
3921     abi_ulong __unused4;
3922     abi_ulong __unused5;
3923 };
3924 
3925 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3926                                                abi_ulong target_addr)
3927 {
3928     struct target_msqid_ds *target_md;
3929 
3930     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3931         return -TARGET_EFAULT;
3932     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3933         return -TARGET_EFAULT;
3934     host_md->msg_stime = tswapal(target_md->msg_stime);
3935     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3936     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3937     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3938     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3939     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3940     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3941     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3942     unlock_user_struct(target_md, target_addr, 0);
3943     return 0;
3944 }
3945 
3946 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3947                                                struct msqid_ds *host_md)
3948 {
3949     struct target_msqid_ds *target_md;
3950 
3951     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3952         return -TARGET_EFAULT;
3953     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3954         return -TARGET_EFAULT;
3955     target_md->msg_stime = tswapal(host_md->msg_stime);
3956     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3957     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3958     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3959     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3960     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3961     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3962     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3963     unlock_user_struct(target_md, target_addr, 1);
3964     return 0;
3965 }
3966 
3967 struct target_msginfo {
3968     int msgpool;
3969     int msgmap;
3970     int msgmax;
3971     int msgmnb;
3972     int msgmni;
3973     int msgssz;
3974     int msgtql;
3975     unsigned short int msgseg;
3976 };
3977 
3978 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3979                                               struct msginfo *host_msginfo)
3980 {
3981     struct target_msginfo *target_msginfo;
3982     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3983         return -TARGET_EFAULT;
3984     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3985     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3986     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3987     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3988     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3989     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3990     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3991     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3992     unlock_user_struct(target_msginfo, target_addr, 1);
3993     return 0;
3994 }
3995 
3996 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3997 {
3998     struct msqid_ds dsarg;
3999     struct msginfo msginfo;
4000     abi_long ret = -TARGET_EINVAL;
4001 
4002     cmd &= 0xff;
4003 
4004     switch (cmd) {
4005     case IPC_STAT:
4006     case IPC_SET:
4007     case MSG_STAT:
4008         if (target_to_host_msqid_ds(&dsarg,ptr))
4009             return -TARGET_EFAULT;
4010         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4011         if (host_to_target_msqid_ds(ptr,&dsarg))
4012             return -TARGET_EFAULT;
4013         break;
4014     case IPC_RMID:
4015         ret = get_errno(msgctl(msgid, cmd, NULL));
4016         break;
4017     case IPC_INFO:
4018     case MSG_INFO:
4019         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4020         if (host_to_target_msginfo(ptr, &msginfo))
4021             return -TARGET_EFAULT;
4022         break;
4023     }
4024 
4025     return ret;
4026 }
4027 
4028 struct target_msgbuf {
4029     abi_long mtype;
4030     char	mtext[1];
4031 };
4032 
4033 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4034                                  ssize_t msgsz, int msgflg)
4035 {
4036     struct target_msgbuf *target_mb;
4037     struct msgbuf *host_mb;
4038     abi_long ret = 0;
4039 
4040     if (msgsz < 0) {
4041         return -TARGET_EINVAL;
4042     }
4043 
4044     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4045         return -TARGET_EFAULT;
4046     host_mb = g_try_malloc(msgsz + sizeof(long));
4047     if (!host_mb) {
4048         unlock_user_struct(target_mb, msgp, 0);
4049         return -TARGET_ENOMEM;
4050     }
4051     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4052     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4053     ret = -TARGET_ENOSYS;
4054 #ifdef __NR_msgsnd
4055     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4056 #endif
4057 #ifdef __NR_ipc
4058     if (ret == -TARGET_ENOSYS) {
4059         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4060                                  host_mb, 0));
4061     }
4062 #endif
4063     g_free(host_mb);
4064     unlock_user_struct(target_mb, msgp, 0);
4065 
4066     return ret;
4067 }
4068 
4069 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4070                                  ssize_t msgsz, abi_long msgtyp,
4071                                  int msgflg)
4072 {
4073     struct target_msgbuf *target_mb;
4074     char *target_mtext;
4075     struct msgbuf *host_mb;
4076     abi_long ret = 0;
4077 
4078     if (msgsz < 0) {
4079         return -TARGET_EINVAL;
4080     }
4081 
4082     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4083         return -TARGET_EFAULT;
4084 
4085     host_mb = g_try_malloc(msgsz + sizeof(long));
4086     if (!host_mb) {
4087         ret = -TARGET_ENOMEM;
4088         goto end;
4089     }
4090     ret = -TARGET_ENOSYS;
4091 #ifdef __NR_msgrcv
4092     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4093 #endif
4094 #ifdef __NR_ipc
4095     if (ret == -TARGET_ENOSYS) {
4096         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4097                         msgflg, host_mb, msgtyp));
4098     }
4099 #endif
4100 
4101     if (ret > 0) {
4102         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4103         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4104         if (!target_mtext) {
4105             ret = -TARGET_EFAULT;
4106             goto end;
4107         }
4108         memcpy(target_mb->mtext, host_mb->mtext, ret);
4109         unlock_user(target_mtext, target_mtext_addr, ret);
4110     }
4111 
4112     target_mb->mtype = tswapal(host_mb->mtype);
4113 
4114 end:
4115     if (target_mb)
4116         unlock_user_struct(target_mb, msgp, 1);
4117     g_free(host_mb);
4118     return ret;
4119 }
4120 
4121 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4122                                                abi_ulong target_addr)
4123 {
4124     struct target_shmid_ds *target_sd;
4125 
4126     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4127         return -TARGET_EFAULT;
4128     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4129         return -TARGET_EFAULT;
4130     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4131     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4132     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4133     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4134     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4135     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4136     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4137     unlock_user_struct(target_sd, target_addr, 0);
4138     return 0;
4139 }
4140 
4141 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4142                                                struct shmid_ds *host_sd)
4143 {
4144     struct target_shmid_ds *target_sd;
4145 
4146     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4147         return -TARGET_EFAULT;
4148     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4149         return -TARGET_EFAULT;
4150     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4151     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4152     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4153     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4154     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4155     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4156     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4157     unlock_user_struct(target_sd, target_addr, 1);
4158     return 0;
4159 }
4160 
4161 struct  target_shminfo {
4162     abi_ulong shmmax;
4163     abi_ulong shmmin;
4164     abi_ulong shmmni;
4165     abi_ulong shmseg;
4166     abi_ulong shmall;
4167 };
4168 
4169 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4170                                               struct shminfo *host_shminfo)
4171 {
4172     struct target_shminfo *target_shminfo;
4173     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4174         return -TARGET_EFAULT;
4175     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4176     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4177     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4178     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4179     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4180     unlock_user_struct(target_shminfo, target_addr, 1);
4181     return 0;
4182 }
4183 
4184 struct target_shm_info {
4185     int used_ids;
4186     abi_ulong shm_tot;
4187     abi_ulong shm_rss;
4188     abi_ulong shm_swp;
4189     abi_ulong swap_attempts;
4190     abi_ulong swap_successes;
4191 };
4192 
4193 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4194                                                struct shm_info *host_shm_info)
4195 {
4196     struct target_shm_info *target_shm_info;
4197     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4198         return -TARGET_EFAULT;
4199     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4200     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4201     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4202     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4203     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4204     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4205     unlock_user_struct(target_shm_info, target_addr, 1);
4206     return 0;
4207 }
4208 
4209 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4210 {
4211     struct shmid_ds dsarg;
4212     struct shminfo shminfo;
4213     struct shm_info shm_info;
4214     abi_long ret = -TARGET_EINVAL;
4215 
4216     cmd &= 0xff;
4217 
4218     switch (cmd) {
4219     case IPC_STAT:
4220     case IPC_SET:
4221     case SHM_STAT:
4222         if (target_to_host_shmid_ds(&dsarg, buf))
4223             return -TARGET_EFAULT;
4224         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4225         if (host_to_target_shmid_ds(buf, &dsarg))
4226             return -TARGET_EFAULT;
4227         break;
4228     case IPC_INFO:
4229         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4230         if (host_to_target_shminfo(buf, &shminfo))
4231             return -TARGET_EFAULT;
4232         break;
4233     case SHM_INFO:
4234         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4235         if (host_to_target_shm_info(buf, &shm_info))
4236             return -TARGET_EFAULT;
4237         break;
4238     case IPC_RMID:
4239     case SHM_LOCK:
4240     case SHM_UNLOCK:
4241         ret = get_errno(shmctl(shmid, cmd, NULL));
4242         break;
4243     }
4244 
4245     return ret;
4246 }
4247 
4248 #ifndef TARGET_FORCE_SHMLBA
4249 /* For most architectures, SHMLBA is the same as the page size;
4250  * some architectures have larger values, in which case they should
4251  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4252  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4253  * and defining its own value for SHMLBA.
4254  *
4255  * The kernel also permits SHMLBA to be set by the architecture to a
4256  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4257  * this means that addresses are rounded to the large size if
4258  * SHM_RND is set but addresses not aligned to that size are not rejected
4259  * as long as they are at least page-aligned. Since the only architecture
4260  * which uses this is ia64 this code doesn't provide for that oddity.
4261  */
4262 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4263 {
4264     return TARGET_PAGE_SIZE;
4265 }
4266 #endif
4267 
4268 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4269                                  int shmid, abi_ulong shmaddr, int shmflg)
4270 {
4271     abi_long raddr;
4272     void *host_raddr;
4273     struct shmid_ds shm_info;
4274     int i,ret;
4275     abi_ulong shmlba;
4276 
4277     /* find out the length of the shared memory segment */
4278     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4279     if (is_error(ret)) {
4280         /* can't get length, bail out */
4281         return ret;
4282     }
4283 
4284     shmlba = target_shmlba(cpu_env);
4285 
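    /* An attach address that is not SHMLBA-aligned is only accepted if
     * SHM_RND asks us to round it down; otherwise fail with EINVAL as the
     * kernel would. */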
4286     if (shmaddr & (shmlba - 1)) {
4287         if (shmflg & SHM_RND) {
4288             shmaddr &= ~(shmlba - 1);
4289         } else {
4290             return -TARGET_EINVAL;
4291         }
4292     }
4293     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4294         return -TARGET_EINVAL;
4295     }
4296 
4297     mmap_lock();
4298 
4299     if (shmaddr)
4300         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4301     else {
4302         abi_ulong mmap_start;
4303 
4304         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4305         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4306 
4307         if (mmap_start == -1) {
4308             errno = ENOMEM;
4309             host_raddr = (void *)-1;
4310         } else
4311             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4312     }
4313 
4314     if (host_raddr == (void *)-1) {
4315         mmap_unlock();
4316         return get_errno((long)host_raddr);
4317     }
4318     raddr=h2g((unsigned long)host_raddr);
4319 
4320     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4321                    PAGE_VALID | PAGE_READ |
4322                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4323 
4324     for (i = 0; i < N_SHM_REGIONS; i++) {
4325         if (!shm_regions[i].in_use) {
4326             shm_regions[i].in_use = true;
4327             shm_regions[i].start = raddr;
4328             shm_regions[i].size = shm_info.shm_segsz;
4329             break;
4330         }
4331     }
4332 
4333     mmap_unlock();
4334     return raddr;
4335 
4336 }
4337 
4338 static inline abi_long do_shmdt(abi_ulong shmaddr)
4339 {
4340     int i;
4341     abi_long rv;
4342 
4343     mmap_lock();
4344 
4345     for (i = 0; i < N_SHM_REGIONS; ++i) {
4346         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4347             shm_regions[i].in_use = false;
4348             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4349             break;
4350         }
4351     }
4352     rv = get_errno(shmdt(g2h(shmaddr)));
4353 
4354     mmap_unlock();
4355 
4356     return rv;
4357 }
4358 
4359 #ifdef TARGET_NR_ipc
4360 /* ??? This only works with linear mappings.  */
4361 /* do_ipc() must return target values and target errnos. */
4362 static abi_long do_ipc(CPUArchState *cpu_env,
4363                        unsigned int call, abi_long first,
4364                        abi_long second, abi_long third,
4365                        abi_long ptr, abi_long fifth)
4366 {
4367     int version;
4368     abi_long ret = 0;
4369 
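    /* The 'call' argument packs an ABI version in its top 16 bits and the
     * IPCOP_* operation number in the low 16 bits. */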
4370     version = call >> 16;
4371     call &= 0xffff;
4372 
4373     switch (call) {
4374     case IPCOP_semop:
4375         ret = do_semop(first, ptr, second);
4376         break;
4377 
4378     case IPCOP_semget:
4379         ret = get_errno(semget(first, second, third));
4380         break;
4381 
4382     case IPCOP_semctl: {
4383         /* The semun argument to semctl is passed by value, so dereference the
4384          * ptr argument. */
4385         abi_ulong atptr;
4386         get_user_ual(atptr, ptr);
4387         ret = do_semctl(first, second, third, atptr);
4388         break;
4389     }
4390 
4391     case IPCOP_msgget:
4392         ret = get_errno(msgget(first, second));
4393         break;
4394 
4395     case IPCOP_msgsnd:
4396         ret = do_msgsnd(first, ptr, second, third);
4397         break;
4398 
4399     case IPCOP_msgctl:
4400         ret = do_msgctl(first, second, ptr);
4401         break;
4402 
4403     case IPCOP_msgrcv:
4404         switch (version) {
4405         case 0:
4406             {
4407                 struct target_ipc_kludge {
4408                     abi_long msgp;
4409                     abi_long msgtyp;
4410                 } *tmp;
4411 
4412                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4413                     ret = -TARGET_EFAULT;
4414                     break;
4415                 }
4416 
4417                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4418 
4419                 unlock_user_struct(tmp, ptr, 0);
4420                 break;
4421             }
4422         default:
4423             ret = do_msgrcv(first, ptr, second, fifth, third);
4424         }
4425         break;
4426 
4427     case IPCOP_shmat:
4428         switch (version) {
4429         default:
4430         {
4431             abi_ulong raddr;
4432             raddr = do_shmat(cpu_env, first, ptr, second);
4433             if (is_error(raddr))
4434                 return get_errno(raddr);
4435             if (put_user_ual(raddr, third))
4436                 return -TARGET_EFAULT;
4437             break;
4438         }
4439         case 1:
4440             ret = -TARGET_EINVAL;
4441             break;
4442         }
4443         break;
4444     case IPCOP_shmdt:
4445         ret = do_shmdt(ptr);
4446         break;
4447 
4448     case IPCOP_shmget:
4449         /* IPC_* flag values are the same on all Linux platforms */
4450         ret = get_errno(shmget(first, second, third));
4451         break;
4452 
4453     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4454     case IPCOP_shmctl:
4455         ret = do_shmctl(first, second, ptr);
4456         break;
4457     default:
4458         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4459                       call, version);
4460         ret = -TARGET_ENOSYS;
4461         break;
4462     }
4463     return ret;
4464 }
4465 #endif
4466 
4467 /* kernel structure types definitions */
4468 
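/* syscall_types.h is included twice with different definitions of STRUCT():
 * first to build an enum of structure type IDs, then to emit the argtype
 * descriptor arrays used by the ioctl thunking code below. */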
4469 #define STRUCT(name, ...) STRUCT_ ## name,
4470 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4471 enum {
4472 #include "syscall_types.h"
4473 STRUCT_MAX
4474 };
4475 #undef STRUCT
4476 #undef STRUCT_SPECIAL
4477 
4478 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4479 #define STRUCT_SPECIAL(name)
4480 #include "syscall_types.h"
4481 #undef STRUCT
4482 #undef STRUCT_SPECIAL
4483 
4484 typedef struct IOCTLEntry IOCTLEntry;
4485 
4486 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4487                              int fd, int cmd, abi_long arg);
4488 
4489 struct IOCTLEntry {
4490     int target_cmd;
4491     unsigned int host_cmd;
4492     const char *name;
4493     int access;
4494     do_ioctl_fn *do_ioctl;
4495     const argtype arg_type[5];
4496 };
4497 
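/* Flags describing whether an ioctl reads and/or writes its argument
 * structure; they select the direction of the thunk conversions. */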
4498 #define IOC_R 0x0001
4499 #define IOC_W 0x0002
4500 #define IOC_RW (IOC_R | IOC_W)
4501 
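/* Size of the fixed scratch buffer used when converting ioctl arguments;
 * handlers allocate a larger buffer when a payload does not fit. */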
4502 #define MAX_STRUCT_SIZE 4096
4503 
4504 #ifdef CONFIG_FIEMAP
4505 /* So fiemap access checks don't overflow on 32 bit systems.
4506  * This is very slightly smaller than the limit imposed by
4507  * the underlying kernel.
4508  */
4509 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4510                             / sizeof(struct fiemap_extent))
4511 
4512 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4513                                        int fd, int cmd, abi_long arg)
4514 {
4515     /* The parameter for this ioctl is a struct fiemap followed
4516      * by an array of struct fiemap_extent whose size is set
4517      * in fiemap->fm_extent_count. The array is filled in by the
4518      * ioctl.
4519      */
4520     int target_size_in, target_size_out;
4521     struct fiemap *fm;
4522     const argtype *arg_type = ie->arg_type;
4523     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4524     void *argptr, *p;
4525     abi_long ret;
4526     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4527     uint32_t outbufsz;
4528     int free_fm = 0;
4529 
4530     assert(arg_type[0] == TYPE_PTR);
4531     assert(ie->access == IOC_RW);
4532     arg_type++;
4533     target_size_in = thunk_type_size(arg_type, 0);
4534     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4535     if (!argptr) {
4536         return -TARGET_EFAULT;
4537     }
4538     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4539     unlock_user(argptr, arg, 0);
4540     fm = (struct fiemap *)buf_temp;
4541     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4542         return -TARGET_EINVAL;
4543     }
4544 
4545     outbufsz = sizeof (*fm) +
4546         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4547 
4548     if (outbufsz > MAX_STRUCT_SIZE) {
4549         /* We can't fit all the extents into the fixed size buffer.
4550          * Allocate one that is large enough and use it instead.
4551          */
4552         fm = g_try_malloc(outbufsz);
4553         if (!fm) {
4554             return -TARGET_ENOMEM;
4555         }
4556         memcpy(fm, buf_temp, sizeof(struct fiemap));
4557         free_fm = 1;
4558     }
4559     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4560     if (!is_error(ret)) {
4561         target_size_out = target_size_in;
4562         /* An extent_count of 0 means we were only counting the extents
4563          * so there are no structs to copy
4564          */
4565         if (fm->fm_extent_count != 0) {
4566             target_size_out += fm->fm_mapped_extents * extent_size;
4567         }
4568         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4569         if (!argptr) {
4570             ret = -TARGET_EFAULT;
4571         } else {
4572             /* Convert the struct fiemap */
4573             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4574             if (fm->fm_extent_count != 0) {
4575                 p = argptr + target_size_in;
4576                 /* ...and then all the struct fiemap_extents */
4577                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4578                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4579                                   THUNK_TARGET);
4580                     p += extent_size;
4581                 }
4582             }
4583             unlock_user(argptr, arg, target_size_out);
4584         }
4585     }
4586     if (free_fm) {
4587         g_free(fm);
4588     }
4589     return ret;
4590 }
4591 #endif
4592 
4593 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4594                                 int fd, int cmd, abi_long arg)
4595 {
4596     const argtype *arg_type = ie->arg_type;
4597     int target_size;
4598     void *argptr;
4599     int ret;
4600     struct ifconf *host_ifconf;
4601     uint32_t outbufsz;
4602     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4603     int target_ifreq_size;
4604     int nb_ifreq;
4605     int free_buf = 0;
4606     int i;
4607     int target_ifc_len;
4608     abi_long target_ifc_buf;
4609     int host_ifc_len;
4610     char *host_ifc_buf;
4611 
4612     assert(arg_type[0] == TYPE_PTR);
4613     assert(ie->access == IOC_RW);
4614 
4615     arg_type++;
4616     target_size = thunk_type_size(arg_type, 0);
4617 
4618     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4619     if (!argptr)
4620         return -TARGET_EFAULT;
4621     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4622     unlock_user(argptr, arg, 0);
4623 
4624     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4625     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4626     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4627 
4628     if (target_ifc_buf != 0) {
4629         target_ifc_len = host_ifconf->ifc_len;
4630         nb_ifreq = target_ifc_len / target_ifreq_size;
4631         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4632 
4633         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4634         if (outbufsz > MAX_STRUCT_SIZE) {
4635             /*
4636              * We can't fit all the ifreq entries into the fixed size buffer.
4637              * Allocate one that is large enough and use it instead.
4638              */
4639             host_ifconf = malloc(outbufsz);
4640             if (!host_ifconf) {
4641                 return -TARGET_ENOMEM;
4642             }
4643             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4644             free_buf = 1;
4645         }
4646         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4647 
4648         host_ifconf->ifc_len = host_ifc_len;
4649     } else {
4650         host_ifc_buf = NULL;
4651     }
4652     host_ifconf->ifc_buf = host_ifc_buf;
4653 
4654     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4655     if (!is_error(ret)) {
4656         /* convert host ifc_len to target ifc_len */
4657 
4658         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4659         target_ifc_len = nb_ifreq * target_ifreq_size;
4660         host_ifconf->ifc_len = target_ifc_len;
4661 
4662         /* restore target ifc_buf */
4663 
4664         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4665 
4666         /* copy struct ifconf to target user */
4667 
4668         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4669         if (!argptr)
4670             return -TARGET_EFAULT;
4671         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4672         unlock_user(argptr, arg, target_size);
4673 
4674         if (target_ifc_buf != 0) {
4675             /* copy ifreq[] to target user */
4676             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4677             for (i = 0; i < nb_ifreq ; i++) {
4678                 thunk_convert(argptr + i * target_ifreq_size,
4679                               host_ifc_buf + i * sizeof(struct ifreq),
4680                               ifreq_arg_type, THUNK_TARGET);
4681             }
4682             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4683         }
4684     }
4685 
4686     if (free_buf) {
4687         free(host_ifconf);
4688     }
4689 
4690     return ret;
4691 }
4692 
4693 #if defined(CONFIG_USBFS)
4694 #if HOST_LONG_BITS > 64
4695 #error USBDEVFS thunks do not support >64 bit hosts yet.
4696 #endif
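     /*
      * Bookkeeping for one submitted URB: the host usbdevfs_urb plus the guest
      * URB address and the locked guest data buffer, so the pointer handed back
      * by the kernel can be mapped onto the guest URB it belongs to.
      */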
4697 struct live_urb {
4698     uint64_t target_urb_adr;
4699     uint64_t target_buf_adr;
4700     char *target_buf_ptr;
4701     struct usbdevfs_urb host_urb;
4702 };
4703 
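     /*
      * Lazily created table of in-flight URBs, keyed by guest URB address.
      * Since target_urb_adr is the first field of struct live_urb, the struct
      * itself doubles as the 64-bit key.
      */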
4704 static GHashTable *usbdevfs_urb_hashtable(void)
4705 {
4706     static GHashTable *urb_hashtable;
4707 
4708     if (!urb_hashtable) {
4709         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4710     }
4711     return urb_hashtable;
4712 }
4713 
4714 static void urb_hashtable_insert(struct live_urb *urb)
4715 {
4716     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4717     g_hash_table_insert(urb_hashtable, urb, urb);
4718 }
4719 
4720 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4721 {
4722     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4723     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4724 }
4725 
4726 static void urb_hashtable_remove(struct live_urb *urb)
4727 {
4728     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4729     g_hash_table_remove(urb_hashtable, urb);
4730 }
4731 
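     /*
      * Reap a completed URB: the kernel returns a pointer to a host urb, from
      * which the owning live_urb is recovered.  Unlock the guest data buffer,
      * convert the completed URB back to the target layout, and write the guest
      * URB address through the ioctl argument.
      */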
4732 static abi_long
4733 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4734                           int fd, int cmd, abi_long arg)
4735 {
4736     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4737     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4738     struct live_urb *lurb;
4739     void *argptr;
4740     uint64_t hurb;
4741     int target_size;
4742     uintptr_t target_urb_adr;
4743     abi_long ret;
4744 
4745     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4746 
4747     memset(buf_temp, 0, sizeof(uint64_t));
4748     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4749     if (is_error(ret)) {
4750         return ret;
4751     }
4752 
4753     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4754     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4755     if (!lurb->target_urb_adr) {
4756         return -TARGET_EFAULT;
4757     }
4758     urb_hashtable_remove(lurb);
4759     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4760         lurb->host_urb.buffer_length);
4761     lurb->target_buf_ptr = NULL;
4762 
4763     /* restore the guest buffer pointer */
4764     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4765 
4766     /* update the guest urb struct */
4767     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4768     if (!argptr) {
4769         g_free(lurb);
4770         return -TARGET_EFAULT;
4771     }
4772     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4773     unlock_user(argptr, lurb->target_urb_adr, target_size);
4774 
4775     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4776     /* write back the urb handle */
4777     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4778     if (!argptr) {
4779         g_free(lurb);
4780         return -TARGET_EFAULT;
4781     }
4782 
4783     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4784     target_urb_adr = lurb->target_urb_adr;
4785     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4786     unlock_user(argptr, arg, target_size);
4787 
4788     g_free(lurb);
4789     return ret;
4790 }
4791 
4792 static abi_long
4793 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4794                              uint8_t *buf_temp __attribute__((unused)),
4795                              int fd, int cmd, abi_long arg)
4796 {
4797     struct live_urb *lurb;
4798 
4799     /* map target address back to host URB with metadata. */
4800     lurb = urb_hashtable_lookup(arg);
4801     if (!lurb) {
4802         return -TARGET_EFAULT;
4803     }
4804     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4805 }
4806 
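     /*
      * Submit a URB: build a host copy of the guest urb, lock the guest data
      * buffer with the access implied by the endpoint direction, and on success
      * stash the live_urb in the hash table so it can later be reaped or
      * discarded.
      */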
4807 static abi_long
4808 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4809                             int fd, int cmd, abi_long arg)
4810 {
4811     const argtype *arg_type = ie->arg_type;
4812     int target_size;
4813     abi_long ret;
4814     void *argptr;
4815     int rw_dir;
4816     struct live_urb *lurb;
4817 
4818     /*
4819      * Each submitted URB needs to map to a unique ID for the
4820      * kernel, and that unique ID needs to be a pointer to
4821      * host memory.  Hence, we need to malloc for each URB.
4822      * Isochronous transfers have a variable-length struct.
4823      */
4824     arg_type++;
4825     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4826 
4827     /* construct host copy of urb and metadata */
4828     lurb = g_try_malloc0(sizeof(struct live_urb));
4829     if (!lurb) {
4830         return -TARGET_ENOMEM;
4831     }
4832 
4833     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4834     if (!argptr) {
4835         g_free(lurb);
4836         return -TARGET_EFAULT;
4837     }
4838     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4839     unlock_user(argptr, arg, 0);
4840 
4841     lurb->target_urb_adr = arg;
4842     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4843 
4844     /* buffer space used depends on endpoint type so lock the entire buffer */
4845     /* control-type URBs should check the buffer contents for true direction */
4846     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4847     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4848         lurb->host_urb.buffer_length, 1);
4849     if (lurb->target_buf_ptr == NULL) {
4850         g_free(lurb);
4851         return -TARGET_EFAULT;
4852     }
4853 
4854     /* update buffer pointer in host copy */
4855     lurb->host_urb.buffer = lurb->target_buf_ptr;
4856 
4857     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4858     if (is_error(ret)) {
4859         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4860         g_free(lurb);
4861     } else {
4862         urb_hashtable_insert(lurb);
4863     }
4864 
4865     return ret;
4866 }
4867 #endif /* CONFIG_USBFS */
4868 
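     /*
      * Device-mapper ioctls: struct dm_ioctl is followed by a variable-sized,
      * command-specific payload starting at data_start.  buf_temp is too small
      * for that, so copy the request into a buffer sized from data_size,
      * convert the payload per command in both directions, and write the
      * results back to the guest.
      */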
4869 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4870                             int cmd, abi_long arg)
4871 {
4872     void *argptr;
4873     struct dm_ioctl *host_dm;
4874     abi_long guest_data;
4875     uint32_t guest_data_size;
4876     int target_size;
4877     const argtype *arg_type = ie->arg_type;
4878     abi_long ret;
4879     void *big_buf = NULL;
4880     char *host_data;
4881 
4882     arg_type++;
4883     target_size = thunk_type_size(arg_type, 0);
4884     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4885     if (!argptr) {
4886         ret = -TARGET_EFAULT;
4887         goto out;
4888     }
4889     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4890     unlock_user(argptr, arg, 0);
4891 
4892     /* buf_temp is too small, so fetch things into a bigger buffer */
4893     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4894     memcpy(big_buf, buf_temp, target_size);
4895     buf_temp = big_buf;
4896     host_dm = big_buf;
4897 
4898     guest_data = arg + host_dm->data_start;
4899     if ((guest_data - arg) < 0) {
4900         ret = -TARGET_EINVAL;
4901         goto out;
4902     }
4903     guest_data_size = host_dm->data_size - host_dm->data_start;
4904     host_data = (char*)host_dm + host_dm->data_start;
4905 
4906     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4907     if (!argptr) {
4908         ret = -TARGET_EFAULT;
4909         goto out;
4910     }
4911 
4912     switch (ie->host_cmd) {
4913     case DM_REMOVE_ALL:
4914     case DM_LIST_DEVICES:
4915     case DM_DEV_CREATE:
4916     case DM_DEV_REMOVE:
4917     case DM_DEV_SUSPEND:
4918     case DM_DEV_STATUS:
4919     case DM_DEV_WAIT:
4920     case DM_TABLE_STATUS:
4921     case DM_TABLE_CLEAR:
4922     case DM_TABLE_DEPS:
4923     case DM_LIST_VERSIONS:
4924         /* no input data */
4925         break;
4926     case DM_DEV_RENAME:
4927     case DM_DEV_SET_GEOMETRY:
4928         /* data contains only strings */
4929         memcpy(host_data, argptr, guest_data_size);
4930         break;
4931     case DM_TARGET_MSG:
4932         memcpy(host_data, argptr, guest_data_size);
4933         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4934         break;
4935     case DM_TABLE_LOAD:
4936     {
4937         void *gspec = argptr;
4938         void *cur_data = host_data;
4939         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4940         int spec_size = thunk_type_size(arg_type, 0);
4941         int i;
4942 
4943         for (i = 0; i < host_dm->target_count; i++) {
4944             struct dm_target_spec *spec = cur_data;
4945             uint32_t next;
4946             int slen;
4947 
4948             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4949             slen = strlen((char*)gspec + spec_size) + 1;
4950             next = spec->next;
4951             spec->next = sizeof(*spec) + slen;
4952             strcpy((char*)&spec[1], gspec + spec_size);
4953             gspec += next;
4954             cur_data += spec->next;
4955         }
4956         break;
4957     }
4958     default:
4959         ret = -TARGET_EINVAL;
4960         unlock_user(argptr, guest_data, 0);
4961         goto out;
4962     }
4963     unlock_user(argptr, guest_data, 0);
4964 
4965     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4966     if (!is_error(ret)) {
4967         guest_data = arg + host_dm->data_start;
4968         guest_data_size = host_dm->data_size - host_dm->data_start;
4969         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
             if (!argptr) {
                 ret = -TARGET_EFAULT;
                 goto out;
             }
4970         switch (ie->host_cmd) {
4971         case DM_REMOVE_ALL:
4972         case DM_DEV_CREATE:
4973         case DM_DEV_REMOVE:
4974         case DM_DEV_RENAME:
4975         case DM_DEV_SUSPEND:
4976         case DM_DEV_STATUS:
4977         case DM_TABLE_LOAD:
4978         case DM_TABLE_CLEAR:
4979         case DM_TARGET_MSG:
4980         case DM_DEV_SET_GEOMETRY:
4981             /* no return data */
4982             break;
4983         case DM_LIST_DEVICES:
4984         {
4985             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4986             uint32_t remaining_data = guest_data_size;
4987             void *cur_data = argptr;
4988             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4989             int nl_size = 12; /* can't use thunk_type_size due to alignment */
4990 
4991             while (1) {
4992                 uint32_t next = nl->next;
4993                 if (next) {
4994                     nl->next = nl_size + (strlen(nl->name) + 1);
4995                 }
4996                 if (remaining_data < nl->next) {
4997                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
4998                     break;
4999                 }
5000                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5001                 strcpy(cur_data + nl_size, nl->name);
5002                 cur_data += nl->next;
5003                 remaining_data -= nl->next;
5004                 if (!next) {
5005                     break;
5006                 }
5007                 nl = (void*)nl + next;
5008             }
5009             break;
5010         }
5011         case DM_DEV_WAIT:
5012         case DM_TABLE_STATUS:
5013         {
5014             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5015             void *cur_data = argptr;
5016             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5017             int spec_size = thunk_type_size(arg_type, 0);
5018             int i;
5019 
5020             for (i = 0; i < host_dm->target_count; i++) {
5021                 uint32_t next = spec->next;
5022                 int slen = strlen((char*)&spec[1]) + 1;
5023                 spec->next = (cur_data - argptr) + spec_size + slen;
5024                 if (guest_data_size < spec->next) {
5025                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5026                     break;
5027                 }
5028                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5029                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5030                 cur_data = argptr + spec->next;
5031                 spec = (void*)host_dm + host_dm->data_start + next;
5032             }
5033             break;
5034         }
5035         case DM_TABLE_DEPS:
5036         {
5037             void *hdata = (void*)host_dm + host_dm->data_start;
5038             int count = *(uint32_t*)hdata;
5039             uint64_t *hdev = hdata + 8;
5040             uint64_t *gdev = argptr + 8;
5041             int i;
5042 
5043             *(uint32_t*)argptr = tswap32(count);
5044             for (i = 0; i < count; i++) {
5045                 *gdev = tswap64(*hdev);
5046                 gdev++;
5047                 hdev++;
5048             }
5049             break;
5050         }
5051         case DM_LIST_VERSIONS:
5052         {
5053             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5054             uint32_t remaining_data = guest_data_size;
5055             void *cur_data = argptr;
5056             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5057             int vers_size = thunk_type_size(arg_type, 0);
5058 
5059             while (1) {
5060                 uint32_t next = vers->next;
5061                 if (next) {
5062                     vers->next = vers_size + (strlen(vers->name) + 1);
5063                 }
5064                 if (remaining_data < vers->next) {
5065                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5066                     break;
5067                 }
5068                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5069                 strcpy(cur_data + vers_size, vers->name);
5070                 cur_data += vers->next;
5071                 remaining_data -= vers->next;
5072                 if (!next) {
5073                     break;
5074                 }
5075                 vers = (void*)vers + next;
5076             }
5077             break;
5078         }
5079         default:
5080             unlock_user(argptr, guest_data, 0);
5081             ret = -TARGET_EINVAL;
5082             goto out;
5083         }
5084         unlock_user(argptr, guest_data, guest_data_size);
5085 
5086         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5087         if (!argptr) {
5088             ret = -TARGET_EFAULT;
5089             goto out;
5090         }
5091         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5092         unlock_user(argptr, arg, target_size);
5093     }
5094 out:
5095     g_free(big_buf);
5096     return ret;
5097 }
5098 
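     /*
      * BLKPG: the ioctl argument embeds a pointer to a struct blkpg_partition.
      * Convert the outer blkpg_ioctl_arg, then fetch and convert the partition
      * it points to, and finally point the host copy at the local struct before
      * issuing the ioctl.
      */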
5099 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5100                                int cmd, abi_long arg)
5101 {
5102     void *argptr;
5103     int target_size;
5104     const argtype *arg_type = ie->arg_type;
5105     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5106     abi_long ret;
5107 
5108     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5109     struct blkpg_partition host_part;
5110 
5111     /* Read and convert blkpg */
5112     arg_type++;
5113     target_size = thunk_type_size(arg_type, 0);
5114     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5115     if (!argptr) {
5116         ret = -TARGET_EFAULT;
5117         goto out;
5118     }
5119     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5120     unlock_user(argptr, arg, 0);
5121 
5122     switch (host_blkpg->op) {
5123     case BLKPG_ADD_PARTITION:
5124     case BLKPG_DEL_PARTITION:
5125         /* payload is struct blkpg_partition */
5126         break;
5127     default:
5128         /* Unknown opcode */
5129         ret = -TARGET_EINVAL;
5130         goto out;
5131     }
5132 
5133     /* Read and convert blkpg->data */
5134     arg = (abi_long)(uintptr_t)host_blkpg->data;
5135     target_size = thunk_type_size(part_arg_type, 0);
5136     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5137     if (!argptr) {
5138         ret = -TARGET_EFAULT;
5139         goto out;
5140     }
5141     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5142     unlock_user(argptr, arg, 0);
5143 
5144     /* Swizzle the data pointer to our local copy and call! */
5145     host_blkpg->data = &host_part;
5146     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5147 
5148 out:
5149     return ret;
5150 }
5151 
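     /*
      * Routing ioctls that take a struct rtentry: the embedded rt_dev string
      * pointer cannot be converted by the generic thunk, so walk the structure
      * field by field and lock the guest device name separately while the host
      * ioctl runs.
      */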
5152 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5153                                 int fd, int cmd, abi_long arg)
5154 {
5155     const argtype *arg_type = ie->arg_type;
5156     const StructEntry *se;
5157     const argtype *field_types;
5158     const int *dst_offsets, *src_offsets;
5159     int target_size;
5160     void *argptr;
5161     abi_ulong *target_rt_dev_ptr = NULL;
5162     unsigned long *host_rt_dev_ptr = NULL;
5163     abi_long ret;
5164     int i;
5165 
5166     assert(ie->access == IOC_W);
5167     assert(*arg_type == TYPE_PTR);
5168     arg_type++;
5169     assert(*arg_type == TYPE_STRUCT);
5170     target_size = thunk_type_size(arg_type, 0);
5171     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5172     if (!argptr) {
5173         return -TARGET_EFAULT;
5174     }
5175     arg_type++;
5176     assert(*arg_type == (int)STRUCT_rtentry);
5177     se = struct_entries + *arg_type++;
5178     assert(se->convert[0] == NULL);
5179     /* convert struct here to be able to catch rt_dev string */
5180     field_types = se->field_types;
5181     dst_offsets = se->field_offsets[THUNK_HOST];
5182     src_offsets = se->field_offsets[THUNK_TARGET];
5183     for (i = 0; i < se->nb_fields; i++) {
5184         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5185             assert(*field_types == TYPE_PTRVOID);
5186             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5187             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5188             if (*target_rt_dev_ptr != 0) {
5189                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5190                                                   tswapal(*target_rt_dev_ptr));
5191                 if (!*host_rt_dev_ptr) {
5192                     unlock_user(argptr, arg, 0);
5193                     return -TARGET_EFAULT;
5194                 }
5195             } else {
5196                 *host_rt_dev_ptr = 0;
5197             }
5198             field_types++;
5199             continue;
5200         }
5201         field_types = thunk_convert(buf_temp + dst_offsets[i],
5202                                     argptr + src_offsets[i],
5203                                     field_types, THUNK_HOST);
5204     }
5205     unlock_user(argptr, arg, 0);
5206 
5207     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5208 
5209     assert(host_rt_dev_ptr != NULL);
5210     assert(target_rt_dev_ptr != NULL);
5211     if (*host_rt_dev_ptr != 0) {
5212         unlock_user((void *)*host_rt_dev_ptr,
5213                     *target_rt_dev_ptr, 0);
5214     }
5215     return ret;
5216 }
5217 
5218 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5219                                      int fd, int cmd, abi_long arg)
5220 {
5221     int sig = target_to_host_signal(arg);
5222     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5223 }
5224 
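     /*
      * SIOCGSTAMP/SIOCGSTAMPNS: fetch the socket timestamp from the host and
      * copy it out in either the old timeval/timespec layout or the 64-bit
      * layout, depending on which variant of the command the guest used.
      */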
5225 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5226                                     int fd, int cmd, abi_long arg)
5227 {
5228     struct timeval tv;
5229     abi_long ret;
5230 
5231     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5232     if (is_error(ret)) {
5233         return ret;
5234     }
5235 
5236     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5237         if (copy_to_user_timeval(arg, &tv)) {
5238             return -TARGET_EFAULT;
5239         }
5240     } else {
5241         if (copy_to_user_timeval64(arg, &tv)) {
5242             return -TARGET_EFAULT;
5243         }
5244     }
5245 
5246     return ret;
5247 }
5248 
5249 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5250                                       int fd, int cmd, abi_long arg)
5251 {
5252     struct timespec ts;
5253     abi_long ret;
5254 
5255     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5256     if (is_error(ret)) {
5257         return ret;
5258     }
5259 
5260     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5261         if (host_to_target_timespec(arg, &ts)) {
5262             return -TARGET_EFAULT;
5263         }
5264     } else {
5265         if (host_to_target_timespec64(arg, &ts)) {
5266             return -TARGET_EFAULT;
5267         }
5268     }
5269 
5270     return ret;
5271 }
5272 
5273 #ifdef TIOCGPTPEER
5274 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5275                                      int fd, int cmd, abi_long arg)
5276 {
5277     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5278     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5279 }
5280 #endif
5281 
5282 #ifdef HAVE_DRM_H
5283 
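     /*
      * DRM_IOCTL_VERSION returns three variable-length strings (name, date,
      * desc) through guest-supplied buffers.  The helpers below lock those
      * buffers around the host ioctl and copy the lengths and version numbers
      * back afterwards.
      */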
5284 static void unlock_drm_version(struct drm_version *host_ver,
5285                                struct target_drm_version *target_ver,
5286                                bool copy)
5287 {
5288     unlock_user(host_ver->name, target_ver->name,
5289                                 copy ? host_ver->name_len : 0);
5290     unlock_user(host_ver->date, target_ver->date,
5291                                 copy ? host_ver->date_len : 0);
5292     unlock_user(host_ver->desc, target_ver->desc,
5293                                 copy ? host_ver->desc_len : 0);
5294 }
5295 
5296 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5297                                           struct target_drm_version *target_ver)
5298 {
5299     memset(host_ver, 0, sizeof(*host_ver));
5300 
5301     __get_user(host_ver->name_len, &target_ver->name_len);
5302     if (host_ver->name_len) {
5303         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5304                                    target_ver->name_len, 0);
5305         if (!host_ver->name) {
5306             return -EFAULT;
5307         }
5308     }
5309 
5310     __get_user(host_ver->date_len, &target_ver->date_len);
5311     if (host_ver->date_len) {
5312         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5313                                    target_ver->date_len, 0);
5314         if (!host_ver->date) {
5315             goto err;
5316         }
5317     }
5318 
5319     __get_user(host_ver->desc_len, &target_ver->desc_len);
5320     if (host_ver->desc_len) {
5321         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5322                                    target_ver->desc_len, 0);
5323         if (!host_ver->desc) {
5324             goto err;
5325         }
5326     }
5327 
5328     return 0;
5329 err:
5330     unlock_drm_version(host_ver, target_ver, false);
5331     return -EFAULT;
5332 }
5333 
5334 static inline void host_to_target_drmversion(
5335                                           struct target_drm_version *target_ver,
5336                                           struct drm_version *host_ver)
5337 {
5338     __put_user(host_ver->version_major, &target_ver->version_major);
5339     __put_user(host_ver->version_minor, &target_ver->version_minor);
5340     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5341     __put_user(host_ver->name_len, &target_ver->name_len);
5342     __put_user(host_ver->date_len, &target_ver->date_len);
5343     __put_user(host_ver->desc_len, &target_ver->desc_len);
5344     unlock_drm_version(host_ver, target_ver, true);
5345 }
5346 
5347 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5348                              int fd, int cmd, abi_long arg)
5349 {
5350     struct drm_version *ver;
5351     struct target_drm_version *target_ver;
5352     abi_long ret;
5353 
5354     switch (ie->host_cmd) {
5355     case DRM_IOCTL_VERSION:
5356         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5357             return -TARGET_EFAULT;
5358         }
5359         ver = (struct drm_version *)buf_temp;
5360         ret = target_to_host_drmversion(ver, target_ver);
5361         if (!is_error(ret)) {
5362             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5363             if (is_error(ret)) {
5364                 unlock_drm_version(ver, target_ver, false);
5365             } else {
5366                 host_to_target_drmversion(target_ver, ver);
5367             }
5368         }
5369         unlock_user_struct(target_ver, arg, 0);
5370         return ret;
5371     }
5372     return -TARGET_ENOSYS;
5373 }
5374 
5375 #endif
5376 
5377 static IOCTLEntry ioctl_entries[] = {
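     /*
      * Table of recognised ioctls, built by expanding the IOCTL* macros over
      * ioctls.h.  The entry with target_cmd == 0 terminates the list.
      */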
5378 #define IOCTL(cmd, access, ...) \
5379     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5380 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5381     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5382 #define IOCTL_IGNORE(cmd) \
5383     { TARGET_ ## cmd, 0, #cmd },
5384 #include "ioctls.h"
5385     { 0, 0, },
5386 };
5387 
5388 /* ??? Implement proper locking for ioctls.  */
5389 /* do_ioctl() must return target values and target errnos. */
5390 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5391 {
5392     const IOCTLEntry *ie;
5393     const argtype *arg_type;
5394     abi_long ret;
5395     uint8_t buf_temp[MAX_STRUCT_SIZE];
5396     int target_size;
5397     void *argptr;
5398 
5399     ie = ioctl_entries;
5400     for(;;) {
5401         if (ie->target_cmd == 0) {
5402             qemu_log_mask(
5403                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5404             return -TARGET_ENOSYS;
5405         }
5406         if (ie->target_cmd == cmd)
5407             break;
5408         ie++;
5409     }
5410     arg_type = ie->arg_type;
5411     if (ie->do_ioctl) {
5412         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5413     } else if (!ie->host_cmd) {
5414         /* Some architectures define BSD ioctls in their headers
5415            that are not implemented in Linux.  */
5416         return -TARGET_ENOSYS;
5417     }
5418 
5419     switch(arg_type[0]) {
5420     case TYPE_NULL:
5421         /* no argument */
5422         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5423         break;
5424     case TYPE_PTRVOID:
5425     case TYPE_INT:
5426     case TYPE_LONG:
5427     case TYPE_ULONG:
5428         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5429         break;
5430     case TYPE_PTR:
5431         arg_type++;
5432         target_size = thunk_type_size(arg_type, 0);
5433         switch(ie->access) {
5434         case IOC_R:
5435             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5436             if (!is_error(ret)) {
5437                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5438                 if (!argptr)
5439                     return -TARGET_EFAULT;
5440                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5441                 unlock_user(argptr, arg, target_size);
5442             }
5443             break;
5444         case IOC_W:
5445             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5446             if (!argptr)
5447                 return -TARGET_EFAULT;
5448             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5449             unlock_user(argptr, arg, 0);
5450             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5451             break;
5452         default:
5453         case IOC_RW:
5454             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5455             if (!argptr)
5456                 return -TARGET_EFAULT;
5457             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5458             unlock_user(argptr, arg, 0);
5459             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5460             if (!is_error(ret)) {
5461                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5462                 if (!argptr)
5463                     return -TARGET_EFAULT;
5464                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5465                 unlock_user(argptr, arg, target_size);
5466             }
5467             break;
5468         }
5469         break;
5470     default:
5471         qemu_log_mask(LOG_UNIMP,
5472                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5473                       (long)cmd, arg_type[0]);
5474         ret = -TARGET_ENOSYS;
5475         break;
5476     }
5477     return ret;
5478 }
5479 
5480 static const bitmask_transtbl iflag_tbl[] = {
5481         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5482         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5483         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5484         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5485         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5486         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5487         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5488         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5489         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5490         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5491         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5492         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5493         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5494         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5495         { 0, 0, 0, 0 }
5496 };
5497 
5498 static const bitmask_transtbl oflag_tbl[] = {
5499 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5500 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5501 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5502 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5503 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5504 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5505 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5506 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5507 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5508 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5509 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5510 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5511 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5512 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5513 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5514 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5515 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5516 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5517 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5518 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5519 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5520 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5521 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5522 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5523 	{ 0, 0, 0, 0 }
5524 };
5525 
5526 static const bitmask_transtbl cflag_tbl[] = {
5527 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5528 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5529 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5530 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5531 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5532 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5533 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5534 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5535 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5536 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5537 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5538 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5539 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5540 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5541 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5542 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5543 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5544 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5545 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5546 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5547 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5548 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5549 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5550 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5551 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5552 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5553 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5554 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5555 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5556 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5557 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5558 	{ 0, 0, 0, 0 }
5559 };
5560 
5561 static const bitmask_transtbl lflag_tbl[] = {
5562 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5563 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5564 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5565 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5566 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5567 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5568 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5569 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5570 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5571 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5572 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5573 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5574 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5575 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5576 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5577 	{ 0, 0, 0, 0 }
5578 };
5579 
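     /*
      * Termios conversion: translate the iflag/oflag/cflag/lflag words through
      * the bitmask tables above and remap the c_cc control characters, whose
      * indices differ between target and host.
      */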
5580 static void target_to_host_termios (void *dst, const void *src)
5581 {
5582     struct host_termios *host = dst;
5583     const struct target_termios *target = src;
5584 
5585     host->c_iflag =
5586         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5587     host->c_oflag =
5588         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5589     host->c_cflag =
5590         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5591     host->c_lflag =
5592         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5593     host->c_line = target->c_line;
5594 
5595     memset(host->c_cc, 0, sizeof(host->c_cc));
5596     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5597     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5598     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5599     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5600     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5601     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5602     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5603     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5604     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5605     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5606     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5607     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5608     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5609     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5610     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5611     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5612     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5613 }
5614 
5615 static void host_to_target_termios (void *dst, const void *src)
5616 {
5617     struct target_termios *target = dst;
5618     const struct host_termios *host = src;
5619 
5620     target->c_iflag =
5621         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5622     target->c_oflag =
5623         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5624     target->c_cflag =
5625         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5626     target->c_lflag =
5627         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5628     target->c_line = host->c_line;
5629 
5630     memset(target->c_cc, 0, sizeof(target->c_cc));
5631     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5632     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5633     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5634     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5635     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5636     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5637     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5638     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5639     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5640     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5641     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5642     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5643     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5644     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5645     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5646     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5647     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5648 }
5649 
5650 static const StructEntry struct_termios_def = {
5651     .convert = { host_to_target_termios, target_to_host_termios },
5652     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5653     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5654 };
5655 
5656 static bitmask_transtbl mmap_flags_tbl[] = {
5657     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5658     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5659     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5660     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5661       MAP_ANONYMOUS, MAP_ANONYMOUS },
5662     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5663       MAP_GROWSDOWN, MAP_GROWSDOWN },
5664     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5665       MAP_DENYWRITE, MAP_DENYWRITE },
5666     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5667       MAP_EXECUTABLE, MAP_EXECUTABLE },
5668     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5669     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5670       MAP_NORESERVE, MAP_NORESERVE },
5671     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5672     /* MAP_STACK has been ignored by the kernel for quite some time.
5673        Recognize it for the target so that we do not pass it through
5674        to the host.  */
5675     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5676     { 0, 0, 0, 0 }
5677 };
5678 
5679 /*
5680  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5681  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5682  */
5683 #if defined(TARGET_I386)
5684 
5685 /* NOTE: there is really one LDT for all the threads */
5686 static uint8_t *ldt_table;
5687 
5688 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5689 {
5690     int size;
5691     void *p;
5692 
5693     if (!ldt_table)
5694         return 0;
5695     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5696     if (size > bytecount)
5697         size = bytecount;
5698     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5699     if (!p)
5700         return -TARGET_EFAULT;
5701     /* ??? Should this be byteswapped?  */
5702     memcpy(p, ldt_table, size);
5703     unlock_user(p, ptr, size);
5704     return size;
5705 }
5706 
5707 /* XXX: add locking support */
5708 static abi_long write_ldt(CPUX86State *env,
5709                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5710 {
5711     struct target_modify_ldt_ldt_s ldt_info;
5712     struct target_modify_ldt_ldt_s *target_ldt_info;
5713     int seg_32bit, contents, read_exec_only, limit_in_pages;
5714     int seg_not_present, useable, lm;
5715     uint32_t *lp, entry_1, entry_2;
5716 
5717     if (bytecount != sizeof(ldt_info))
5718         return -TARGET_EINVAL;
5719     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5720         return -TARGET_EFAULT;
5721     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5722     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5723     ldt_info.limit = tswap32(target_ldt_info->limit);
5724     ldt_info.flags = tswap32(target_ldt_info->flags);
5725     unlock_user_struct(target_ldt_info, ptr, 0);
5726 
5727     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5728         return -TARGET_EINVAL;
5729     seg_32bit = ldt_info.flags & 1;
5730     contents = (ldt_info.flags >> 1) & 3;
5731     read_exec_only = (ldt_info.flags >> 3) & 1;
5732     limit_in_pages = (ldt_info.flags >> 4) & 1;
5733     seg_not_present = (ldt_info.flags >> 5) & 1;
5734     useable = (ldt_info.flags >> 6) & 1;
5735 #ifdef TARGET_ABI32
5736     lm = 0;
5737 #else
5738     lm = (ldt_info.flags >> 7) & 1;
5739 #endif
5740     if (contents == 3) {
5741         if (oldmode)
5742             return -TARGET_EINVAL;
5743         if (seg_not_present == 0)
5744             return -TARGET_EINVAL;
5745     }
5746     /* allocate the LDT */
5747     if (!ldt_table) {
5748         env->ldt.base = target_mmap(0,
5749                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5750                                     PROT_READ|PROT_WRITE,
5751                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5752         if (env->ldt.base == -1)
5753             return -TARGET_ENOMEM;
5754         memset(g2h(env->ldt.base), 0,
5755                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5756         env->ldt.limit = 0xffff;
5757         ldt_table = g2h(env->ldt.base);
5758     }
5759 
5760     /* NOTE: same code as Linux kernel */
5761     /* Allow LDTs to be cleared by the user. */
5762     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5763         if (oldmode ||
5764             (contents == 0		&&
5765              read_exec_only == 1	&&
5766              seg_32bit == 0		&&
5767              limit_in_pages == 0	&&
5768              seg_not_present == 1	&&
5769              useable == 0 )) {
5770             entry_1 = 0;
5771             entry_2 = 0;
5772             goto install;
5773         }
5774     }
5775 
5776     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5777         (ldt_info.limit & 0x0ffff);
5778     entry_2 = (ldt_info.base_addr & 0xff000000) |
5779         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5780         (ldt_info.limit & 0xf0000) |
5781         ((read_exec_only ^ 1) << 9) |
5782         (contents << 10) |
5783         ((seg_not_present ^ 1) << 15) |
5784         (seg_32bit << 22) |
5785         (limit_in_pages << 23) |
5786         (lm << 21) |
5787         0x7000;
5788     if (!oldmode)
5789         entry_2 |= (useable << 20);
5790 
5791     /* Install the new entry ...  */
5792 install:
5793     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5794     lp[0] = tswap32(entry_1);
5795     lp[1] = tswap32(entry_2);
5796     return 0;
5797 }
5798 
5799 /* specific and weird i386 syscalls */
5800 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5801                               unsigned long bytecount)
5802 {
5803     abi_long ret;
5804 
5805     switch (func) {
5806     case 0:
5807         ret = read_ldt(ptr, bytecount);
5808         break;
5809     case 1:
5810         ret = write_ldt(env, ptr, bytecount, 1);
5811         break;
5812     case 0x11:
5813         ret = write_ldt(env, ptr, bytecount, 0);
5814         break;
5815     default:
5816         ret = -TARGET_ENOSYS;
5817         break;
5818     }
5819     return ret;
5820 }
5821 
5822 #if defined(TARGET_ABI32)
5823 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5824 {
5825     uint64_t *gdt_table = g2h(env->gdt.base);
5826     struct target_modify_ldt_ldt_s ldt_info;
5827     struct target_modify_ldt_ldt_s *target_ldt_info;
5828     int seg_32bit, contents, read_exec_only, limit_in_pages;
5829     int seg_not_present, useable, lm;
5830     uint32_t *lp, entry_1, entry_2;
5831     int i;
5832 
5833     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5834     if (!target_ldt_info)
5835         return -TARGET_EFAULT;
5836     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5837     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5838     ldt_info.limit = tswap32(target_ldt_info->limit);
5839     ldt_info.flags = tswap32(target_ldt_info->flags);
5840     if (ldt_info.entry_number == -1) {
5841         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5842             if (gdt_table[i] == 0) {
5843                 ldt_info.entry_number = i;
5844                 target_ldt_info->entry_number = tswap32(i);
5845                 break;
5846             }
5847         }
5848     }
5849     unlock_user_struct(target_ldt_info, ptr, 1);
5850 
5851     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5852         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5853            return -TARGET_EINVAL;
5854     seg_32bit = ldt_info.flags & 1;
5855     contents = (ldt_info.flags >> 1) & 3;
5856     read_exec_only = (ldt_info.flags >> 3) & 1;
5857     limit_in_pages = (ldt_info.flags >> 4) & 1;
5858     seg_not_present = (ldt_info.flags >> 5) & 1;
5859     useable = (ldt_info.flags >> 6) & 1;
5860 #ifdef TARGET_ABI32
5861     lm = 0;
5862 #else
5863     lm = (ldt_info.flags >> 7) & 1;
5864 #endif
5865 
5866     if (contents == 3) {
5867         if (seg_not_present == 0)
5868             return -TARGET_EINVAL;
5869     }
5870 
5871     /* NOTE: same code as Linux kernel */
5872     /* Allow LDTs to be cleared by the user. */
5873     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5874         if ((contents == 0             &&
5875              read_exec_only == 1       &&
5876              seg_32bit == 0            &&
5877              limit_in_pages == 0       &&
5878              seg_not_present == 1      &&
5879              useable == 0 )) {
5880             entry_1 = 0;
5881             entry_2 = 0;
5882             goto install;
5883         }
5884     }
5885 
5886     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5887         (ldt_info.limit & 0x0ffff);
5888     entry_2 = (ldt_info.base_addr & 0xff000000) |
5889         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5890         (ldt_info.limit & 0xf0000) |
5891         ((read_exec_only ^ 1) << 9) |
5892         (contents << 10) |
5893         ((seg_not_present ^ 1) << 15) |
5894         (seg_32bit << 22) |
5895         (limit_in_pages << 23) |
5896         (useable << 20) |
5897         (lm << 21) |
5898         0x7000;
5899 
5900     /* Install the new entry ...  */
5901 install:
5902     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5903     lp[0] = tswap32(entry_1);
5904     lp[1] = tswap32(entry_2);
5905     return 0;
5906 }
5907 
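     /*
      * Read a TLS entry back out of the emulated GDT and convert it into the
      * entry_number/base/limit/flags layout expected by the guest.
      */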
5908 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5909 {
5910     struct target_modify_ldt_ldt_s *target_ldt_info;
5911     uint64_t *gdt_table = g2h(env->gdt.base);
5912     uint32_t base_addr, limit, flags;
5913     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5914     int seg_not_present, useable, lm;
5915     uint32_t *lp, entry_1, entry_2;
5916 
5917     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5918     if (!target_ldt_info)
5919         return -TARGET_EFAULT;
5920     idx = tswap32(target_ldt_info->entry_number);
5921     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5922         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5923         unlock_user_struct(target_ldt_info, ptr, 1);
5924         return -TARGET_EINVAL;
5925     }
5926     lp = (uint32_t *)(gdt_table + idx);
5927     entry_1 = tswap32(lp[0]);
5928     entry_2 = tswap32(lp[1]);
5929 
5930     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5931     contents = (entry_2 >> 10) & 3;
5932     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5933     seg_32bit = (entry_2 >> 22) & 1;
5934     limit_in_pages = (entry_2 >> 23) & 1;
5935     useable = (entry_2 >> 20) & 1;
5936 #ifdef TARGET_ABI32
5937     lm = 0;
5938 #else
5939     lm = (entry_2 >> 21) & 1;
5940 #endif
5941     flags = (seg_32bit << 0) | (contents << 1) |
5942         (read_exec_only << 3) | (limit_in_pages << 4) |
5943         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5944     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5945     base_addr = (entry_1 >> 16) |
5946         (entry_2 & 0xff000000) |
5947         ((entry_2 & 0xff) << 16);
5948     target_ldt_info->base_addr = tswapal(base_addr);
5949     target_ldt_info->limit = tswap32(limit);
5950     target_ldt_info->flags = tswap32(flags);
5951     unlock_user_struct(target_ldt_info, ptr, 1);
5952     return 0;
5953 }
5954 
5955 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5956 {
5957     return -TARGET_ENOSYS;
5958 }
5959 #else
5960 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5961 {
5962     abi_long ret = 0;
5963     abi_ulong val;
5964     int idx;
5965 
5966     switch(code) {
5967     case TARGET_ARCH_SET_GS:
5968     case TARGET_ARCH_SET_FS:
5969         if (code == TARGET_ARCH_SET_GS)
5970             idx = R_GS;
5971         else
5972             idx = R_FS;
5973         cpu_x86_load_seg(env, idx, 0);
5974         env->segs[idx].base = addr;
5975         break;
5976     case TARGET_ARCH_GET_GS:
5977     case TARGET_ARCH_GET_FS:
5978         if (code == TARGET_ARCH_GET_GS)
5979             idx = R_GS;
5980         else
5981             idx = R_FS;
5982         val = env->segs[idx].base;
5983         if (put_user(val, addr, abi_ulong))
5984             ret = -TARGET_EFAULT;
5985         break;
5986     default:
5987         ret = -TARGET_EINVAL;
5988         break;
5989     }
5990     return ret;
5991 }
5992 #endif /* defined(TARGET_ABI32) */
5993 
5994 #endif /* defined(TARGET_I386) */
5995 
5996 #define NEW_STACK_SIZE 0x40000
5997 
5998 
5999 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6000 typedef struct {
6001     CPUArchState *env;
6002     pthread_mutex_t mutex;
6003     pthread_cond_t cond;
6004     pthread_t thread;
6005     uint32_t tid;
6006     abi_ulong child_tidptr;
6007     abi_ulong parent_tidptr;
6008     sigset_t sigmask;
6009 } new_thread_info;
6010 
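     /*
      * Start routine for threads created by do_fork() with CLONE_VM: register
      * with RCU and TCG, publish the new TID for the *_SETTID flags, signal the
      * parent that we are ready, then wait on clone_lock until the parent has
      * finished setting up TLS before entering cpu_loop().
      */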
6011 static void *clone_func(void *arg)
6012 {
6013     new_thread_info *info = arg;
6014     CPUArchState *env;
6015     CPUState *cpu;
6016     TaskState *ts;
6017 
6018     rcu_register_thread();
6019     tcg_register_thread();
6020     env = info->env;
6021     cpu = env_cpu(env);
6022     thread_cpu = cpu;
6023     ts = (TaskState *)cpu->opaque;
6024     info->tid = sys_gettid();
6025     task_settid(ts);
6026     if (info->child_tidptr)
6027         put_user_u32(info->tid, info->child_tidptr);
6028     if (info->parent_tidptr)
6029         put_user_u32(info->tid, info->parent_tidptr);
6030     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6031     /* Enable signals.  */
6032     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6033     /* Signal to the parent that we're ready.  */
6034     pthread_mutex_lock(&info->mutex);
6035     pthread_cond_broadcast(&info->cond);
6036     pthread_mutex_unlock(&info->mutex);
6037     /* Wait until the parent has finished initializing the tls state.  */
6038     pthread_mutex_lock(&clone_lock);
6039     pthread_mutex_unlock(&clone_lock);
6040     cpu_loop(env);
6041     /* never exits */
6042     return NULL;
6043 }
6044 
6045 /* do_fork() must return host values and target errnos (unlike most
6046    do_*() functions). */
6047 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6048                    abi_ulong parent_tidptr, target_ulong newtls,
6049                    abi_ulong child_tidptr)
6050 {
6051     CPUState *cpu = env_cpu(env);
6052     int ret;
6053     TaskState *ts;
6054     CPUState *new_cpu;
6055     CPUArchState *new_env;
6056     sigset_t sigmask;
6057 
6058     flags &= ~CLONE_IGNORED_FLAGS;
6059 
6060     /* Emulate vfork() with fork() */
6061     if (flags & CLONE_VFORK)
6062         flags &= ~(CLONE_VFORK | CLONE_VM);
6063 
6064     if (flags & CLONE_VM) {
6065         TaskState *parent_ts = (TaskState *)cpu->opaque;
6066         new_thread_info info;
6067         pthread_attr_t attr;
6068 
6069         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6070             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6071             return -TARGET_EINVAL;
6072         }
6073 
6074         ts = g_new0(TaskState, 1);
6075         init_task_state(ts);
6076 
6077         /* Grab a mutex so that thread setup appears atomic.  */
6078         pthread_mutex_lock(&clone_lock);
6079 
6080         /* we create a new CPU instance. */
6081         new_env = cpu_copy(env);
6082         /* Init regs that differ from the parent.  */
6083         cpu_clone_regs_child(new_env, newsp, flags);
6084         cpu_clone_regs_parent(env, flags);
6085         new_cpu = env_cpu(new_env);
6086         new_cpu->opaque = ts;
6087         ts->bprm = parent_ts->bprm;
6088         ts->info = parent_ts->info;
6089         ts->signal_mask = parent_ts->signal_mask;
6090 
6091         if (flags & CLONE_CHILD_CLEARTID) {
6092             ts->child_tidptr = child_tidptr;
6093         }
6094 
6095         if (flags & CLONE_SETTLS) {
6096             cpu_set_tls (new_env, newtls);
6097         }
6098 
6099         memset(&info, 0, sizeof(info));
6100         pthread_mutex_init(&info.mutex, NULL);
6101         pthread_mutex_lock(&info.mutex);
6102         pthread_cond_init(&info.cond, NULL);
6103         info.env = new_env;
6104         if (flags & CLONE_CHILD_SETTID) {
6105             info.child_tidptr = child_tidptr;
6106         }
6107         if (flags & CLONE_PARENT_SETTID) {
6108             info.parent_tidptr = parent_tidptr;
6109         }
6110 
6111         ret = pthread_attr_init(&attr);
6112         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6113         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6114         /* It is not safe to deliver signals until the child has finished
6115            initializing, so temporarily block all signals.  */
6116         sigfillset(&sigmask);
6117         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6118         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6119 
6120         /* If this is our first additional thread, we need to ensure we
6121          * generate code for parallel execution and flush old translations.
6122          */
6123         if (!parallel_cpus) {
6124             parallel_cpus = true;
6125             tb_flush(cpu);
6126         }
6127 
6128         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6129         /* TODO: Free new CPU state if thread creation failed.  */
6130 
6131         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6132         pthread_attr_destroy(&attr);
6133         if (ret == 0) {
6134             /* Wait for the child to initialize.  */
6135             pthread_cond_wait(&info.cond, &info.mutex);
6136             ret = info.tid;
6137         } else {
6138             ret = -1;
6139         }
6140         pthread_mutex_unlock(&info.mutex);
6141         pthread_cond_destroy(&info.cond);
6142         pthread_mutex_destroy(&info.mutex);
6143         pthread_mutex_unlock(&clone_lock);
6144     } else {
6145         /* if CLONE_VM is not set, we consider it a fork */
6146         if (flags & CLONE_INVALID_FORK_FLAGS) {
6147             return -TARGET_EINVAL;
6148         }
6149 
6150         /* We can't support custom termination signals */
6151         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6152             return -TARGET_EINVAL;
6153         }
6154 
6155         if (block_signals()) {
6156             return -TARGET_ERESTARTSYS;
6157         }
6158 
6159         fork_start();
6160         ret = fork();
6161         if (ret == 0) {
6162             /* Child Process.  */
6163             cpu_clone_regs_child(env, newsp, flags);
6164             fork_end(1);
6165             /* There is a race condition here.  The parent process could
6166                theoretically read the TID in the child process before the child
6167                tid is set.  This would require using either ptrace
6168                (not implemented) or having *_tidptr point at a shared memory
6169                mapping.  We can't repeat the spinlock hack used above because
6170                the child process gets its own copy of the lock.  */
6171             if (flags & CLONE_CHILD_SETTID)
6172                 put_user_u32(sys_gettid(), child_tidptr);
6173             if (flags & CLONE_PARENT_SETTID)
6174                 put_user_u32(sys_gettid(), parent_tidptr);
6175             ts = (TaskState *)cpu->opaque;
6176             if (flags & CLONE_SETTLS)
6177                 cpu_set_tls (env, newtls);
6178             if (flags & CLONE_CHILD_CLEARTID)
6179                 ts->child_tidptr = child_tidptr;
6180         } else {
6181             cpu_clone_regs_parent(env, flags);
6182             fork_end(0);
6183         }
6184     }
6185     return ret;
6186 }
6187 
6188 /* Warning: does not handle Linux-specific flags... */
6189 static int target_to_host_fcntl_cmd(int cmd)
6190 {
6191     int ret;
6192 
6193     switch(cmd) {
6194     case TARGET_F_DUPFD:
6195     case TARGET_F_GETFD:
6196     case TARGET_F_SETFD:
6197     case TARGET_F_GETFL:
6198     case TARGET_F_SETFL:
6199     case TARGET_F_OFD_GETLK:
6200     case TARGET_F_OFD_SETLK:
6201     case TARGET_F_OFD_SETLKW:
6202         ret = cmd;
6203         break;
6204     case TARGET_F_GETLK:
6205         ret = F_GETLK64;
6206         break;
6207     case TARGET_F_SETLK:
6208         ret = F_SETLK64;
6209         break;
6210     case TARGET_F_SETLKW:
6211         ret = F_SETLKW64;
6212         break;
6213     case TARGET_F_GETOWN:
6214         ret = F_GETOWN;
6215         break;
6216     case TARGET_F_SETOWN:
6217         ret = F_SETOWN;
6218         break;
6219     case TARGET_F_GETSIG:
6220         ret = F_GETSIG;
6221         break;
6222     case TARGET_F_SETSIG:
6223         ret = F_SETSIG;
6224         break;
6225 #if TARGET_ABI_BITS == 32
6226     case TARGET_F_GETLK64:
6227         ret = F_GETLK64;
6228         break;
6229     case TARGET_F_SETLK64:
6230         ret = F_SETLK64;
6231         break;
6232     case TARGET_F_SETLKW64:
6233         ret = F_SETLKW64;
6234         break;
6235 #endif
6236     case TARGET_F_SETLEASE:
6237         ret = F_SETLEASE;
6238         break;
6239     case TARGET_F_GETLEASE:
6240         ret = F_GETLEASE;
6241         break;
6242 #ifdef F_DUPFD_CLOEXEC
6243     case TARGET_F_DUPFD_CLOEXEC:
6244         ret = F_DUPFD_CLOEXEC;
6245         break;
6246 #endif
6247     case TARGET_F_NOTIFY:
6248         ret = F_NOTIFY;
6249         break;
6250 #ifdef F_GETOWN_EX
6251     case TARGET_F_GETOWN_EX:
6252         ret = F_GETOWN_EX;
6253         break;
6254 #endif
6255 #ifdef F_SETOWN_EX
6256     case TARGET_F_SETOWN_EX:
6257         ret = F_SETOWN_EX;
6258         break;
6259 #endif
6260 #ifdef F_SETPIPE_SZ
6261     case TARGET_F_SETPIPE_SZ:
6262         ret = F_SETPIPE_SZ;
6263         break;
6264     case TARGET_F_GETPIPE_SZ:
6265         ret = F_GETPIPE_SZ;
6266         break;
6267 #endif
6268     default:
6269         ret = -TARGET_EINVAL;
6270         break;
6271     }
6272 
6273 #if defined(__powerpc64__)
6274     /* On PPC64, the glibc headers have the F_*LK* constants defined as 12,
6275      * 13 and 14, which are not supported by the kernel. The glibc fcntl call
6276      * actually adjusts them to 5, 6 and 7 before making the syscall(). Since
6277      * we make the syscall directly, adjust to what the kernel supports.
6278      */
6279     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6280         ret -= F_GETLK64 - 5;
6281     }
6282 #endif
6283 
6284     return ret;
6285 }
6286 
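/*
 * Translation of flock lock-type constants between target and host.
 * FLOCK_TRANSTBL expands to a switch body; TRANSTBL_CONVERT is redefined
 * below to generate each direction of the mapping.
 */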
6287 #define FLOCK_TRANSTBL \
6288     switch (type) { \
6289     TRANSTBL_CONVERT(F_RDLCK); \
6290     TRANSTBL_CONVERT(F_WRLCK); \
6291     TRANSTBL_CONVERT(F_UNLCK); \
6292     TRANSTBL_CONVERT(F_EXLCK); \
6293     TRANSTBL_CONVERT(F_SHLCK); \
6294     }
6295 
6296 static int target_to_host_flock(int type)
6297 {
6298 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6299     FLOCK_TRANSTBL
6300 #undef  TRANSTBL_CONVERT
6301     return -TARGET_EINVAL;
6302 }
6303 
6304 static int host_to_target_flock(int type)
6305 {
6306 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6307     FLOCK_TRANSTBL
6308 #undef  TRANSTBL_CONVERT
6309     /* if we don't know how to convert the value coming
6310      * from the host, we copy it to the target field as-is
6311      */
6312     return type;
6313 }
6314 
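/*
 * Copy a struct flock between guest and host representations.  Only the
 * lock type needs translating; the remaining fields are copied with the
 * byte swapping done by __get_user()/__put_user().
 */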
6315 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6316                                             abi_ulong target_flock_addr)
6317 {
6318     struct target_flock *target_fl;
6319     int l_type;
6320 
6321     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6322         return -TARGET_EFAULT;
6323     }
6324 
6325     __get_user(l_type, &target_fl->l_type);
6326     l_type = target_to_host_flock(l_type);
6327     if (l_type < 0) {
6328         return l_type;
6329     }
6330     fl->l_type = l_type;
6331     __get_user(fl->l_whence, &target_fl->l_whence);
6332     __get_user(fl->l_start, &target_fl->l_start);
6333     __get_user(fl->l_len, &target_fl->l_len);
6334     __get_user(fl->l_pid, &target_fl->l_pid);
6335     unlock_user_struct(target_fl, target_flock_addr, 0);
6336     return 0;
6337 }
6338 
6339 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6340                                           const struct flock64 *fl)
6341 {
6342     struct target_flock *target_fl;
6343     short l_type;
6344 
6345     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6346         return -TARGET_EFAULT;
6347     }
6348 
6349     l_type = host_to_target_flock(fl->l_type);
6350     __put_user(l_type, &target_fl->l_type);
6351     __put_user(fl->l_whence, &target_fl->l_whence);
6352     __put_user(fl->l_start, &target_fl->l_start);
6353     __put_user(fl->l_len, &target_fl->l_len);
6354     __put_user(fl->l_pid, &target_fl->l_pid);
6355     unlock_user_struct(target_fl, target_flock_addr, 1);
6356     return 0;
6357 }
6358 
6359 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6360 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6361 
6362 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6363 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6364                                                    abi_ulong target_flock_addr)
6365 {
6366     struct target_oabi_flock64 *target_fl;
6367     int l_type;
6368 
6369     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6370         return -TARGET_EFAULT;
6371     }
6372 
6373     __get_user(l_type, &target_fl->l_type);
6374     l_type = target_to_host_flock(l_type);
6375     if (l_type < 0) {
6376         return l_type;
6377     }
6378     fl->l_type = l_type;
6379     __get_user(fl->l_whence, &target_fl->l_whence);
6380     __get_user(fl->l_start, &target_fl->l_start);
6381     __get_user(fl->l_len, &target_fl->l_len);
6382     __get_user(fl->l_pid, &target_fl->l_pid);
6383     unlock_user_struct(target_fl, target_flock_addr, 0);
6384     return 0;
6385 }
6386 
6387 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6388                                                  const struct flock64 *fl)
6389 {
6390     struct target_oabi_flock64 *target_fl;
6391     short l_type;
6392 
6393     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6394         return -TARGET_EFAULT;
6395     }
6396 
6397     l_type = host_to_target_flock(fl->l_type);
6398     __put_user(l_type, &target_fl->l_type);
6399     __put_user(fl->l_whence, &target_fl->l_whence);
6400     __put_user(fl->l_start, &target_fl->l_start);
6401     __put_user(fl->l_len, &target_fl->l_len);
6402     __put_user(fl->l_pid, &target_fl->l_pid);
6403     unlock_user_struct(target_fl, target_flock_addr, 1);
6404     return 0;
6405 }
6406 #endif
6407 
6408 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6409                                               abi_ulong target_flock_addr)
6410 {
6411     struct target_flock64 *target_fl;
6412     int l_type;
6413 
6414     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6415         return -TARGET_EFAULT;
6416     }
6417 
6418     __get_user(l_type, &target_fl->l_type);
6419     l_type = target_to_host_flock(l_type);
6420     if (l_type < 0) {
6421         return l_type;
6422     }
6423     fl->l_type = l_type;
6424     __get_user(fl->l_whence, &target_fl->l_whence);
6425     __get_user(fl->l_start, &target_fl->l_start);
6426     __get_user(fl->l_len, &target_fl->l_len);
6427     __get_user(fl->l_pid, &target_fl->l_pid);
6428     unlock_user_struct(target_fl, target_flock_addr, 0);
6429     return 0;
6430 }
6431 
6432 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6433                                             const struct flock64 *fl)
6434 {
6435     struct target_flock64 *target_fl;
6436     short l_type;
6437 
6438     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6439         return -TARGET_EFAULT;
6440     }
6441 
6442     l_type = host_to_target_flock(fl->l_type);
6443     __put_user(l_type, &target_fl->l_type);
6444     __put_user(fl->l_whence, &target_fl->l_whence);
6445     __put_user(fl->l_start, &target_fl->l_start);
6446     __put_user(fl->l_len, &target_fl->l_len);
6447     __put_user(fl->l_pid, &target_fl->l_pid);
6448     unlock_user_struct(target_fl, target_flock_addr, 1);
6449     return 0;
6450 }
6451 
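/*
 * Emulate fcntl(): translate the command and any struct flock or
 * f_owner_ex argument to the host format, issue the host fcntl, and
 * convert results (including output structures) back to the target.
 */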
6452 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6453 {
6454     struct flock64 fl64;
6455 #ifdef F_GETOWN_EX
6456     struct f_owner_ex fox;
6457     struct target_f_owner_ex *target_fox;
6458 #endif
6459     abi_long ret;
6460     int host_cmd = target_to_host_fcntl_cmd(cmd);
6461 
6462     if (host_cmd == -TARGET_EINVAL)
6463         return host_cmd;
6464 
6465     switch(cmd) {
6466     case TARGET_F_GETLK:
6467         ret = copy_from_user_flock(&fl64, arg);
6468         if (ret) {
6469             return ret;
6470         }
6471         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6472         if (ret == 0) {
6473             ret = copy_to_user_flock(arg, &fl64);
6474         }
6475         break;
6476 
6477     case TARGET_F_SETLK:
6478     case TARGET_F_SETLKW:
6479         ret = copy_from_user_flock(&fl64, arg);
6480         if (ret) {
6481             return ret;
6482         }
6483         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6484         break;
6485 
6486     case TARGET_F_GETLK64:
6487     case TARGET_F_OFD_GETLK:
6488         ret = copy_from_user_flock64(&fl64, arg);
6489         if (ret) {
6490             return ret;
6491         }
6492         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6493         if (ret == 0) {
6494             ret = copy_to_user_flock64(arg, &fl64);
6495         }
6496         break;
6497     case TARGET_F_SETLK64:
6498     case TARGET_F_SETLKW64:
6499     case TARGET_F_OFD_SETLK:
6500     case TARGET_F_OFD_SETLKW:
6501         ret = copy_from_user_flock64(&fl64, arg);
6502         if (ret) {
6503             return ret;
6504         }
6505         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6506         break;
6507 
6508     case TARGET_F_GETFL:
6509         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6510         if (ret >= 0) {
6511             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6512         }
6513         break;
6514 
6515     case TARGET_F_SETFL:
6516         ret = get_errno(safe_fcntl(fd, host_cmd,
6517                                    target_to_host_bitmask(arg,
6518                                                           fcntl_flags_tbl)));
6519         break;
6520 
6521 #ifdef F_GETOWN_EX
6522     case TARGET_F_GETOWN_EX:
6523         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6524         if (ret >= 0) {
6525             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6526                 return -TARGET_EFAULT;
6527             target_fox->type = tswap32(fox.type);
6528             target_fox->pid = tswap32(fox.pid);
6529             unlock_user_struct(target_fox, arg, 1);
6530         }
6531         break;
6532 #endif
6533 
6534 #ifdef F_SETOWN_EX
6535     case TARGET_F_SETOWN_EX:
6536         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6537             return -TARGET_EFAULT;
6538         fox.type = tswap32(target_fox->type);
6539         fox.pid = tswap32(target_fox->pid);
6540         unlock_user_struct(target_fox, arg, 0);
6541         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6542         break;
6543 #endif
6544 
6545     case TARGET_F_SETOWN:
6546     case TARGET_F_GETOWN:
6547     case TARGET_F_SETSIG:
6548     case TARGET_F_GETSIG:
6549     case TARGET_F_SETLEASE:
6550     case TARGET_F_GETLEASE:
6551     case TARGET_F_SETPIPE_SZ:
6552     case TARGET_F_GETPIPE_SZ:
6553         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6554         break;
6555 
6556     default:
6557         ret = get_errno(safe_fcntl(fd, cmd, arg));
6558         break;
6559     }
6560     return ret;
6561 }
6562 
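/*
 * UID/GID conversion helpers.  With USE_UID16 the target ABI uses the
 * legacy 16-bit IDs: values above 65535 are clamped to 65534 on the way
 * down, and a 16-bit -1 is widened back to -1 on the way up.  Without
 * USE_UID16 the IDs pass through unchanged.
 */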
6563 #ifdef USE_UID16
6564 
6565 static inline int high2lowuid(int uid)
6566 {
6567     if (uid > 65535)
6568         return 65534;
6569     else
6570         return uid;
6571 }
6572 
6573 static inline int high2lowgid(int gid)
6574 {
6575     if (gid > 65535)
6576         return 65534;
6577     else
6578         return gid;
6579 }
6580 
6581 static inline int low2highuid(int uid)
6582 {
6583     if ((int16_t)uid == -1)
6584         return -1;
6585     else
6586         return uid;
6587 }
6588 
6589 static inline int low2highgid(int gid)
6590 {
6591     if ((int16_t)gid == -1)
6592         return -1;
6593     else
6594         return gid;
6595 }
6596 static inline int tswapid(int id)
6597 {
6598     return tswap16(id);
6599 }
6600 
6601 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6602 
6603 #else /* !USE_UID16 */
6604 static inline int high2lowuid(int uid)
6605 {
6606     return uid;
6607 }
6608 static inline int high2lowgid(int gid)
6609 {
6610     return gid;
6611 }
6612 static inline int low2highuid(int uid)
6613 {
6614     return uid;
6615 }
6616 static inline int low2highgid(int gid)
6617 {
6618     return gid;
6619 }
6620 static inline int tswapid(int id)
6621 {
6622     return tswap32(id);
6623 }
6624 
6625 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6626 
6627 #endif /* USE_UID16 */
6628 
6629 /* We must do direct syscalls for setting UID/GID, because we want to
6630  * implement the Linux system call semantics of "change only for this thread",
6631  * not the libc/POSIX semantics of "change for all threads in process".
6632  * (See http://ewontfix.com/17/ for more details.)
6633  * We use the 32-bit version of the syscalls if present; if it is not
6634  * then either the host architecture supports 32-bit UIDs natively with
6635  * the standard syscall, or the 16-bit UID is the best we can do.
6636  */
6637 #ifdef __NR_setuid32
6638 #define __NR_sys_setuid __NR_setuid32
6639 #else
6640 #define __NR_sys_setuid __NR_setuid
6641 #endif
6642 #ifdef __NR_setgid32
6643 #define __NR_sys_setgid __NR_setgid32
6644 #else
6645 #define __NR_sys_setgid __NR_setgid
6646 #endif
6647 #ifdef __NR_setresuid32
6648 #define __NR_sys_setresuid __NR_setresuid32
6649 #else
6650 #define __NR_sys_setresuid __NR_setresuid
6651 #endif
6652 #ifdef __NR_setresgid32
6653 #define __NR_sys_setresgid __NR_setresgid32
6654 #else
6655 #define __NR_sys_setresgid __NR_setresgid
6656 #endif
6657 
6658 _syscall1(int, sys_setuid, uid_t, uid)
6659 _syscall1(int, sys_setgid, gid_t, gid)
6660 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6661 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6662 
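/*
 * One-time initialisation: register structure layouts with the thunk
 * code, build the errno translation table, and patch the size field of
 * ioctl numbers that are registered with the all-ones size marker.
 */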
6663 void syscall_init(void)
6664 {
6665     IOCTLEntry *ie;
6666     const argtype *arg_type;
6667     int size;
6668     int i;
6669 
6670     thunk_init(STRUCT_MAX);
6671 
6672 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6673 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6674 #include "syscall_types.h"
6675 #undef STRUCT
6676 #undef STRUCT_SPECIAL
6677 
6678     /* Build the target_to_host_errno_table[] from
6679      * host_to_target_errno_table[]. */
6680     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6681         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6682     }
6683 
6684     /* We patch the ioctl size if necessary. We rely on the fact that
6685        no ioctl has all bits set to '1' in the size field. */
6686     ie = ioctl_entries;
6687     while (ie->target_cmd != 0) {
6688         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6689             TARGET_IOC_SIZEMASK) {
6690             arg_type = ie->arg_type;
6691             if (arg_type[0] != TYPE_PTR) {
6692                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6693                         ie->target_cmd);
6694                 exit(1);
6695             }
6696             arg_type++;
6697             size = thunk_type_size(arg_type, 0);
6698             ie->target_cmd = (ie->target_cmd &
6699                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6700                 (size << TARGET_IOC_SIZESHIFT);
6701         }
6702 
6703         /* automatic consistency check if same arch */
6704 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6705     (defined(__x86_64__) && defined(TARGET_X86_64))
6706         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6707             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6708                     ie->name, ie->target_cmd, ie->host_cmd);
6709         }
6710 #endif
6711         ie++;
6712     }
6713 }
6714 
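/*
 * truncate64/ftruncate64 pass a 64-bit length in two consecutive argument
 * registers; on ABIs that align such register pairs the pair is shifted up
 * by one slot, which regpairs_aligned() detects.
 */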
6715 #ifdef TARGET_NR_truncate64
6716 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6717                                          abi_long arg2,
6718                                          abi_long arg3,
6719                                          abi_long arg4)
6720 {
6721     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6722         arg2 = arg3;
6723         arg3 = arg4;
6724     }
6725     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6726 }
6727 #endif
6728 
6729 #ifdef TARGET_NR_ftruncate64
6730 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6731                                           abi_long arg2,
6732                                           abi_long arg3,
6733                                           abi_long arg4)
6734 {
6735     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6736         arg2 = arg3;
6737         arg3 = arg4;
6738     }
6739     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6740 }
6741 #endif
6742 
6743 #if defined(TARGET_NR_timer_settime) || \
6744     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6745 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6746                                                  abi_ulong target_addr)
6747 {
6748     struct target_itimerspec *target_itspec;
6749 
6750     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6751         return -TARGET_EFAULT;
6752     }
6753 
6754     host_itspec->it_interval.tv_sec =
6755                             tswapal(target_itspec->it_interval.tv_sec);
6756     host_itspec->it_interval.tv_nsec =
6757                             tswapal(target_itspec->it_interval.tv_nsec);
6758     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6759     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6760 
6761     unlock_user_struct(target_itspec, target_addr, 1);
6762     return 0;
6763 }
6764 #endif
6765 
6766 #if ((defined(TARGET_NR_timerfd_gettime) || \
6767       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6768     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6769 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6770                                                struct itimerspec *host_its)
6771 {
6772     struct target_itimerspec *target_itspec;
6773 
6774     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6775         return -TARGET_EFAULT;
6776     }
6777 
6778     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6779     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6780 
6781     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6782     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6783 
6784     unlock_user_struct(target_itspec, target_addr, 0);
6785     return 0;
6786 }
6787 #endif
6788 
6789 #if defined(TARGET_NR_adjtimex) || \
6790     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6791 static inline abi_long target_to_host_timex(struct timex *host_tx,
6792                                             abi_long target_addr)
6793 {
6794     struct target_timex *target_tx;
6795 
6796     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6797         return -TARGET_EFAULT;
6798     }
6799 
6800     __get_user(host_tx->modes, &target_tx->modes);
6801     __get_user(host_tx->offset, &target_tx->offset);
6802     __get_user(host_tx->freq, &target_tx->freq);
6803     __get_user(host_tx->maxerror, &target_tx->maxerror);
6804     __get_user(host_tx->esterror, &target_tx->esterror);
6805     __get_user(host_tx->status, &target_tx->status);
6806     __get_user(host_tx->constant, &target_tx->constant);
6807     __get_user(host_tx->precision, &target_tx->precision);
6808     __get_user(host_tx->tolerance, &target_tx->tolerance);
6809     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6810     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6811     __get_user(host_tx->tick, &target_tx->tick);
6812     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6813     __get_user(host_tx->jitter, &target_tx->jitter);
6814     __get_user(host_tx->shift, &target_tx->shift);
6815     __get_user(host_tx->stabil, &target_tx->stabil);
6816     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6817     __get_user(host_tx->calcnt, &target_tx->calcnt);
6818     __get_user(host_tx->errcnt, &target_tx->errcnt);
6819     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6820     __get_user(host_tx->tai, &target_tx->tai);
6821 
6822     unlock_user_struct(target_tx, target_addr, 0);
6823     return 0;
6824 }
6825 
6826 static inline abi_long host_to_target_timex(abi_long target_addr,
6827                                             struct timex *host_tx)
6828 {
6829     struct target_timex *target_tx;
6830 
6831     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6832         return -TARGET_EFAULT;
6833     }
6834 
6835     __put_user(host_tx->modes, &target_tx->modes);
6836     __put_user(host_tx->offset, &target_tx->offset);
6837     __put_user(host_tx->freq, &target_tx->freq);
6838     __put_user(host_tx->maxerror, &target_tx->maxerror);
6839     __put_user(host_tx->esterror, &target_tx->esterror);
6840     __put_user(host_tx->status, &target_tx->status);
6841     __put_user(host_tx->constant, &target_tx->constant);
6842     __put_user(host_tx->precision, &target_tx->precision);
6843     __put_user(host_tx->tolerance, &target_tx->tolerance);
6844     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6845     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6846     __put_user(host_tx->tick, &target_tx->tick);
6847     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6848     __put_user(host_tx->jitter, &target_tx->jitter);
6849     __put_user(host_tx->shift, &target_tx->shift);
6850     __put_user(host_tx->stabil, &target_tx->stabil);
6851     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6852     __put_user(host_tx->calcnt, &target_tx->calcnt);
6853     __put_user(host_tx->errcnt, &target_tx->errcnt);
6854     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6855     __put_user(host_tx->tai, &target_tx->tai);
6856 
6857     unlock_user_struct(target_tx, target_addr, 1);
6858     return 0;
6859 }
6860 #endif
6861 
6862 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6863                                                abi_ulong target_addr)
6864 {
6865     struct target_sigevent *target_sevp;
6866 
6867     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6868         return -TARGET_EFAULT;
6869     }
6870 
6871     /* This union is awkward on 64 bit systems because it has a 32 bit
6872      * integer and a pointer in it; we follow the conversion approach
6873      * used for handling sigval types in signal.c so the guest should get
6874      * the correct value back even if we did a 64 bit byteswap and it's
6875      * using the 32 bit integer.
6876      */
6877     host_sevp->sigev_value.sival_ptr =
6878         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6879     host_sevp->sigev_signo =
6880         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6881     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6882     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6883 
6884     unlock_user_struct(target_sevp, target_addr, 1);
6885     return 0;
6886 }
6887 
6888 #if defined(TARGET_NR_mlockall)
6889 static inline int target_to_host_mlockall_arg(int arg)
6890 {
6891     int result = 0;
6892 
6893     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6894         result |= MCL_CURRENT;
6895     }
6896     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6897         result |= MCL_FUTURE;
6898     }
6899     return result;
6900 }
6901 #endif
6902 
6903 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6904      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6905      defined(TARGET_NR_newfstatat))
6906 static inline abi_long host_to_target_stat64(void *cpu_env,
6907                                              abi_ulong target_addr,
6908                                              struct stat *host_st)
6909 {
6910 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6911     if (((CPUARMState *)cpu_env)->eabi) {
6912         struct target_eabi_stat64 *target_st;
6913 
6914         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6915             return -TARGET_EFAULT;
6916         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6917         __put_user(host_st->st_dev, &target_st->st_dev);
6918         __put_user(host_st->st_ino, &target_st->st_ino);
6919 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6920         __put_user(host_st->st_ino, &target_st->__st_ino);
6921 #endif
6922         __put_user(host_st->st_mode, &target_st->st_mode);
6923         __put_user(host_st->st_nlink, &target_st->st_nlink);
6924         __put_user(host_st->st_uid, &target_st->st_uid);
6925         __put_user(host_st->st_gid, &target_st->st_gid);
6926         __put_user(host_st->st_rdev, &target_st->st_rdev);
6927         __put_user(host_st->st_size, &target_st->st_size);
6928         __put_user(host_st->st_blksize, &target_st->st_blksize);
6929         __put_user(host_st->st_blocks, &target_st->st_blocks);
6930         __put_user(host_st->st_atime, &target_st->target_st_atime);
6931         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6932         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6933 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6934         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6935         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6936         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6937 #endif
6938         unlock_user_struct(target_st, target_addr, 1);
6939     } else
6940 #endif
6941     {
6942 #if defined(TARGET_HAS_STRUCT_STAT64)
6943         struct target_stat64 *target_st;
6944 #else
6945         struct target_stat *target_st;
6946 #endif
6947 
6948         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6949             return -TARGET_EFAULT;
6950         memset(target_st, 0, sizeof(*target_st));
6951         __put_user(host_st->st_dev, &target_st->st_dev);
6952         __put_user(host_st->st_ino, &target_st->st_ino);
6953 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6954         __put_user(host_st->st_ino, &target_st->__st_ino);
6955 #endif
6956         __put_user(host_st->st_mode, &target_st->st_mode);
6957         __put_user(host_st->st_nlink, &target_st->st_nlink);
6958         __put_user(host_st->st_uid, &target_st->st_uid);
6959         __put_user(host_st->st_gid, &target_st->st_gid);
6960         __put_user(host_st->st_rdev, &target_st->st_rdev);
6961         /* XXX: better use of kernel struct */
6962         __put_user(host_st->st_size, &target_st->st_size);
6963         __put_user(host_st->st_blksize, &target_st->st_blksize);
6964         __put_user(host_st->st_blocks, &target_st->st_blocks);
6965         __put_user(host_st->st_atime, &target_st->target_st_atime);
6966         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6967         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6968 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6969         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6970         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6971         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6972 #endif
6973         unlock_user_struct(target_st, target_addr, 1);
6974     }
6975 
6976     return 0;
6977 }
6978 #endif
6979 
6980 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6981 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6982                                             abi_ulong target_addr)
6983 {
6984     struct target_statx *target_stx;
6985 
6986     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
6987         return -TARGET_EFAULT;
6988     }
6989     memset(target_stx, 0, sizeof(*target_stx));
6990 
6991     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6992     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6993     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6994     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6995     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6996     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6997     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6998     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6999     __put_user(host_stx->stx_size, &target_stx->stx_size);
7000     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7001     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7002     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7003     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7004     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7005     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7006     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7007     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7008     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7009     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7010     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7011     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7012     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7013     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7014 
7015     unlock_user_struct(target_stx, target_addr, 1);
7016 
7017     return 0;
7018 }
7019 #endif
7020 
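/*
 * Raw futex wrapper: on 64-bit hosts use the plain futex syscall; on
 * 32-bit hosts prefer futex_time64 when the host timespec carries a
 * 64-bit tv_sec, otherwise fall back to the old futex syscall.
 */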
7021 static int do_sys_futex(int *uaddr, int op, int val,
7022                          const struct timespec *timeout, int *uaddr2,
7023                          int val3)
7024 {
7025 #if HOST_LONG_BITS == 64
7026 #if defined(__NR_futex)
7027     /* time_t is always 64 bits here, so no _time64 variant is defined */
7028     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7029 
7030 #endif
7031 #else /* HOST_LONG_BITS == 64 */
7032 #if defined(__NR_futex_time64)
7033     if (sizeof(timeout->tv_sec) == 8) {
7034         /* _time64 function on 32bit arch */
7035         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7036     }
7037 #endif
7038 #if defined(__NR_futex)
7039     /* old function on 32bit arch */
7040     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7041 #endif
7042 #endif /* HOST_LONG_BITS == 64 */
7043     g_assert_not_reached();
7044 }
7045 
7046 static int do_safe_futex(int *uaddr, int op, int val,
7047                          const struct timespec *timeout, int *uaddr2,
7048                          int val3)
7049 {
7050 #if HOST_LONG_BITS == 64
7051 #if defined(__NR_futex)
7052     /* time_t is always 64 bits here, so no _time64 variant is defined */
7053     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7054 #endif
7055 #else /* HOST_LONG_BITS == 64 */
7056 #if defined(__NR_futex_time64)
7057     if (sizeof(timeout->tv_sec) == 8) {
7058         /* _time64 function on 32bit arch */
7059         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7060                                            val3));
7061     }
7062 #endif
7063 #if defined(__NR_futex)
7064     /* old function on 32bit arch */
7065     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7066 #endif
7067 #endif /* HOST_LONG_BITS == 64 */
7068     return -TARGET_ENOSYS;
7069 }
7070 
7071 /* ??? Using host futex calls even when target atomic operations
7072    are not really atomic probably breaks things.  However, implementing
7073    futexes locally would make futexes shared between multiple processes
7074    tricky.  Such shared futexes are probably useless anyway, because guest
7075    atomic operations won't work either.  */
7076 #if defined(TARGET_NR_futex)
7077 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7078                     target_ulong uaddr2, int val3)
7079 {
7080     struct timespec ts, *pts;
7081     int base_op;
7082 
7083     /* ??? We assume FUTEX_* constants are the same on both host
7084        and target.  */
7085 #ifdef FUTEX_CMD_MASK
7086     base_op = op & FUTEX_CMD_MASK;
7087 #else
7088     base_op = op;
7089 #endif
7090     switch (base_op) {
7091     case FUTEX_WAIT:
7092     case FUTEX_WAIT_BITSET:
7093         if (timeout) {
7094             pts = &ts;
7095             target_to_host_timespec(pts, timeout);
7096         } else {
7097             pts = NULL;
7098         }
7099         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7100     case FUTEX_WAKE:
7101         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7102     case FUTEX_FD:
7103         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7104     case FUTEX_REQUEUE:
7105     case FUTEX_CMP_REQUEUE:
7106     case FUTEX_WAKE_OP:
7107         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7108            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7109            But the prototype takes a `struct timespec *'; insert casts
7110            to satisfy the compiler.  We do not need to tswap TIMEOUT
7111            since it's not compared to guest memory.  */
7112         pts = (struct timespec *)(uintptr_t) timeout;
7113         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7114                              (base_op == FUTEX_CMP_REQUEUE
7115                                       ? tswap32(val3)
7116                                       : val3));
7117     default:
7118         return -TARGET_ENOSYS;
7119     }
7120 }
7121 #endif
7122 
7123 #if defined(TARGET_NR_futex_time64)
7124 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7125                            target_ulong uaddr2, int val3)
7126 {
7127     struct timespec ts, *pts;
7128     int base_op;
7129 
7130     /* ??? We assume FUTEX_* constants are the same on both host
7131        and target.  */
7132 #ifdef FUTEX_CMD_MASK
7133     base_op = op & FUTEX_CMD_MASK;
7134 #else
7135     base_op = op;
7136 #endif
7137     switch (base_op) {
7138     case FUTEX_WAIT:
7139     case FUTEX_WAIT_BITSET:
7140         if (timeout) {
7141             pts = &ts;
7142             target_to_host_timespec64(pts, timeout);
7143         } else {
7144             pts = NULL;
7145         }
7146         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7147     case FUTEX_WAKE:
7148         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7149     case FUTEX_FD:
7150         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7151     case FUTEX_REQUEUE:
7152     case FUTEX_CMP_REQUEUE:
7153     case FUTEX_WAKE_OP:
7154         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7155            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7156            But the prototype takes a `struct timespec *'; insert casts
7157            to satisfy the compiler.  We do not need to tswap TIMEOUT
7158            since it's not compared to guest memory.  */
7159         pts = (struct timespec *)(uintptr_t) timeout;
7160         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7161                              (base_op == FUTEX_CMP_REQUEUE
7162                                       ? tswap32(val3)
7163                                       : val3));
7164     default:
7165         return -TARGET_ENOSYS;
7166     }
7167 }
7168 #endif
7169 
7170 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
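/*
 * Emulate name_to_handle_at().  The handle payload is opaque and copied
 * as-is; only handle_bytes and handle_type are byte-swapped, and the
 * resulting mount id is written back to the guest.
 */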
7171 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7172                                      abi_long handle, abi_long mount_id,
7173                                      abi_long flags)
7174 {
7175     struct file_handle *target_fh;
7176     struct file_handle *fh;
7177     int mid = 0;
7178     abi_long ret;
7179     char *name;
7180     unsigned int size, total_size;
7181 
7182     if (get_user_s32(size, handle)) {
7183         return -TARGET_EFAULT;
7184     }
7185 
7186     name = lock_user_string(pathname);
7187     if (!name) {
7188         return -TARGET_EFAULT;
7189     }
7190 
7191     total_size = sizeof(struct file_handle) + size;
7192     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7193     if (!target_fh) {
7194         unlock_user(name, pathname, 0);
7195         return -TARGET_EFAULT;
7196     }
7197 
7198     fh = g_malloc0(total_size);
7199     fh->handle_bytes = size;
7200 
7201     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7202     unlock_user(name, pathname, 0);
7203 
7204     /* man name_to_handle_at(2):
7205      * Other than the use of the handle_bytes field, the caller should treat
7206      * the file_handle structure as an opaque data type
7207      */
7208 
7209     memcpy(target_fh, fh, total_size);
7210     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7211     target_fh->handle_type = tswap32(fh->handle_type);
7212     g_free(fh);
7213     unlock_user(target_fh, handle, total_size);
7214 
7215     if (put_user_s32(mid, mount_id)) {
7216         return -TARGET_EFAULT;
7217     }
7218 
7219     return ret;
7220 
7221 }
7222 #endif
7223 
7224 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
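/*
 * Emulate open_by_handle_at().  The guest handle is duplicated into host
 * memory with handle_bytes and handle_type fixed up, and the open flags
 * are translated through fcntl_flags_tbl.
 */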
7225 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7226                                      abi_long flags)
7227 {
7228     struct file_handle *target_fh;
7229     struct file_handle *fh;
7230     unsigned int size, total_size;
7231     abi_long ret;
7232 
7233     if (get_user_s32(size, handle)) {
7234         return -TARGET_EFAULT;
7235     }
7236 
7237     total_size = sizeof(struct file_handle) + size;
7238     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7239     if (!target_fh) {
7240         return -TARGET_EFAULT;
7241     }
7242 
7243     fh = g_memdup(target_fh, total_size);
7244     fh->handle_bytes = size;
7245     fh->handle_type = tswap32(target_fh->handle_type);
7246 
7247     ret = get_errno(open_by_handle_at(mount_fd, fh,
7248                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7249 
7250     g_free(fh);
7251 
7252     unlock_user(target_fh, handle, total_size);
7253 
7254     return ret;
7255 }
7256 #endif
7257 
7258 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7259 
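/*
 * Emulate signalfd4(): convert the guest signal mask and the
 * O_NONBLOCK/O_CLOEXEC flags to host values, create the signalfd, and
 * register a translator (target_signalfd_trans) for data read from it.
 */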
7260 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7261 {
7262     int host_flags;
7263     target_sigset_t *target_mask;
7264     sigset_t host_mask;
7265     abi_long ret;
7266 
7267     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7268         return -TARGET_EINVAL;
7269     }
7270     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7271         return -TARGET_EFAULT;
7272     }
7273 
7274     target_to_host_sigset(&host_mask, target_mask);
7275 
7276     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7277 
7278     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7279     if (ret >= 0) {
7280         fd_trans_register(ret, &target_signalfd_trans);
7281     }
7282 
7283     unlock_user_struct(target_mask, mask, 0);
7284 
7285     return ret;
7286 }
7287 #endif
7288 
7289 /* Map host to target signal numbers for the wait family of syscalls.
7290    Assume all other status bits are the same.  */
7291 int host_to_target_waitstatus(int status)
7292 {
7293     if (WIFSIGNALED(status)) {
7294         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7295     }
7296     if (WIFSTOPPED(status)) {
7297         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7298                | (status & 0xff);
7299     }
7300     return status;
7301 }
7302 
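/* Fake /proc/self/cmdline: write out each guest argv[] element including
   its terminating NUL byte. */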
7303 static int open_self_cmdline(void *cpu_env, int fd)
7304 {
7305     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7306     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7307     int i;
7308 
7309     for (i = 0; i < bprm->argc; i++) {
7310         size_t len = strlen(bprm->argv[i]) + 1;
7311 
7312         if (write(fd, bprm->argv[i], len) != len) {
7313             return -1;
7314         }
7315     }
7316 
7317     return 0;
7318 }
7319 
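/* Fake /proc/self/maps: walk the host's own mappings and emit only ranges
   that are valid guest addresses, translated with h2g() and with the
   guest stack region labelled "[stack]". */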
7320 static int open_self_maps(void *cpu_env, int fd)
7321 {
7322     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7323     TaskState *ts = cpu->opaque;
7324     GSList *map_info = read_self_maps();
7325     GSList *s;
7326     int count;
7327 
7328     for (s = map_info; s; s = g_slist_next(s)) {
7329         MapInfo *e = (MapInfo *) s->data;
7330 
7331         if (h2g_valid(e->start)) {
7332             unsigned long min = e->start;
7333             unsigned long max = e->end;
7334             int flags = page_get_flags(h2g(min));
7335             const char *path;
7336 
7337             max = h2g_valid(max - 1) ?
7338                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7339 
7340             if (page_check_range(h2g(min), max - min, flags) == -1) {
7341                 continue;
7342             }
7343 
7344             if (h2g(min) == ts->info->stack_limit) {
7345                 path = "[stack]";
7346             } else {
7347                 path = e->path;
7348             }
7349 
7350             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7351                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7352                             h2g(min), h2g(max - 1) + 1,
7353                             e->is_read ? 'r' : '-',
7354                             e->is_write ? 'w' : '-',
7355                             e->is_exec ? 'x' : '-',
7356                             e->is_priv ? 'p' : '-',
7357                             (uint64_t) e->offset, e->dev, e->inode);
7358             if (path) {
7359                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7360             } else {
7361                 dprintf(fd, "\n");
7362             }
7363         }
7364     }
7365 
7366     free_self_maps(map_info);
7367 
7368 #ifdef TARGET_VSYSCALL_PAGE
7369     /*
7370      * We only support execution from the vsyscall page.
7371      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7372      */
7373     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7374                     " --xp 00000000 00:00 0",
7375                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7376     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7377 #endif
7378 
7379     return 0;
7380 }
7381 
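/* Fake /proc/self/stat: only the pid, command name and stack start are
   filled in; the remaining fields read as 0. */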
7382 static int open_self_stat(void *cpu_env, int fd)
7383 {
7384     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7385     TaskState *ts = cpu->opaque;
7386     g_autoptr(GString) buf = g_string_new(NULL);
7387     int i;
7388 
7389     for (i = 0; i < 44; i++) {
7390         if (i == 0) {
7391             /* pid */
7392             g_string_printf(buf, FMT_pid " ", getpid());
7393         } else if (i == 1) {
7394             /* app name */
7395             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7396             bin = bin ? bin + 1 : ts->bprm->argv[0];
7397             g_string_printf(buf, "(%.15s) ", bin);
7398         } else if (i == 27) {
7399             /* stack bottom */
7400             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7401         } else {
7402             /* the remaining fields are not emulated and read as 0 */
7403             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7404         }
7405 
7406         if (write(fd, buf->str, buf->len) != buf->len) {
7407             return -1;
7408         }
7409     }
7410 
7411     return 0;
7412 }
7413 
7414 static int open_self_auxv(void *cpu_env, int fd)
7415 {
7416     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7417     TaskState *ts = cpu->opaque;
7418     abi_ulong auxv = ts->info->saved_auxv;
7419     abi_ulong len = ts->info->auxv_len;
7420     char *ptr;
7421 
7422     /*
7423      * The auxiliary vector is stored on the target process stack.
7424      * Read in the whole auxv vector and copy it to the file.
7425      */
7426     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7427     if (ptr != NULL) {
7428         while (len > 0) {
7429             ssize_t r;
7430             r = write(fd, ptr, len);
7431             if (r <= 0) {
7432                 break;
7433             }
7434             len -= r;
7435             ptr += r;
7436         }
7437         lseek(fd, 0, SEEK_SET);
7438         unlock_user(ptr, auxv, len);
7439     }
7440 
7441     return 0;
7442 }
7443 
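/* Return 1 if filename names the given /proc entry of the current process,
   i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>". */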
7444 static int is_proc_myself(const char *filename, const char *entry)
7445 {
7446     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7447         filename += strlen("/proc/");
7448         if (!strncmp(filename, "self/", strlen("self/"))) {
7449             filename += strlen("self/");
7450         } else if (*filename >= '1' && *filename <= '9') {
7451             char myself[80];
7452             snprintf(myself, sizeof(myself), "%d/", getpid());
7453             if (!strncmp(filename, myself, strlen(myself))) {
7454                 filename += strlen(myself);
7455             } else {
7456                 return 0;
7457             }
7458         } else {
7459             return 0;
7460         }
7461         if (!strcmp(filename, entry)) {
7462             return 1;
7463         }
7464     }
7465     return 0;
7466 }
7467 
7468 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7469     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7470 static int is_proc(const char *filename, const char *entry)
7471 {
7472     return strcmp(filename, entry) == 0;
7473 }
7474 #endif
7475 
7476 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7477 static int open_net_route(void *cpu_env, int fd)
7478 {
7479     FILE *fp;
7480     char *line = NULL;
7481     size_t len = 0;
7482     ssize_t read;
7483 
7484     fp = fopen("/proc/net/route", "r");
7485     if (fp == NULL) {
7486         return -1;
7487     }
7488 
7489     /* read header */
7490 
7491     read = getline(&line, &len, fp);
7492     dprintf(fd, "%s", line);
7493 
7494     /* read routes */
7495 
7496     while ((read = getline(&line, &len, fp)) != -1) {
7497         char iface[16];
7498         uint32_t dest, gw, mask;
7499         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7500         int fields;
7501 
7502         fields = sscanf(line,
7503                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7504                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7505                         &mask, &mtu, &window, &irtt);
7506         if (fields != 11) {
7507             continue;
7508         }
7509         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7510                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7511                 metric, tswap32(mask), mtu, window, irtt);
7512     }
7513 
7514     free(line);
7515     fclose(fp);
7516 
7517     return 0;
7518 }
7519 #endif
7520 
7521 #if defined(TARGET_SPARC)
7522 static int open_cpuinfo(void *cpu_env, int fd)
7523 {
7524     dprintf(fd, "type\t\t: sun4u\n");
7525     return 0;
7526 }
7527 #endif
7528 
7529 #if defined(TARGET_HPPA)
7530 static int open_cpuinfo(void *cpu_env, int fd)
7531 {
7532     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7533     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7534     dprintf(fd, "capabilities\t: os32\n");
7535     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7536     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7537     return 0;
7538 }
7539 #endif
7540 
7541 #if defined(TARGET_M68K)
7542 static int open_hardware(void *cpu_env, int fd)
7543 {
7544     dprintf(fd, "Model:\t\tqemu-m68k\n");
7545     return 0;
7546 }
7547 #endif
7548 
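/*
 * openat() with interception of the /proc entries emulated above:
 * matching paths are served from an unlinked temporary file filled in by
 * the corresponding open_* helper; everything else goes to the host.
 */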
7549 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7550 {
7551     struct fake_open {
7552         const char *filename;
7553         int (*fill)(void *cpu_env, int fd);
7554         int (*cmp)(const char *s1, const char *s2);
7555     };
7556     const struct fake_open *fake_open;
7557     static const struct fake_open fakes[] = {
7558         { "maps", open_self_maps, is_proc_myself },
7559         { "stat", open_self_stat, is_proc_myself },
7560         { "auxv", open_self_auxv, is_proc_myself },
7561         { "cmdline", open_self_cmdline, is_proc_myself },
7562 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7563         { "/proc/net/route", open_net_route, is_proc },
7564 #endif
7565 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7566         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7567 #endif
7568 #if defined(TARGET_M68K)
7569         { "/proc/hardware", open_hardware, is_proc },
7570 #endif
7571         { NULL, NULL, NULL }
7572     };
7573 
7574     if (is_proc_myself(pathname, "exe")) {
7575         int execfd = qemu_getauxval(AT_EXECFD);
7576         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7577     }
7578 
7579     for (fake_open = fakes; fake_open->filename; fake_open++) {
7580         if (fake_open->cmp(pathname, fake_open->filename)) {
7581             break;
7582         }
7583     }
7584 
7585     if (fake_open->filename) {
7586         const char *tmpdir;
7587         char filename[PATH_MAX];
7588         int fd, r;
7589 
7590         /* create temporary file to map stat to */
7591         tmpdir = getenv("TMPDIR");
7592         if (!tmpdir)
7593             tmpdir = "/tmp";
7594         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7595         fd = mkstemp(filename);
7596         if (fd < 0) {
7597             return fd;
7598         }
7599         unlink(filename);
7600 
7601         if ((r = fake_open->fill(cpu_env, fd))) {
7602             int e = errno;
7603             close(fd);
7604             errno = e;
7605             return r;
7606         }
7607         lseek(fd, 0, SEEK_SET);
7608 
7609         return fd;
7610     }
7611 
7612     return safe_openat(dirfd, path(pathname), flags, mode);
7613 }
7614 
7615 #define TIMER_MAGIC 0x0caf0000
7616 #define TIMER_MAGIC_MASK 0xffff0000
7617 
7618 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7619 static target_timer_t get_timer_id(abi_long arg)
7620 {
7621     target_timer_t timerid = arg;
7622 
7623     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7624         return -TARGET_EINVAL;
7625     }
7626 
7627     timerid &= 0xffff;
7628 
7629     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7630         return -TARGET_EINVAL;
7631     }
7632 
7633     return timerid;
7634 }
7635 
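/*
 * Convert a CPU affinity mask from guest memory (an array of abi_ulong
 * words) into a host unsigned long bitmap, bit by bit.  The host buffer
 * must be at least as large as the guest one.
 */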
7636 static int target_to_host_cpu_mask(unsigned long *host_mask,
7637                                    size_t host_size,
7638                                    abi_ulong target_addr,
7639                                    size_t target_size)
7640 {
7641     unsigned target_bits = sizeof(abi_ulong) * 8;
7642     unsigned host_bits = sizeof(*host_mask) * 8;
7643     abi_ulong *target_mask;
7644     unsigned i, j;
7645 
7646     assert(host_size >= target_size);
7647 
7648     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7649     if (!target_mask) {
7650         return -TARGET_EFAULT;
7651     }
7652     memset(host_mask, 0, host_size);
7653 
7654     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7655         unsigned bit = i * target_bits;
7656         abi_ulong val;
7657 
7658         __get_user(val, &target_mask[i]);
7659         for (j = 0; j < target_bits; j++, bit++) {
7660             if (val & (1UL << j)) {
7661                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7662             }
7663         }
7664     }
7665 
7666     unlock_user(target_mask, target_addr, 0);
7667     return 0;
7668 }
7669 
7670 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7671                                    size_t host_size,
7672                                    abi_ulong target_addr,
7673                                    size_t target_size)
7674 {
7675     unsigned target_bits = sizeof(abi_ulong) * 8;
7676     unsigned host_bits = sizeof(*host_mask) * 8;
7677     abi_ulong *target_mask;
7678     unsigned i, j;
7679 
7680     assert(host_size >= target_size);
7681 
7682     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7683     if (!target_mask) {
7684         return -TARGET_EFAULT;
7685     }
7686 
7687     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7688         unsigned bit = i * target_bits;
7689         abi_ulong val = 0;
7690 
7691         for (j = 0; j < target_bits; j++, bit++) {
7692             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7693                 val |= 1UL << j;
7694             }
7695         }
7696         __put_user(val, &target_mask[i]);
7697     }
7698 
7699     unlock_user(target_mask, target_addr, target_size);
7700     return 0;
7701 }
7702 
7703 /* This is an internal helper for do_syscall, giving it a single
7704  * return point so that actions such as logging of syscall results
7705  * can be performed in one place.
7706  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7707  */
7708 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7709                             abi_long arg2, abi_long arg3, abi_long arg4,
7710                             abi_long arg5, abi_long arg6, abi_long arg7,
7711                             abi_long arg8)
7712 {
7713     CPUState *cpu = env_cpu(cpu_env);
7714     abi_long ret;
7715 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7716     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7717     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7718     || defined(TARGET_NR_statx)
7719     struct stat st;
7720 #endif
7721 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7722     || defined(TARGET_NR_fstatfs)
7723     struct statfs stfs;
7724 #endif
7725     void *p;
7726 
7727     switch(num) {
7728     case TARGET_NR_exit:
7729         /* In old applications this may be used to implement _exit(2).
7730            However in threaded applications it is used for thread termination,
7731            and _exit_group is used for application termination.
7732            Do thread termination if we have more than one thread.  */
7733 
7734         if (block_signals()) {
7735             return -TARGET_ERESTARTSYS;
7736         }
7737 
7738         pthread_mutex_lock(&clone_lock);
7739 
7740         if (CPU_NEXT(first_cpu)) {
7741             TaskState *ts = cpu->opaque;
7742 
7743             object_property_set_bool(OBJECT(cpu), false, "realized", NULL);
7744             object_unref(OBJECT(cpu));
7745             /*
7746              * At this point the CPU should be unrealized and removed
7747              * from cpu lists. We can clean-up the rest of the thread
7748              * data without the lock held.
7749              */
7750 
7751             pthread_mutex_unlock(&clone_lock);
7752 
7753             if (ts->child_tidptr) {
7754                 put_user_u32(0, ts->child_tidptr);
7755                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7756                           NULL, NULL, 0);
7757             }
7758             thread_cpu = NULL;
7759             g_free(ts);
7760             rcu_unregister_thread();
7761             pthread_exit(NULL);
7762         }
7763 
7764         pthread_mutex_unlock(&clone_lock);
7765         preexit_cleanup(cpu_env, arg1);
7766         _exit(arg1);
7767         return 0; /* avoid warning */
7768     case TARGET_NR_read:
7769         if (arg2 == 0 && arg3 == 0) {
7770             return get_errno(safe_read(arg1, 0, 0));
7771         } else {
7772             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7773                 return -TARGET_EFAULT;
7774             ret = get_errno(safe_read(arg1, p, arg3));
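            /*
             * If a translator is registered for this fd, let it convert the
             * freshly read data into the layout the guest expects.
             */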
7775             if (ret >= 0 &&
7776                 fd_trans_host_to_target_data(arg1)) {
7777                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7778             }
7779             unlock_user(p, arg2, ret);
7780         }
7781         return ret;
7782     case TARGET_NR_write:
7783         if (arg2 == 0 && arg3 == 0) {
7784             return get_errno(safe_write(arg1, 0, 0));
7785         }
7786         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7787             return -TARGET_EFAULT;
7788         if (fd_trans_target_to_host_data(arg1)) {
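            /*
             * The registered translator may rewrite the data in place, so
             * operate on a private copy rather than on the guest buffer,
             * which was locked for reading only.
             */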
7789             void *copy = g_malloc(arg3);
7790             memcpy(copy, p, arg3);
7791             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7792             if (ret >= 0) {
7793                 ret = get_errno(safe_write(arg1, copy, ret));
7794             }
7795             g_free(copy);
7796         } else {
7797             ret = get_errno(safe_write(arg1, p, arg3));
7798         }
7799         unlock_user(p, arg2, 0);
7800         return ret;
7801 
7802 #ifdef TARGET_NR_open
7803     case TARGET_NR_open:
7804         if (!(p = lock_user_string(arg1)))
7805             return -TARGET_EFAULT;
7806         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7807                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7808                                   arg3));
7809         fd_trans_unregister(ret);
7810         unlock_user(p, arg1, 0);
7811         return ret;
7812 #endif
7813     case TARGET_NR_openat:
7814         if (!(p = lock_user_string(arg2)))
7815             return -TARGET_EFAULT;
7816         ret = get_errno(do_openat(cpu_env, arg1, p,
7817                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7818                                   arg4));
7819         fd_trans_unregister(ret);
7820         unlock_user(p, arg2, 0);
7821         return ret;
7822 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7823     case TARGET_NR_name_to_handle_at:
7824         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7825         return ret;
7826 #endif
7827 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7828     case TARGET_NR_open_by_handle_at:
7829         ret = do_open_by_handle_at(arg1, arg2, arg3);
7830         fd_trans_unregister(ret);
7831         return ret;
7832 #endif
7833     case TARGET_NR_close:
7834         fd_trans_unregister(arg1);
7835         return get_errno(close(arg1));
7836 
7837     case TARGET_NR_brk:
7838         return do_brk(arg1);
7839 #ifdef TARGET_NR_fork
7840     case TARGET_NR_fork:
7841         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7842 #endif
7843 #ifdef TARGET_NR_waitpid
7844     case TARGET_NR_waitpid:
7845         {
7846             int status;
7847             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7848             if (!is_error(ret) && arg2 && ret
7849                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7850                 return -TARGET_EFAULT;
7851         }
7852         return ret;
7853 #endif
7854 #ifdef TARGET_NR_waitid
7855     case TARGET_NR_waitid:
7856         {
7857             siginfo_t info;
7858             info.si_pid = 0;
7859             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7860             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7861                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7862                     return -TARGET_EFAULT;
7863                 host_to_target_siginfo(p, &info);
7864                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7865             }
7866         }
7867         return ret;
7868 #endif
7869 #ifdef TARGET_NR_creat /* not on alpha */
7870     case TARGET_NR_creat:
7871         if (!(p = lock_user_string(arg1)))
7872             return -TARGET_EFAULT;
7873         ret = get_errno(creat(p, arg2));
7874         fd_trans_unregister(ret);
7875         unlock_user(p, arg1, 0);
7876         return ret;
7877 #endif
7878 #ifdef TARGET_NR_link
7879     case TARGET_NR_link:
7880         {
7881             void * p2;
7882             p = lock_user_string(arg1);
7883             p2 = lock_user_string(arg2);
7884             if (!p || !p2)
7885                 ret = -TARGET_EFAULT;
7886             else
7887                 ret = get_errno(link(p, p2));
7888             unlock_user(p2, arg2, 0);
7889             unlock_user(p, arg1, 0);
7890         }
7891         return ret;
7892 #endif
7893 #if defined(TARGET_NR_linkat)
7894     case TARGET_NR_linkat:
7895         {
7896             void * p2 = NULL;
7897             if (!arg2 || !arg4)
7898                 return -TARGET_EFAULT;
7899             p  = lock_user_string(arg2);
7900             p2 = lock_user_string(arg4);
7901             if (!p || !p2)
7902                 ret = -TARGET_EFAULT;
7903             else
7904                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7905             unlock_user(p, arg2, 0);
7906             unlock_user(p2, arg4, 0);
7907         }
7908         return ret;
7909 #endif
7910 #ifdef TARGET_NR_unlink
7911     case TARGET_NR_unlink:
7912         if (!(p = lock_user_string(arg1)))
7913             return -TARGET_EFAULT;
7914         ret = get_errno(unlink(p));
7915         unlock_user(p, arg1, 0);
7916         return ret;
7917 #endif
7918 #if defined(TARGET_NR_unlinkat)
7919     case TARGET_NR_unlinkat:
7920         if (!(p = lock_user_string(arg2)))
7921             return -TARGET_EFAULT;
7922         ret = get_errno(unlinkat(arg1, p, arg3));
7923         unlock_user(p, arg2, 0);
7924         return ret;
7925 #endif
7926     case TARGET_NR_execve:
7927         {
7928             char **argp, **envp;
7929             int argc, envc;
7930             abi_ulong gp;
7931             abi_ulong guest_argp;
7932             abi_ulong guest_envp;
7933             abi_ulong addr;
7934             char **q;
7935             int total_size = 0;
7936 
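            /*
             * First pass: count the argv and envp pointers in guest memory
             * so that host arrays of the right size can be allocated.
             */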
7937             argc = 0;
7938             guest_argp = arg2;
7939             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7940                 if (get_user_ual(addr, gp))
7941                     return -TARGET_EFAULT;
7942                 if (!addr)
7943                     break;
7944                 argc++;
7945             }
7946             envc = 0;
7947             guest_envp = arg3;
7948             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7949                 if (get_user_ual(addr, gp))
7950                     return -TARGET_EFAULT;
7951                 if (!addr)
7952                     break;
7953                 envc++;
7954             }
7955 
7956             argp = g_new0(char *, argc + 1);
7957             envp = g_new0(char *, envc + 1);
7958 
7959             for (gp = guest_argp, q = argp; gp;
7960                   gp += sizeof(abi_ulong), q++) {
7961                 if (get_user_ual(addr, gp))
7962                     goto execve_efault;
7963                 if (!addr)
7964                     break;
7965                 if (!(*q = lock_user_string(addr)))
7966                     goto execve_efault;
7967                 total_size += strlen(*q) + 1;
7968             }
7969             *q = NULL;
7970 
7971             for (gp = guest_envp, q = envp; gp;
7972                   gp += sizeof(abi_ulong), q++) {
7973                 if (get_user_ual(addr, gp))
7974                     goto execve_efault;
7975                 if (!addr)
7976                     break;
7977                 if (!(*q = lock_user_string(addr)))
7978                     goto execve_efault;
7979                 total_size += strlen(*q) + 1;
7980             }
7981             *q = NULL;
7982 
7983             if (!(p = lock_user_string(arg1)))
7984                 goto execve_efault;
7985             /* Although execve() is not an interruptible syscall it is
7986              * a special case where we must use the safe_syscall wrapper:
7987              * if we allow a signal to happen before we make the host
7988              * syscall then we will 'lose' it, because at the point of
7989              * execve the process leaves QEMU's control. So we use the
7990              * safe syscall wrapper to ensure that we either take the
7991              * signal as a guest signal, or else it does not happen
7992              * before the execve completes and makes it the other
7993              * program's problem.
7994              */
7995             ret = get_errno(safe_execve(p, argp, envp));
7996             unlock_user(p, arg1, 0);
7997 
7998             goto execve_end;
7999 
8000         execve_efault:
8001             ret = -TARGET_EFAULT;
8002 
8003         execve_end:
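            /*
             * Unlock every guest string that was locked above; the loops
             * stop at the first NULL entry or unreadable pointer.
             */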
8004             for (gp = guest_argp, q = argp; *q;
8005                   gp += sizeof(abi_ulong), q++) {
8006                 if (get_user_ual(addr, gp)
8007                     || !addr)
8008                     break;
8009                 unlock_user(*q, addr, 0);
8010             }
8011             for (gp = guest_envp, q = envp; *q;
8012                   gp += sizeof(abi_ulong), q++) {
8013                 if (get_user_ual(addr, gp)
8014                     || !addr)
8015                     break;
8016                 unlock_user(*q, addr, 0);
8017             }
8018 
8019             g_free(argp);
8020             g_free(envp);
8021         }
8022         return ret;
8023     case TARGET_NR_chdir:
8024         if (!(p = lock_user_string(arg1)))
8025             return -TARGET_EFAULT;
8026         ret = get_errno(chdir(p));
8027         unlock_user(p, arg1, 0);
8028         return ret;
8029 #ifdef TARGET_NR_time
8030     case TARGET_NR_time:
8031         {
8032             time_t host_time;
8033             ret = get_errno(time(&host_time));
8034             if (!is_error(ret)
8035                 && arg1
8036                 && put_user_sal(host_time, arg1))
8037                 return -TARGET_EFAULT;
8038         }
8039         return ret;
8040 #endif
8041 #ifdef TARGET_NR_mknod
8042     case TARGET_NR_mknod:
8043         if (!(p = lock_user_string(arg1)))
8044             return -TARGET_EFAULT;
8045         ret = get_errno(mknod(p, arg2, arg3));
8046         unlock_user(p, arg1, 0);
8047         return ret;
8048 #endif
8049 #if defined(TARGET_NR_mknodat)
8050     case TARGET_NR_mknodat:
8051         if (!(p = lock_user_string(arg2)))
8052             return -TARGET_EFAULT;
8053         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8054         unlock_user(p, arg2, 0);
8055         return ret;
8056 #endif
8057 #ifdef TARGET_NR_chmod
8058     case TARGET_NR_chmod:
8059         if (!(p = lock_user_string(arg1)))
8060             return -TARGET_EFAULT;
8061         ret = get_errno(chmod(p, arg2));
8062         unlock_user(p, arg1, 0);
8063         return ret;
8064 #endif
8065 #ifdef TARGET_NR_lseek
8066     case TARGET_NR_lseek:
8067         return get_errno(lseek(arg1, arg2, arg3));
8068 #endif
8069 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8070     /* Alpha specific */
8071     case TARGET_NR_getxpid:
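        /*
         * getxpid returns the current pid in the normal result register
         * and the parent pid in register a4 (IR_A4).
         */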
8072         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8073         return get_errno(getpid());
8074 #endif
8075 #ifdef TARGET_NR_getpid
8076     case TARGET_NR_getpid:
8077         return get_errno(getpid());
8078 #endif
8079     case TARGET_NR_mount:
8080         {
8081             /* need to look at the data field */
8082             void *p2, *p3;
8083 
8084             if (arg1) {
8085                 p = lock_user_string(arg1);
8086                 if (!p) {
8087                     return -TARGET_EFAULT;
8088                 }
8089             } else {
8090                 p = NULL;
8091             }
8092 
8093             p2 = lock_user_string(arg2);
8094             if (!p2) {
8095                 if (arg1) {
8096                     unlock_user(p, arg1, 0);
8097                 }
8098                 return -TARGET_EFAULT;
8099             }
8100 
8101             if (arg3) {
8102                 p3 = lock_user_string(arg3);
8103                 if (!p3) {
8104                     if (arg1) {
8105                         unlock_user(p, arg1, 0);
8106                     }
8107                     unlock_user(p2, arg2, 0);
8108                     return -TARGET_EFAULT;
8109                 }
8110             } else {
8111                 p3 = NULL;
8112             }
8113 
8114             /* FIXME - arg5 should be locked, but it isn't clear how to
8115              * do that since it's not guaranteed to be a NULL-terminated
8116              * string.
8117              */
8118             if (!arg5) {
8119                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8120             } else {
8121                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8122             }
8123             ret = get_errno(ret);
8124 
8125             if (arg1) {
8126                 unlock_user(p, arg1, 0);
8127             }
8128             unlock_user(p2, arg2, 0);
8129             if (arg3) {
8130                 unlock_user(p3, arg3, 0);
8131             }
8132         }
8133         return ret;
8134 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8135 #if defined(TARGET_NR_umount)
8136     case TARGET_NR_umount:
8137 #endif
8138 #if defined(TARGET_NR_oldumount)
8139     case TARGET_NR_oldumount:
8140 #endif
8141         if (!(p = lock_user_string(arg1)))
8142             return -TARGET_EFAULT;
8143         ret = get_errno(umount(p));
8144         unlock_user(p, arg1, 0);
8145         return ret;
8146 #endif
8147 #ifdef TARGET_NR_stime /* not on alpha */
8148     case TARGET_NR_stime:
8149         {
8150             struct timespec ts;
8151             ts.tv_nsec = 0;
8152             if (get_user_sal(ts.tv_sec, arg1)) {
8153                 return -TARGET_EFAULT;
8154             }
8155             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8156         }
8157 #endif
8158 #ifdef TARGET_NR_alarm /* not on alpha */
8159     case TARGET_NR_alarm:
8160         return alarm(arg1);
8161 #endif
8162 #ifdef TARGET_NR_pause /* not on alpha */
8163     case TARGET_NR_pause:
8164         if (!block_signals()) {
8165             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8166         }
8167         return -TARGET_EINTR;
8168 #endif
8169 #ifdef TARGET_NR_utime
8170     case TARGET_NR_utime:
8171         {
8172             struct utimbuf tbuf, *host_tbuf;
8173             struct target_utimbuf *target_tbuf;
8174             if (arg2) {
8175                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8176                     return -TARGET_EFAULT;
8177                 tbuf.actime = tswapal(target_tbuf->actime);
8178                 tbuf.modtime = tswapal(target_tbuf->modtime);
8179                 unlock_user_struct(target_tbuf, arg2, 0);
8180                 host_tbuf = &tbuf;
8181             } else {
8182                 host_tbuf = NULL;
8183             }
8184             if (!(p = lock_user_string(arg1)))
8185                 return -TARGET_EFAULT;
8186             ret = get_errno(utime(p, host_tbuf));
8187             unlock_user(p, arg1, 0);
8188         }
8189         return ret;
8190 #endif
8191 #ifdef TARGET_NR_utimes
8192     case TARGET_NR_utimes:
8193         {
8194             struct timeval *tvp, tv[2];
8195             if (arg2) {
8196                 if (copy_from_user_timeval(&tv[0], arg2)
8197                     || copy_from_user_timeval(&tv[1],
8198                                               arg2 + sizeof(struct target_timeval)))
8199                     return -TARGET_EFAULT;
8200                 tvp = tv;
8201             } else {
8202                 tvp = NULL;
8203             }
8204             if (!(p = lock_user_string(arg1)))
8205                 return -TARGET_EFAULT;
8206             ret = get_errno(utimes(p, tvp));
8207             unlock_user(p, arg1, 0);
8208         }
8209         return ret;
8210 #endif
8211 #if defined(TARGET_NR_futimesat)
8212     case TARGET_NR_futimesat:
8213         {
8214             struct timeval *tvp, tv[2];
8215             if (arg3) {
8216                 if (copy_from_user_timeval(&tv[0], arg3)
8217                     || copy_from_user_timeval(&tv[1],
8218                                               arg3 + sizeof(struct target_timeval)))
8219                     return -TARGET_EFAULT;
8220                 tvp = tv;
8221             } else {
8222                 tvp = NULL;
8223             }
8224             if (!(p = lock_user_string(arg2))) {
8225                 return -TARGET_EFAULT;
8226             }
8227             ret = get_errno(futimesat(arg1, path(p), tvp));
8228             unlock_user(p, arg2, 0);
8229         }
8230         return ret;
8231 #endif
8232 #ifdef TARGET_NR_access
8233     case TARGET_NR_access:
8234         if (!(p = lock_user_string(arg1))) {
8235             return -TARGET_EFAULT;
8236         }
8237         ret = get_errno(access(path(p), arg2));
8238         unlock_user(p, arg1, 0);
8239         return ret;
8240 #endif
8241 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8242     case TARGET_NR_faccessat:
8243         if (!(p = lock_user_string(arg2))) {
8244             return -TARGET_EFAULT;
8245         }
8246         ret = get_errno(faccessat(arg1, p, arg3, 0));
8247         unlock_user(p, arg2, 0);
8248         return ret;
8249 #endif
8250 #ifdef TARGET_NR_nice /* not on alpha */
8251     case TARGET_NR_nice:
8252         return get_errno(nice(arg1));
8253 #endif
8254     case TARGET_NR_sync:
8255         sync();
8256         return 0;
8257 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8258     case TARGET_NR_syncfs:
8259         return get_errno(syncfs(arg1));
8260 #endif
8261     case TARGET_NR_kill:
8262         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8263 #ifdef TARGET_NR_rename
8264     case TARGET_NR_rename:
8265         {
8266             void *p2;
8267             p = lock_user_string(arg1);
8268             p2 = lock_user_string(arg2);
8269             if (!p || !p2)
8270                 ret = -TARGET_EFAULT;
8271             else
8272                 ret = get_errno(rename(p, p2));
8273             unlock_user(p2, arg2, 0);
8274             unlock_user(p, arg1, 0);
8275         }
8276         return ret;
8277 #endif
8278 #if defined(TARGET_NR_renameat)
8279     case TARGET_NR_renameat:
8280         {
8281             void *p2;
8282             p  = lock_user_string(arg2);
8283             p2 = lock_user_string(arg4);
8284             if (!p || !p2)
8285                 ret = -TARGET_EFAULT;
8286             else
8287                 ret = get_errno(renameat(arg1, p, arg3, p2));
8288             unlock_user(p2, arg4, 0);
8289             unlock_user(p, arg2, 0);
8290         }
8291         return ret;
8292 #endif
8293 #if defined(TARGET_NR_renameat2)
8294     case TARGET_NR_renameat2:
8295         {
8296             void *p2;
8297             p  = lock_user_string(arg2);
8298             p2 = lock_user_string(arg4);
8299             if (!p || !p2) {
8300                 ret = -TARGET_EFAULT;
8301             } else {
8302                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8303             }
8304             unlock_user(p2, arg4, 0);
8305             unlock_user(p, arg2, 0);
8306         }
8307         return ret;
8308 #endif
8309 #ifdef TARGET_NR_mkdir
8310     case TARGET_NR_mkdir:
8311         if (!(p = lock_user_string(arg1)))
8312             return -TARGET_EFAULT;
8313         ret = get_errno(mkdir(p, arg2));
8314         unlock_user(p, arg1, 0);
8315         return ret;
8316 #endif
8317 #if defined(TARGET_NR_mkdirat)
8318     case TARGET_NR_mkdirat:
8319         if (!(p = lock_user_string(arg2)))
8320             return -TARGET_EFAULT;
8321         ret = get_errno(mkdirat(arg1, p, arg3));
8322         unlock_user(p, arg2, 0);
8323         return ret;
8324 #endif
8325 #ifdef TARGET_NR_rmdir
8326     case TARGET_NR_rmdir:
8327         if (!(p = lock_user_string(arg1)))
8328             return -TARGET_EFAULT;
8329         ret = get_errno(rmdir(p));
8330         unlock_user(p, arg1, 0);
8331         return ret;
8332 #endif
8333     case TARGET_NR_dup:
8334         ret = get_errno(dup(arg1));
8335         if (ret >= 0) {
8336             fd_trans_dup(arg1, ret);
8337         }
8338         return ret;
8339 #ifdef TARGET_NR_pipe
8340     case TARGET_NR_pipe:
8341         return do_pipe(cpu_env, arg1, 0, 0);
8342 #endif
8343 #ifdef TARGET_NR_pipe2
8344     case TARGET_NR_pipe2:
8345         return do_pipe(cpu_env, arg1,
8346                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8347 #endif
8348     case TARGET_NR_times:
8349         {
8350             struct target_tms *tmsp;
8351             struct tms tms;
8352             ret = get_errno(times(&tms));
8353             if (arg1) {
8354                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8355                 if (!tmsp)
8356                     return -TARGET_EFAULT;
8357                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8358                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8359                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8360                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8361             }
8362             if (!is_error(ret))
8363                 ret = host_to_target_clock_t(ret);
8364         }
8365         return ret;
8366     case TARGET_NR_acct:
8367         if (arg1 == 0) {
8368             ret = get_errno(acct(NULL));
8369         } else {
8370             if (!(p = lock_user_string(arg1))) {
8371                 return -TARGET_EFAULT;
8372             }
8373             ret = get_errno(acct(path(p)));
8374             unlock_user(p, arg1, 0);
8375         }
8376         return ret;
8377 #ifdef TARGET_NR_umount2
8378     case TARGET_NR_umount2:
8379         if (!(p = lock_user_string(arg1)))
8380             return -TARGET_EFAULT;
8381         ret = get_errno(umount2(p, arg2));
8382         unlock_user(p, arg1, 0);
8383         return ret;
8384 #endif
8385     case TARGET_NR_ioctl:
8386         return do_ioctl(arg1, arg2, arg3);
8387 #ifdef TARGET_NR_fcntl
8388     case TARGET_NR_fcntl:
8389         return do_fcntl(arg1, arg2, arg3);
8390 #endif
8391     case TARGET_NR_setpgid:
8392         return get_errno(setpgid(arg1, arg2));
8393     case TARGET_NR_umask:
8394         return get_errno(umask(arg1));
8395     case TARGET_NR_chroot:
8396         if (!(p = lock_user_string(arg1)))
8397             return -TARGET_EFAULT;
8398         ret = get_errno(chroot(p));
8399         unlock_user(p, arg1, 0);
8400         return ret;
8401 #ifdef TARGET_NR_dup2
8402     case TARGET_NR_dup2:
8403         ret = get_errno(dup2(arg1, arg2));
8404         if (ret >= 0) {
8405             fd_trans_dup(arg1, arg2);
8406         }
8407         return ret;
8408 #endif
8409 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8410     case TARGET_NR_dup3:
8411     {
8412         int host_flags;
8413 
8414         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8415             return -TARGET_EINVAL;
8416         }
8417         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8418         ret = get_errno(dup3(arg1, arg2, host_flags));
8419         if (ret >= 0) {
8420             fd_trans_dup(arg1, arg2);
8421         }
8422         return ret;
8423     }
8424 #endif
8425 #ifdef TARGET_NR_getppid /* not on alpha */
8426     case TARGET_NR_getppid:
8427         return get_errno(getppid());
8428 #endif
8429 #ifdef TARGET_NR_getpgrp
8430     case TARGET_NR_getpgrp:
8431         return get_errno(getpgrp());
8432 #endif
8433     case TARGET_NR_setsid:
8434         return get_errno(setsid());
8435 #ifdef TARGET_NR_sigaction
8436     case TARGET_NR_sigaction:
8437         {
8438 #if defined(TARGET_ALPHA)
8439             struct target_sigaction act, oact, *pact = 0;
8440             struct target_old_sigaction *old_act;
8441             if (arg2) {
8442                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8443                     return -TARGET_EFAULT;
8444                 act._sa_handler = old_act->_sa_handler;
8445                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8446                 act.sa_flags = old_act->sa_flags;
8447                 act.sa_restorer = 0;
8448                 unlock_user_struct(old_act, arg2, 0);
8449                 pact = &act;
8450             }
8451             ret = get_errno(do_sigaction(arg1, pact, &oact));
8452             if (!is_error(ret) && arg3) {
8453                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8454                     return -TARGET_EFAULT;
8455                 old_act->_sa_handler = oact._sa_handler;
8456                 old_act->sa_mask = oact.sa_mask.sig[0];
8457                 old_act->sa_flags = oact.sa_flags;
8458                 unlock_user_struct(old_act, arg3, 1);
8459             }
8460 #elif defined(TARGET_MIPS)
8461             struct target_sigaction act, oact, *pact, *old_act;
8462 
8463             if (arg2) {
8464                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8465                     return -TARGET_EFAULT;
8466                 act._sa_handler = old_act->_sa_handler;
8467                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8468                 act.sa_flags = old_act->sa_flags;
8469                 unlock_user_struct(old_act, arg2, 0);
8470                 pact = &act;
8471             } else {
8472                 pact = NULL;
8473             }
8474 
8475             ret = get_errno(do_sigaction(arg1, pact, &oact));
8476 
8477             if (!is_error(ret) && arg3) {
8478                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8479                     return -TARGET_EFAULT;
8480                 old_act->_sa_handler = oact._sa_handler;
8481                 old_act->sa_flags = oact.sa_flags;
8482                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8483                 old_act->sa_mask.sig[1] = 0;
8484                 old_act->sa_mask.sig[2] = 0;
8485                 old_act->sa_mask.sig[3] = 0;
8486                 unlock_user_struct(old_act, arg3, 1);
8487             }
8488 #else
8489             struct target_old_sigaction *old_act;
8490             struct target_sigaction act, oact, *pact;
8491             if (arg2) {
8492                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8493                     return -TARGET_EFAULT;
8494                 act._sa_handler = old_act->_sa_handler;
8495                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8496                 act.sa_flags = old_act->sa_flags;
8497                 act.sa_restorer = old_act->sa_restorer;
8498 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8499                 act.ka_restorer = 0;
8500 #endif
8501                 unlock_user_struct(old_act, arg2, 0);
8502                 pact = &act;
8503             } else {
8504                 pact = NULL;
8505             }
8506             ret = get_errno(do_sigaction(arg1, pact, &oact));
8507             if (!is_error(ret) && arg3) {
8508                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8509                     return -TARGET_EFAULT;
8510                 old_act->_sa_handler = oact._sa_handler;
8511                 old_act->sa_mask = oact.sa_mask.sig[0];
8512                 old_act->sa_flags = oact.sa_flags;
8513                 old_act->sa_restorer = oact.sa_restorer;
8514                 unlock_user_struct(old_act, arg3, 1);
8515             }
8516 #endif
8517         }
8518         return ret;
8519 #endif
8520     case TARGET_NR_rt_sigaction:
8521         {
8522 #if defined(TARGET_ALPHA)
8523             /* For Alpha and SPARC this is a 5 argument syscall, with
8524              * a 'restorer' parameter which must be copied into the
8525              * sa_restorer field of the sigaction struct.
8526              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8527              * and arg5 is the sigsetsize.
8528              * Alpha also has a separate rt_sigaction struct that it uses
8529              * here; SPARC uses the usual sigaction struct.
8530              */
8531             struct target_rt_sigaction *rt_act;
8532             struct target_sigaction act, oact, *pact = 0;
8533 
8534             if (arg4 != sizeof(target_sigset_t)) {
8535                 return -TARGET_EINVAL;
8536             }
8537             if (arg2) {
8538                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8539                     return -TARGET_EFAULT;
8540                 act._sa_handler = rt_act->_sa_handler;
8541                 act.sa_mask = rt_act->sa_mask;
8542                 act.sa_flags = rt_act->sa_flags;
8543                 act.sa_restorer = arg5;
8544                 unlock_user_struct(rt_act, arg2, 0);
8545                 pact = &act;
8546             }
8547             ret = get_errno(do_sigaction(arg1, pact, &oact));
8548             if (!is_error(ret) && arg3) {
8549                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8550                     return -TARGET_EFAULT;
8551                 rt_act->_sa_handler = oact._sa_handler;
8552                 rt_act->sa_mask = oact.sa_mask;
8553                 rt_act->sa_flags = oact.sa_flags;
8554                 unlock_user_struct(rt_act, arg3, 1);
8555             }
8556 #else
8557 #ifdef TARGET_SPARC
8558             target_ulong restorer = arg4;
8559             target_ulong sigsetsize = arg5;
8560 #else
8561             target_ulong sigsetsize = arg4;
8562 #endif
8563             struct target_sigaction *act;
8564             struct target_sigaction *oact;
8565 
8566             if (sigsetsize != sizeof(target_sigset_t)) {
8567                 return -TARGET_EINVAL;
8568             }
8569             if (arg2) {
8570                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8571                     return -TARGET_EFAULT;
8572                 }
8573 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8574                 act->ka_restorer = restorer;
8575 #endif
8576             } else {
8577                 act = NULL;
8578             }
8579             if (arg3) {
8580                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8581                     ret = -TARGET_EFAULT;
8582                     goto rt_sigaction_fail;
8583                 }
8584             } else
8585                 oact = NULL;
8586             ret = get_errno(do_sigaction(arg1, act, oact));
8587         rt_sigaction_fail:
8588             if (act)
8589                 unlock_user_struct(act, arg2, 0);
8590             if (oact)
8591                 unlock_user_struct(oact, arg3, 1);
8592 #endif
8593         }
8594         return ret;
8595 #ifdef TARGET_NR_sgetmask /* not on alpha */
8596     case TARGET_NR_sgetmask:
8597         {
8598             sigset_t cur_set;
8599             abi_ulong target_set;
8600             ret = do_sigprocmask(0, NULL, &cur_set);
8601             if (!ret) {
8602                 host_to_target_old_sigset(&target_set, &cur_set);
8603                 ret = target_set;
8604             }
8605         }
8606         return ret;
8607 #endif
8608 #ifdef TARGET_NR_ssetmask /* not on alpha */
8609     case TARGET_NR_ssetmask:
8610         {
8611             sigset_t set, oset;
8612             abi_ulong target_set = arg1;
8613             target_to_host_old_sigset(&set, &target_set);
8614             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8615             if (!ret) {
8616                 host_to_target_old_sigset(&target_set, &oset);
8617                 ret = target_set;
8618             }
8619         }
8620         return ret;
8621 #endif
8622 #ifdef TARGET_NR_sigprocmask
8623     case TARGET_NR_sigprocmask:
8624         {
8625 #if defined(TARGET_ALPHA)
8626             sigset_t set, oldset;
8627             abi_ulong mask;
8628             int how;
8629 
8630             switch (arg1) {
8631             case TARGET_SIG_BLOCK:
8632                 how = SIG_BLOCK;
8633                 break;
8634             case TARGET_SIG_UNBLOCK:
8635                 how = SIG_UNBLOCK;
8636                 break;
8637             case TARGET_SIG_SETMASK:
8638                 how = SIG_SETMASK;
8639                 break;
8640             default:
8641                 return -TARGET_EINVAL;
8642             }
8643             mask = arg2;
8644             target_to_host_old_sigset(&set, &mask);
8645 
8646             ret = do_sigprocmask(how, &set, &oldset);
8647             if (!is_error(ret)) {
8648                 host_to_target_old_sigset(&mask, &oldset);
8649                 ret = mask;
8650                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8651             }
8652 #else
8653             sigset_t set, oldset, *set_ptr;
8654             int how;
8655 
8656             if (arg2) {
8657                 switch (arg1) {
8658                 case TARGET_SIG_BLOCK:
8659                     how = SIG_BLOCK;
8660                     break;
8661                 case TARGET_SIG_UNBLOCK:
8662                     how = SIG_UNBLOCK;
8663                     break;
8664                 case TARGET_SIG_SETMASK:
8665                     how = SIG_SETMASK;
8666                     break;
8667                 default:
8668                     return -TARGET_EINVAL;
8669                 }
8670                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8671                     return -TARGET_EFAULT;
8672                 target_to_host_old_sigset(&set, p);
8673                 unlock_user(p, arg2, 0);
8674                 set_ptr = &set;
8675             } else {
8676                 how = 0;
8677                 set_ptr = NULL;
8678             }
8679             ret = do_sigprocmask(how, set_ptr, &oldset);
8680             if (!is_error(ret) && arg3) {
8681                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8682                     return -TARGET_EFAULT;
8683                 host_to_target_old_sigset(p, &oldset);
8684                 unlock_user(p, arg3, sizeof(target_sigset_t));
8685             }
8686 #endif
8687         }
8688         return ret;
8689 #endif
8690     case TARGET_NR_rt_sigprocmask:
8691         {
8692             int how = arg1;
8693             sigset_t set, oldset, *set_ptr;
8694 
8695             if (arg4 != sizeof(target_sigset_t)) {
8696                 return -TARGET_EINVAL;
8697             }
8698 
8699             if (arg2) {
8700                 switch(how) {
8701                 case TARGET_SIG_BLOCK:
8702                     how = SIG_BLOCK;
8703                     break;
8704                 case TARGET_SIG_UNBLOCK:
8705                     how = SIG_UNBLOCK;
8706                     break;
8707                 case TARGET_SIG_SETMASK:
8708                     how = SIG_SETMASK;
8709                     break;
8710                 default:
8711                     return -TARGET_EINVAL;
8712                 }
8713                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8714                     return -TARGET_EFAULT;
8715                 target_to_host_sigset(&set, p);
8716                 unlock_user(p, arg2, 0);
8717                 set_ptr = &set;
8718             } else {
8719                 how = 0;
8720                 set_ptr = NULL;
8721             }
8722             ret = do_sigprocmask(how, set_ptr, &oldset);
8723             if (!is_error(ret) && arg3) {
8724                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8725                     return -TARGET_EFAULT;
8726                 host_to_target_sigset(p, &oldset);
8727                 unlock_user(p, arg3, sizeof(target_sigset_t));
8728             }
8729         }
8730         return ret;
8731 #ifdef TARGET_NR_sigpending
8732     case TARGET_NR_sigpending:
8733         {
8734             sigset_t set;
8735             ret = get_errno(sigpending(&set));
8736             if (!is_error(ret)) {
8737                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8738                     return -TARGET_EFAULT;
8739                 host_to_target_old_sigset(p, &set);
8740                 unlock_user(p, arg1, sizeof(target_sigset_t));
8741             }
8742         }
8743         return ret;
8744 #endif
8745     case TARGET_NR_rt_sigpending:
8746         {
8747             sigset_t set;
8748 
8749             /* Yes, this check is >, not != like most. We follow the kernel's
8750              * logic and it does it like this because it implements
8751              * NR_sigpending through the same code path, and in that case
8752              * the old_sigset_t is smaller in size.
8753              */
8754             if (arg2 > sizeof(target_sigset_t)) {
8755                 return -TARGET_EINVAL;
8756             }
8757 
8758             ret = get_errno(sigpending(&set));
8759             if (!is_error(ret)) {
8760                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8761                     return -TARGET_EFAULT;
8762                 host_to_target_sigset(p, &set);
8763                 unlock_user(p, arg1, sizeof(target_sigset_t));
8764             }
8765         }
8766         return ret;
8767 #ifdef TARGET_NR_sigsuspend
8768     case TARGET_NR_sigsuspend:
8769         {
8770             TaskState *ts = cpu->opaque;
8771 #if defined(TARGET_ALPHA)
8772             abi_ulong mask = arg1;
8773             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8774 #else
8775             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8776                 return -TARGET_EFAULT;
8777             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8778             unlock_user(p, arg1, 0);
8779 #endif
8780             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8781                                                SIGSET_T_SIZE));
8782             if (ret != -TARGET_ERESTARTSYS) {
8783                 ts->in_sigsuspend = 1;
8784             }
8785         }
8786         return ret;
8787 #endif
8788     case TARGET_NR_rt_sigsuspend:
8789         {
8790             TaskState *ts = cpu->opaque;
8791 
8792             if (arg2 != sizeof(target_sigset_t)) {
8793                 return -TARGET_EINVAL;
8794             }
8795             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8796                 return -TARGET_EFAULT;
8797             target_to_host_sigset(&ts->sigsuspend_mask, p);
8798             unlock_user(p, arg1, 0);
8799             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8800                                                SIGSET_T_SIZE));
8801             if (ret != -TARGET_ERESTARTSYS) {
8802                 ts->in_sigsuspend = 1;
8803             }
8804         }
8805         return ret;
8806 #ifdef TARGET_NR_rt_sigtimedwait
8807     case TARGET_NR_rt_sigtimedwait:
8808         {
8809             sigset_t set;
8810             struct timespec uts, *puts;
8811             siginfo_t uinfo;
8812 
8813             if (arg4 != sizeof(target_sigset_t)) {
8814                 return -TARGET_EINVAL;
8815             }
8816 
8817             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8818                 return -TARGET_EFAULT;
8819             target_to_host_sigset(&set, p);
8820             unlock_user(p, arg1, 0);
8821             if (arg3) {
8822                 puts = &uts;
8823                 target_to_host_timespec(puts, arg3);
8824             } else {
8825                 puts = NULL;
8826             }
8827             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8828                                                  SIGSET_T_SIZE));
8829             if (!is_error(ret)) {
8830                 if (arg2) {
8831                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8832                                   0);
8833                     if (!p) {
8834                         return -TARGET_EFAULT;
8835                     }
8836                     host_to_target_siginfo(p, &uinfo);
8837                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8838                 }
8839                 ret = host_to_target_signal(ret);
8840             }
8841         }
8842         return ret;
8843 #endif
8844     case TARGET_NR_rt_sigqueueinfo:
8845         {
8846             siginfo_t uinfo;
8847 
8848             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8849             if (!p) {
8850                 return -TARGET_EFAULT;
8851             }
8852             target_to_host_siginfo(&uinfo, p);
8853             unlock_user(p, arg3, 0);
8854             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8855         }
8856         return ret;
8857     case TARGET_NR_rt_tgsigqueueinfo:
8858         {
8859             siginfo_t uinfo;
8860 
8861             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8862             if (!p) {
8863                 return -TARGET_EFAULT;
8864             }
8865             target_to_host_siginfo(&uinfo, p);
8866             unlock_user(p, arg4, 0);
8867             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8868         }
8869         return ret;
8870 #ifdef TARGET_NR_sigreturn
8871     case TARGET_NR_sigreturn:
8872         if (block_signals()) {
8873             return -TARGET_ERESTARTSYS;
8874         }
8875         return do_sigreturn(cpu_env);
8876 #endif
8877     case TARGET_NR_rt_sigreturn:
8878         if (block_signals()) {
8879             return -TARGET_ERESTARTSYS;
8880         }
8881         return do_rt_sigreturn(cpu_env);
8882     case TARGET_NR_sethostname:
8883         if (!(p = lock_user_string(arg1)))
8884             return -TARGET_EFAULT;
8885         ret = get_errno(sethostname(p, arg2));
8886         unlock_user(p, arg1, 0);
8887         return ret;
8888 #ifdef TARGET_NR_setrlimit
8889     case TARGET_NR_setrlimit:
8890         {
8891             int resource = target_to_host_resource(arg1);
8892             struct target_rlimit *target_rlim;
8893             struct rlimit rlim;
8894             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8895                 return -TARGET_EFAULT;
8896             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8897             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8898             unlock_user_struct(target_rlim, arg2, 0);
8899             /*
8900              * If we just passed through resource limit settings for memory then
8901              * they would also apply to QEMU's own allocations, and QEMU will
8902              * crash or hang or die if its allocations fail. Ideally we would
8903              * track the guest allocations in QEMU and apply the limits ourselves.
8904              * For now, just tell the guest the call succeeded but don't actually
8905              * limit anything.
8906              */
8907             if (resource != RLIMIT_AS &&
8908                 resource != RLIMIT_DATA &&
8909                 resource != RLIMIT_STACK) {
8910                 return get_errno(setrlimit(resource, &rlim));
8911             } else {
8912                 return 0;
8913             }
8914         }
8915 #endif
8916 #ifdef TARGET_NR_getrlimit
8917     case TARGET_NR_getrlimit:
8918         {
8919             int resource = target_to_host_resource(arg1);
8920             struct target_rlimit *target_rlim;
8921             struct rlimit rlim;
8922 
8923             ret = get_errno(getrlimit(resource, &rlim));
8924             if (!is_error(ret)) {
8925                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8926                     return -TARGET_EFAULT;
8927                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8928                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8929                 unlock_user_struct(target_rlim, arg2, 1);
8930             }
8931         }
8932         return ret;
8933 #endif
8934     case TARGET_NR_getrusage:
8935         {
8936             struct rusage rusage;
8937             ret = get_errno(getrusage(arg1, &rusage));
8938             if (!is_error(ret)) {
8939                 ret = host_to_target_rusage(arg2, &rusage);
8940             }
8941         }
8942         return ret;
8943 #if defined(TARGET_NR_gettimeofday)
8944     case TARGET_NR_gettimeofday:
8945         {
8946             struct timeval tv;
8947             struct timezone tz;
8948 
8949             ret = get_errno(gettimeofday(&tv, &tz));
8950             if (!is_error(ret)) {
8951                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
8952                     return -TARGET_EFAULT;
8953                 }
8954                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
8955                     return -TARGET_EFAULT;
8956                 }
8957             }
8958         }
8959         return ret;
8960 #endif
8961 #if defined(TARGET_NR_settimeofday)
8962     case TARGET_NR_settimeofday:
8963         {
8964             struct timeval tv, *ptv = NULL;
8965             struct timezone tz, *ptz = NULL;
8966 
8967             if (arg1) {
8968                 if (copy_from_user_timeval(&tv, arg1)) {
8969                     return -TARGET_EFAULT;
8970                 }
8971                 ptv = &tv;
8972             }
8973 
8974             if (arg2) {
8975                 if (copy_from_user_timezone(&tz, arg2)) {
8976                     return -TARGET_EFAULT;
8977                 }
8978                 ptz = &tz;
8979             }
8980 
8981             return get_errno(settimeofday(ptv, ptz));
8982         }
8983 #endif
8984 #if defined(TARGET_NR_select)
8985     case TARGET_NR_select:
8986 #if defined(TARGET_WANT_NI_OLD_SELECT)
8987         /* some architectures used to have old_select here
8988          * but now return ENOSYS for it.
8989          */
8990         ret = -TARGET_ENOSYS;
8991 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8992         ret = do_old_select(arg1);
8993 #else
8994         ret = do_select(arg1, arg2, arg3, arg4, arg5);
8995 #endif
8996         return ret;
8997 #endif
8998 #ifdef TARGET_NR_pselect6
8999     case TARGET_NR_pselect6:
9000         {
9001             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9002             fd_set rfds, wfds, efds;
9003             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9004             struct timespec ts, *ts_ptr;
9005 
9006             /*
9007              * The 6th arg is actually two args smashed together,
9008              * so we cannot use the C library.
9009              */
9010             sigset_t set;
9011             struct {
9012                 sigset_t *set;
9013                 size_t size;
9014             } sig, *sig_ptr;
9015 
9016             abi_ulong arg_sigset, arg_sigsize, *arg7;
9017             target_sigset_t *target_sigset;
9018 
9019             n = arg1;
9020             rfd_addr = arg2;
9021             wfd_addr = arg3;
9022             efd_addr = arg4;
9023             ts_addr = arg5;
9024 
9025             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9026             if (ret) {
9027                 return ret;
9028             }
9029             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9030             if (ret) {
9031                 return ret;
9032             }
9033             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9034             if (ret) {
9035                 return ret;
9036             }
9037 
9038             /*
9039              * This takes a timespec, and not a timeval, so we cannot
9040              * use the do_select() helper ...
9041              */
9042             if (ts_addr) {
9043                 if (target_to_host_timespec(&ts, ts_addr)) {
9044                     return -TARGET_EFAULT;
9045                 }
9046                 ts_ptr = &ts;
9047             } else {
9048                 ts_ptr = NULL;
9049             }
9050 
9051             /* Extract the two packed args for the sigset */
9052             if (arg6) {
9053                 sig_ptr = &sig;
9054                 sig.size = SIGSET_T_SIZE;
9055 
9056                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9057                 if (!arg7) {
9058                     return -TARGET_EFAULT;
9059                 }
9060                 arg_sigset = tswapal(arg7[0]);
9061                 arg_sigsize = tswapal(arg7[1]);
9062                 unlock_user(arg7, arg6, 0);
9063 
9064                 if (arg_sigset) {
9065                     sig.set = &set;
9066                     if (arg_sigsize != sizeof(*target_sigset)) {
9067                         /* Like the kernel, we enforce correct size sigsets */
9068                         return -TARGET_EINVAL;
9069                     }
9070                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9071                                               sizeof(*target_sigset), 1);
9072                     if (!target_sigset) {
9073                         return -TARGET_EFAULT;
9074                     }
9075                     target_to_host_sigset(&set, target_sigset);
9076                     unlock_user(target_sigset, arg_sigset, 0);
9077                 } else {
9078                     sig.set = NULL;
9079                 }
9080             } else {
9081                 sig_ptr = NULL;
9082             }
9083 
9084             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9085                                           ts_ptr, sig_ptr));
9086 
9087             if (!is_error(ret)) {
9088                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9089                     return -TARGET_EFAULT;
9090                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9091                     return -TARGET_EFAULT;
9092                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9093                     return -TARGET_EFAULT;
9094 
9095                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9096                     return -TARGET_EFAULT;
9097             }
9098         }
9099         return ret;
9100 #endif
9101 #ifdef TARGET_NR_symlink
9102     case TARGET_NR_symlink:
9103         {
9104             void *p2;
9105             p = lock_user_string(arg1);
9106             p2 = lock_user_string(arg2);
9107             if (!p || !p2)
9108                 ret = -TARGET_EFAULT;
9109             else
9110                 ret = get_errno(symlink(p, p2));
9111             unlock_user(p2, arg2, 0);
9112             unlock_user(p, arg1, 0);
9113         }
9114         return ret;
9115 #endif
9116 #if defined(TARGET_NR_symlinkat)
9117     case TARGET_NR_symlinkat:
9118         {
9119             void *p2;
9120             p  = lock_user_string(arg1);
9121             p2 = lock_user_string(arg3);
9122             if (!p || !p2)
9123                 ret = -TARGET_EFAULT;
9124             else
9125                 ret = get_errno(symlinkat(p, arg2, p2));
9126             unlock_user(p2, arg3, 0);
9127             unlock_user(p, arg1, 0);
9128         }
9129         return ret;
9130 #endif
9131 #ifdef TARGET_NR_readlink
9132     case TARGET_NR_readlink:
9133         {
9134             void *p2;
9135             p = lock_user_string(arg1);
9136             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9137             if (!p || !p2) {
9138                 ret = -TARGET_EFAULT;
9139             } else if (!arg3) {
9140                 /* Short circuit this for the magic exe check. */
9141                 ret = -TARGET_EINVAL;
9142             } else if (is_proc_myself((const char *)p, "exe")) {
9143                 char real[PATH_MAX], *temp;
9144                 temp = realpath(exec_path, real);
9145                 /* Return value is # of bytes that we wrote to the buffer. */
9146                 if (temp == NULL) {
9147                     ret = get_errno(-1);
9148                 } else {
9149                     /* Don't worry about sign mismatch as earlier mapping
9150                      * logic would have thrown a bad address error. */
9151                     ret = MIN(strlen(real), arg3);
9152                     /* We cannot NUL terminate the string. */
9153                     memcpy(p2, real, ret);
9154                 }
9155             } else {
9156                 ret = get_errno(readlink(path(p), p2, arg3));
9157             }
9158             unlock_user(p2, arg2, ret);
9159             unlock_user(p, arg1, 0);
9160         }
9161         return ret;
9162 #endif
9163 #if defined(TARGET_NR_readlinkat)
9164     case TARGET_NR_readlinkat:
9165         {
9166             void *p2;
9167             p  = lock_user_string(arg2);
9168             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9169             if (!p || !p2) {
9170                 ret = -TARGET_EFAULT;
9171             } else if (is_proc_myself((const char *)p, "exe")) {
9172                 char real[PATH_MAX], *temp;
9173                 temp = realpath(exec_path, real);
9174                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9175                 snprintf((char *)p2, arg4, "%s", real);
9176             } else {
9177                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9178             }
9179             unlock_user(p2, arg3, ret);
9180             unlock_user(p, arg2, 0);
9181         }
9182         return ret;
9183 #endif
9184 #ifdef TARGET_NR_swapon
9185     case TARGET_NR_swapon:
9186         if (!(p = lock_user_string(arg1)))
9187             return -TARGET_EFAULT;
9188         ret = get_errno(swapon(p, arg2));
9189         unlock_user(p, arg1, 0);
9190         return ret;
9191 #endif
9192     case TARGET_NR_reboot:
9193         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9194             /* arg4 is only used by LINUX_REBOOT_CMD_RESTART2 */
9195             p = lock_user_string(arg4);
9196             if (!p) {
9197                 return -TARGET_EFAULT;
9198             }
9199             ret = get_errno(reboot(arg1, arg2, arg3, p));
9200             unlock_user(p, arg4, 0);
9201         } else {
9202             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9203         }
9204         return ret;
9205 #ifdef TARGET_NR_mmap
9206     case TARGET_NR_mmap:
9207 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9208     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9209     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9210     || defined(TARGET_S390X)
9211         {
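                 /*
                  * On these older 32-bit ABIs, mmap receives a single guest
                  * pointer to a block of six abi_ulong arguments rather than
                  * the arguments themselves, so fetch and byte-swap them here.
                  */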
9212             abi_ulong *v;
9213             abi_ulong v1, v2, v3, v4, v5, v6;
9214             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9215                 return -TARGET_EFAULT;
9216             v1 = tswapal(v[0]);
9217             v2 = tswapal(v[1]);
9218             v3 = tswapal(v[2]);
9219             v4 = tswapal(v[3]);
9220             v5 = tswapal(v[4]);
9221             v6 = tswapal(v[5]);
9222             unlock_user(v, arg1, 0);
9223             ret = get_errno(target_mmap(v1, v2, v3,
9224                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9225                                         v5, v6));
9226         }
9227 #else
9228         ret = get_errno(target_mmap(arg1, arg2, arg3,
9229                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9230                                     arg5,
9231                                     arg6));
9232 #endif
9233         return ret;
9234 #endif
9235 #ifdef TARGET_NR_mmap2
9236     case TARGET_NR_mmap2:
9237 #ifndef MMAP_SHIFT
9238 #define MMAP_SHIFT 12
9239 #endif
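             /*
              * mmap2 passes the file offset in 4096-byte units; scale it back
              * to a byte offset for target_mmap().
              */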
9240         ret = target_mmap(arg1, arg2, arg3,
9241                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9242                           arg5, arg6 << MMAP_SHIFT);
9243         return get_errno(ret);
9244 #endif
9245     case TARGET_NR_munmap:
9246         return get_errno(target_munmap(arg1, arg2));
9247     case TARGET_NR_mprotect:
9248         {
9249             TaskState *ts = cpu->opaque;
9250             /* Special hack to detect libc making the stack executable.  */
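             /*
              * If PROT_GROWSDOWN is requested for an address inside the guest
              * stack, widen the request down to the recorded stack limit so the
              * whole stack picks up the new protection, and drop the flag.
              */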
9251             if ((arg3 & PROT_GROWSDOWN)
9252                 && arg1 >= ts->info->stack_limit
9253                 && arg1 <= ts->info->start_stack) {
9254                 arg3 &= ~PROT_GROWSDOWN;
9255                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9256                 arg1 = ts->info->stack_limit;
9257             }
9258         }
9259         return get_errno(target_mprotect(arg1, arg2, arg3));
9260 #ifdef TARGET_NR_mremap
9261     case TARGET_NR_mremap:
9262         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9263 #endif
9264         /* ??? msync/mlock/munlock are broken for softmmu.  */
9265 #ifdef TARGET_NR_msync
9266     case TARGET_NR_msync:
9267         return get_errno(msync(g2h(arg1), arg2, arg3));
9268 #endif
9269 #ifdef TARGET_NR_mlock
9270     case TARGET_NR_mlock:
9271         return get_errno(mlock(g2h(arg1), arg2));
9272 #endif
9273 #ifdef TARGET_NR_munlock
9274     case TARGET_NR_munlock:
9275         return get_errno(munlock(g2h(arg1), arg2));
9276 #endif
9277 #ifdef TARGET_NR_mlockall
9278     case TARGET_NR_mlockall:
9279         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9280 #endif
9281 #ifdef TARGET_NR_munlockall
9282     case TARGET_NR_munlockall:
9283         return get_errno(munlockall());
9284 #endif
9285 #ifdef TARGET_NR_truncate
9286     case TARGET_NR_truncate:
9287         if (!(p = lock_user_string(arg1)))
9288             return -TARGET_EFAULT;
9289         ret = get_errno(truncate(p, arg2));
9290         unlock_user(p, arg1, 0);
9291         return ret;
9292 #endif
9293 #ifdef TARGET_NR_ftruncate
9294     case TARGET_NR_ftruncate:
9295         return get_errno(ftruncate(arg1, arg2));
9296 #endif
9297     case TARGET_NR_fchmod:
9298         return get_errno(fchmod(arg1, arg2));
9299 #if defined(TARGET_NR_fchmodat)
9300     case TARGET_NR_fchmodat:
9301         if (!(p = lock_user_string(arg2)))
9302             return -TARGET_EFAULT;
9303         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9304         unlock_user(p, arg2, 0);
9305         return ret;
9306 #endif
9307     case TARGET_NR_getpriority:
9308         /* Note that negative values are valid for getpriority, so we must
9309            differentiate based on errno settings.  */
9310         errno = 0;
9311         ret = getpriority(arg1, arg2);
9312         if (ret == -1 && errno != 0) {
9313             return -host_to_target_errno(errno);
9314         }
9315 #ifdef TARGET_ALPHA
9316         /* Return value is the unbiased priority.  Signal no error.  */
9317         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9318 #else
9319         /* Return value is a biased priority to avoid negative numbers.  */
9320         ret = 20 - ret;
9321 #endif
9322         return ret;
9323     case TARGET_NR_setpriority:
9324         return get_errno(setpriority(arg1, arg2, arg3));
9325 #ifdef TARGET_NR_statfs
9326     case TARGET_NR_statfs:
9327         if (!(p = lock_user_string(arg1))) {
9328             return -TARGET_EFAULT;
9329         }
9330         ret = get_errno(statfs(path(p), &stfs));
9331         unlock_user(p, arg1, 0);
9332     convert_statfs:
9333         if (!is_error(ret)) {
9334             struct target_statfs *target_stfs;
9335 
9336             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9337                 return -TARGET_EFAULT;
9338             __put_user(stfs.f_type, &target_stfs->f_type);
9339             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9340             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9341             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9342             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9343             __put_user(stfs.f_files, &target_stfs->f_files);
9344             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9345             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9346             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9347             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9348             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9349 #ifdef _STATFS_F_FLAGS
9350             __put_user(stfs.f_flags, &target_stfs->f_flags);
9351 #else
9352             __put_user(0, &target_stfs->f_flags);
9353 #endif
9354             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9355             unlock_user_struct(target_stfs, arg2, 1);
9356         }
9357         return ret;
9358 #endif
9359 #ifdef TARGET_NR_fstatfs
9360     case TARGET_NR_fstatfs:
9361         ret = get_errno(fstatfs(arg1, &stfs));
9362         goto convert_statfs;
9363 #endif
9364 #ifdef TARGET_NR_statfs64
9365     case TARGET_NR_statfs64:
9366         if (!(p = lock_user_string(arg1))) {
9367             return -TARGET_EFAULT;
9368         }
9369         ret = get_errno(statfs(path(p), &stfs));
9370         unlock_user(p, arg1, 0);
9371     convert_statfs64:
9372         if (!is_error(ret)) {
9373             struct target_statfs64 *target_stfs;
9374 
9375             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9376                 return -TARGET_EFAULT;
9377             __put_user(stfs.f_type, &target_stfs->f_type);
9378             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9379             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9380             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9381             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9382             __put_user(stfs.f_files, &target_stfs->f_files);
9383             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9384             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9385             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9386             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9387             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9388             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9389             unlock_user_struct(target_stfs, arg3, 1);
9390         }
9391         return ret;
9392     case TARGET_NR_fstatfs64:
9393         ret = get_errno(fstatfs(arg1, &stfs));
9394         goto convert_statfs64;
9395 #endif
9396 #ifdef TARGET_NR_socketcall
9397     case TARGET_NR_socketcall:
9398         return do_socketcall(arg1, arg2);
9399 #endif
9400 #ifdef TARGET_NR_accept
9401     case TARGET_NR_accept:
9402         return do_accept4(arg1, arg2, arg3, 0);
9403 #endif
9404 #ifdef TARGET_NR_accept4
9405     case TARGET_NR_accept4:
9406         return do_accept4(arg1, arg2, arg3, arg4);
9407 #endif
9408 #ifdef TARGET_NR_bind
9409     case TARGET_NR_bind:
9410         return do_bind(arg1, arg2, arg3);
9411 #endif
9412 #ifdef TARGET_NR_connect
9413     case TARGET_NR_connect:
9414         return do_connect(arg1, arg2, arg3);
9415 #endif
9416 #ifdef TARGET_NR_getpeername
9417     case TARGET_NR_getpeername:
9418         return do_getpeername(arg1, arg2, arg3);
9419 #endif
9420 #ifdef TARGET_NR_getsockname
9421     case TARGET_NR_getsockname:
9422         return do_getsockname(arg1, arg2, arg3);
9423 #endif
9424 #ifdef TARGET_NR_getsockopt
9425     case TARGET_NR_getsockopt:
9426         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9427 #endif
9428 #ifdef TARGET_NR_listen
9429     case TARGET_NR_listen:
9430         return get_errno(listen(arg1, arg2));
9431 #endif
9432 #ifdef TARGET_NR_recv
9433     case TARGET_NR_recv:
9434         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9435 #endif
9436 #ifdef TARGET_NR_recvfrom
9437     case TARGET_NR_recvfrom:
9438         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9439 #endif
9440 #ifdef TARGET_NR_recvmsg
9441     case TARGET_NR_recvmsg:
9442         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9443 #endif
9444 #ifdef TARGET_NR_send
9445     case TARGET_NR_send:
9446         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9447 #endif
9448 #ifdef TARGET_NR_sendmsg
9449     case TARGET_NR_sendmsg:
9450         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9451 #endif
9452 #ifdef TARGET_NR_sendmmsg
9453     case TARGET_NR_sendmmsg:
9454         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9455 #endif
9456 #ifdef TARGET_NR_recvmmsg
9457     case TARGET_NR_recvmmsg:
9458         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9459 #endif
9460 #ifdef TARGET_NR_sendto
9461     case TARGET_NR_sendto:
9462         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9463 #endif
9464 #ifdef TARGET_NR_shutdown
9465     case TARGET_NR_shutdown:
9466         return get_errno(shutdown(arg1, arg2));
9467 #endif
9468 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9469     case TARGET_NR_getrandom:
9470         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9471         if (!p) {
9472             return -TARGET_EFAULT;
9473         }
9474         ret = get_errno(getrandom(p, arg2, arg3));
9475         unlock_user(p, arg1, ret);
9476         return ret;
9477 #endif
9478 #ifdef TARGET_NR_socket
9479     case TARGET_NR_socket:
9480         return do_socket(arg1, arg2, arg3);
9481 #endif
9482 #ifdef TARGET_NR_socketpair
9483     case TARGET_NR_socketpair:
9484         return do_socketpair(arg1, arg2, arg3, arg4);
9485 #endif
9486 #ifdef TARGET_NR_setsockopt
9487     case TARGET_NR_setsockopt:
9488         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9489 #endif
9490 #if defined(TARGET_NR_syslog)
9491     case TARGET_NR_syslog:
9492         {
9493             int len = arg3;
9494 
9495             switch (arg1) {
9496             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9497             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9498             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9499             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9500             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9501             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9502             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9503             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9504                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9505             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9506             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9507             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9508                 {
9509                     if (len < 0) {
9510                         return -TARGET_EINVAL;
9511                     }
9512                     if (len == 0) {
9513                         return 0;
9514                     }
9515                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9516                     if (!p) {
9517                         return -TARGET_EFAULT;
9518                     }
9519                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9520                     unlock_user(p, arg2, arg3);
9521                 }
9522                 return ret;
9523             default:
9524                 return -TARGET_EINVAL;
9525             }
9526         }
9527         break;
9528 #endif
9529     case TARGET_NR_setitimer:
9530         {
9531             struct itimerval value, ovalue, *pvalue;
9532 
9533             if (arg2) {
9534                 pvalue = &value;
9535                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9536                     || copy_from_user_timeval(&pvalue->it_value,
9537                                               arg2 + sizeof(struct target_timeval)))
9538                     return -TARGET_EFAULT;
9539             } else {
9540                 pvalue = NULL;
9541             }
9542             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9543             if (!is_error(ret) && arg3) {
9544                 if (copy_to_user_timeval(arg3,
9545                                          &ovalue.it_interval)
9546                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9547                                             &ovalue.it_value))
9548                     return -TARGET_EFAULT;
9549             }
9550         }
9551         return ret;
9552     case TARGET_NR_getitimer:
9553         {
9554             struct itimerval value;
9555 
9556             ret = get_errno(getitimer(arg1, &value));
9557             if (!is_error(ret) && arg2) {
9558                 if (copy_to_user_timeval(arg2,
9559                                          &value.it_interval)
9560                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9561                                             &value.it_value))
9562                     return -TARGET_EFAULT;
9563             }
9564         }
9565         return ret;
9566 #ifdef TARGET_NR_stat
9567     case TARGET_NR_stat:
9568         if (!(p = lock_user_string(arg1))) {
9569             return -TARGET_EFAULT;
9570         }
9571         ret = get_errno(stat(path(p), &st));
9572         unlock_user(p, arg1, 0);
9573         goto do_stat;
9574 #endif
9575 #ifdef TARGET_NR_lstat
9576     case TARGET_NR_lstat:
9577         if (!(p = lock_user_string(arg1))) {
9578             return -TARGET_EFAULT;
9579         }
9580         ret = get_errno(lstat(path(p), &st));
9581         unlock_user(p, arg1, 0);
9582         goto do_stat;
9583 #endif
9584 #ifdef TARGET_NR_fstat
9585     case TARGET_NR_fstat:
9586         {
9587             ret = get_errno(fstat(arg1, &st));
9588 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9589         do_stat:
9590 #endif
9591             if (!is_error(ret)) {
9592                 struct target_stat *target_st;
9593 
9594                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9595                     return -TARGET_EFAULT;
9596                 memset(target_st, 0, sizeof(*target_st));
9597                 __put_user(st.st_dev, &target_st->st_dev);
9598                 __put_user(st.st_ino, &target_st->st_ino);
9599                 __put_user(st.st_mode, &target_st->st_mode);
9600                 __put_user(st.st_uid, &target_st->st_uid);
9601                 __put_user(st.st_gid, &target_st->st_gid);
9602                 __put_user(st.st_nlink, &target_st->st_nlink);
9603                 __put_user(st.st_rdev, &target_st->st_rdev);
9604                 __put_user(st.st_size, &target_st->st_size);
9605                 __put_user(st.st_blksize, &target_st->st_blksize);
9606                 __put_user(st.st_blocks, &target_st->st_blocks);
9607                 __put_user(st.st_atime, &target_st->target_st_atime);
9608                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9609                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9610 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9611     defined(TARGET_STAT_HAVE_NSEC)
9612                 __put_user(st.st_atim.tv_nsec,
9613                            &target_st->target_st_atime_nsec);
9614                 __put_user(st.st_mtim.tv_nsec,
9615                            &target_st->target_st_mtime_nsec);
9616                 __put_user(st.st_ctim.tv_nsec,
9617                            &target_st->target_st_ctime_nsec);
9618 #endif
9619                 unlock_user_struct(target_st, arg2, 1);
9620             }
9621         }
9622         return ret;
9623 #endif
9624     case TARGET_NR_vhangup:
9625         return get_errno(vhangup());
9626 #ifdef TARGET_NR_syscall
9627     case TARGET_NR_syscall:
9628         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9629                           arg6, arg7, arg8, 0);
9630 #endif
9631 #if defined(TARGET_NR_wait4)
9632     case TARGET_NR_wait4:
9633         {
9634             int status;
9635             abi_long status_ptr = arg2;
9636             struct rusage rusage, *rusage_ptr;
9637             abi_ulong target_rusage = arg4;
9638             abi_long rusage_err;
9639             if (target_rusage)
9640                 rusage_ptr = &rusage;
9641             else
9642                 rusage_ptr = NULL;
9643             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9644             if (!is_error(ret)) {
9645                 if (status_ptr && ret) {
9646                     status = host_to_target_waitstatus(status);
9647                     if (put_user_s32(status, status_ptr))
9648                         return -TARGET_EFAULT;
9649                 }
9650                 if (target_rusage) {
9651                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9652                     if (rusage_err) {
9653                         ret = rusage_err;
9654                     }
9655                 }
9656             }
9657         }
9658         return ret;
9659 #endif
9660 #ifdef TARGET_NR_swapoff
9661     case TARGET_NR_swapoff:
9662         if (!(p = lock_user_string(arg1)))
9663             return -TARGET_EFAULT;
9664         ret = get_errno(swapoff(p));
9665         unlock_user(p, arg1, 0);
9666         return ret;
9667 #endif
9668     case TARGET_NR_sysinfo:
9669         {
9670             struct target_sysinfo *target_value;
9671             struct sysinfo value;
9672             ret = get_errno(sysinfo(&value));
9673             if (!is_error(ret) && arg1)
9674             {
9675                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9676                     return -TARGET_EFAULT;
9677                 __put_user(value.uptime, &target_value->uptime);
9678                 __put_user(value.loads[0], &target_value->loads[0]);
9679                 __put_user(value.loads[1], &target_value->loads[1]);
9680                 __put_user(value.loads[2], &target_value->loads[2]);
9681                 __put_user(value.totalram, &target_value->totalram);
9682                 __put_user(value.freeram, &target_value->freeram);
9683                 __put_user(value.sharedram, &target_value->sharedram);
9684                 __put_user(value.bufferram, &target_value->bufferram);
9685                 __put_user(value.totalswap, &target_value->totalswap);
9686                 __put_user(value.freeswap, &target_value->freeswap);
9687                 __put_user(value.procs, &target_value->procs);
9688                 __put_user(value.totalhigh, &target_value->totalhigh);
9689                 __put_user(value.freehigh, &target_value->freehigh);
9690                 __put_user(value.mem_unit, &target_value->mem_unit);
9691                 unlock_user_struct(target_value, arg1, 1);
9692             }
9693         }
9694         return ret;
9695 #ifdef TARGET_NR_ipc
9696     case TARGET_NR_ipc:
9697         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9698 #endif
9699 #ifdef TARGET_NR_semget
9700     case TARGET_NR_semget:
9701         return get_errno(semget(arg1, arg2, arg3));
9702 #endif
9703 #ifdef TARGET_NR_semop
9704     case TARGET_NR_semop:
9705         return do_semop(arg1, arg2, arg3);
9706 #endif
9707 #ifdef TARGET_NR_semctl
9708     case TARGET_NR_semctl:
9709         return do_semctl(arg1, arg2, arg3, arg4);
9710 #endif
9711 #ifdef TARGET_NR_msgctl
9712     case TARGET_NR_msgctl:
9713         return do_msgctl(arg1, arg2, arg3);
9714 #endif
9715 #ifdef TARGET_NR_msgget
9716     case TARGET_NR_msgget:
9717         return get_errno(msgget(arg1, arg2));
9718 #endif
9719 #ifdef TARGET_NR_msgrcv
9720     case TARGET_NR_msgrcv:
9721         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9722 #endif
9723 #ifdef TARGET_NR_msgsnd
9724     case TARGET_NR_msgsnd:
9725         return do_msgsnd(arg1, arg2, arg3, arg4);
9726 #endif
9727 #ifdef TARGET_NR_shmget
9728     case TARGET_NR_shmget:
9729         return get_errno(shmget(arg1, arg2, arg3));
9730 #endif
9731 #ifdef TARGET_NR_shmctl
9732     case TARGET_NR_shmctl:
9733         return do_shmctl(arg1, arg2, arg3);
9734 #endif
9735 #ifdef TARGET_NR_shmat
9736     case TARGET_NR_shmat:
9737         return do_shmat(cpu_env, arg1, arg2, arg3);
9738 #endif
9739 #ifdef TARGET_NR_shmdt
9740     case TARGET_NR_shmdt:
9741         return do_shmdt(arg1);
9742 #endif
9743     case TARGET_NR_fsync:
9744         return get_errno(fsync(arg1));
9745     case TARGET_NR_clone:
9746         /* Linux manages to have three different orderings for its
9747          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9748          * match the kernel's CONFIG_CLONE_* settings.
9749          * Microblaze is further special in that it uses a sixth
9750          * implicit argument to clone for the TLS pointer.
9751          */
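         /*
          * do_fork() takes (cpu_env, flags, newsp, parent_tidptr, newtls,
          * child_tidptr); the #if ladder below only reorders the raw syscall
          * arguments into that layout for each convention.
          */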
9752 #if defined(TARGET_MICROBLAZE)
9753         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9754 #elif defined(TARGET_CLONE_BACKWARDS)
9755         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9756 #elif defined(TARGET_CLONE_BACKWARDS2)
9757         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9758 #else
9759         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9760 #endif
9761         return ret;
9762 #ifdef __NR_exit_group
9763         /* new thread calls */
9764     case TARGET_NR_exit_group:
9765         preexit_cleanup(cpu_env, arg1);
9766         return get_errno(exit_group(arg1));
9767 #endif
9768     case TARGET_NR_setdomainname:
9769         if (!(p = lock_user_string(arg1)))
9770             return -TARGET_EFAULT;
9771         ret = get_errno(setdomainname(p, arg2));
9772         unlock_user(p, arg1, 0);
9773         return ret;
9774     case TARGET_NR_uname:
9775         /* No need to transcode because we use the Linux syscall directly. */
9776         {
9777             struct new_utsname * buf;
9778 
9779             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9780                 return -TARGET_EFAULT;
9781             ret = get_errno(sys_uname(buf));
9782             if (!is_error(ret)) {
9783                 /* Overwrite the native machine name with whatever is being
9784                    emulated. */
9785                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9786                           sizeof(buf->machine));
9787                 /* Allow the user to override the reported release.  */
9788                 if (qemu_uname_release && *qemu_uname_release) {
9789                     g_strlcpy(buf->release, qemu_uname_release,
9790                               sizeof(buf->release));
9791                 }
9792             }
9793             unlock_user_struct(buf, arg1, 1);
9794         }
9795         return ret;
9796 #ifdef TARGET_I386
9797     case TARGET_NR_modify_ldt:
9798         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9799 #if !defined(TARGET_X86_64)
9800     case TARGET_NR_vm86:
9801         return do_vm86(cpu_env, arg1, arg2);
9802 #endif
9803 #endif
9804 #if defined(TARGET_NR_adjtimex)
9805     case TARGET_NR_adjtimex:
9806         {
9807             struct timex host_buf;
9808 
9809             if (target_to_host_timex(&host_buf, arg1) != 0) {
9810                 return -TARGET_EFAULT;
9811             }
9812             ret = get_errno(adjtimex(&host_buf));
9813             if (!is_error(ret)) {
9814                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9815                     return -TARGET_EFAULT;
9816                 }
9817             }
9818         }
9819         return ret;
9820 #endif
9821 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9822     case TARGET_NR_clock_adjtime:
9823         {
9824             struct timex htx, *phtx = &htx;
9825 
9826             if (target_to_host_timex(phtx, arg2) != 0) {
9827                 return -TARGET_EFAULT;
9828             }
9829             ret = get_errno(clock_adjtime(arg1, phtx));
9830             if (!is_error(ret) && phtx) {
9831                 if (host_to_target_timex(arg2, phtx) != 0) {
9832                     return -TARGET_EFAULT;
9833                 }
9834             }
9835         }
9836         return ret;
9837 #endif
9838     case TARGET_NR_getpgid:
9839         return get_errno(getpgid(arg1));
9840     case TARGET_NR_fchdir:
9841         return get_errno(fchdir(arg1));
9842     case TARGET_NR_personality:
9843         return get_errno(personality(arg1));
9844 #ifdef TARGET_NR__llseek /* Not on alpha */
9845     case TARGET_NR__llseek:
9846         {
9847             int64_t res;
9848 #if !defined(__NR_llseek)
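             /*
              * Hosts without a separate llseek syscall (64-bit hosts) can seek
              * directly: reassemble the 64-bit offset from its two 32-bit halves.
              */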
9849             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9850             if (res == -1) {
9851                 ret = get_errno(res);
9852             } else {
9853                 ret = 0;
9854             }
9855 #else
9856             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9857 #endif
9858             if ((ret == 0) && put_user_s64(res, arg4)) {
9859                 return -TARGET_EFAULT;
9860             }
9861         }
9862         return ret;
9863 #endif
9864 #ifdef TARGET_NR_getdents
9865     case TARGET_NR_getdents:
9866 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9867 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9868         {
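             /*
              * The host linux_dirent (64-bit d_ino/d_off) is wider than the
              * 32-bit target_dirent, so read the host records into a bounce
              * buffer and rewrite them one record at a time into the guest
              * buffer, recomputing d_reclen for the target layout.
              */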
9869             struct target_dirent *target_dirp;
9870             struct linux_dirent *dirp;
9871             abi_long count = arg3;
9872 
9873             dirp = g_try_malloc(count);
9874             if (!dirp) {
9875                 return -TARGET_ENOMEM;
9876             }
9877 
9878             ret = get_errno(sys_getdents(arg1, dirp, count));
9879             if (!is_error(ret)) {
9880                 struct linux_dirent *de;
9881                 struct target_dirent *tde;
9882                 int len = ret;
9883                 int reclen, treclen;
9884                 int count1, tnamelen;
9885 
9886                 count1 = 0;
9887                 de = dirp;
9888                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9889                     return -TARGET_EFAULT;
9890                 tde = target_dirp;
9891                 while (len > 0) {
9892                     reclen = de->d_reclen;
9893                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9894                     assert(tnamelen >= 0);
9895                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9896                     assert(count1 + treclen <= count);
9897                     tde->d_reclen = tswap16(treclen);
9898                     tde->d_ino = tswapal(de->d_ino);
9899                     tde->d_off = tswapal(de->d_off);
9900                     memcpy(tde->d_name, de->d_name, tnamelen);
9901                     de = (struct linux_dirent *)((char *)de + reclen);
9902                     len -= reclen;
9903                     tde = (struct target_dirent *)((char *)tde + treclen);
9904                     count1 += treclen;
9905                 }
9906                 ret = count1;
9907                 unlock_user(target_dirp, arg2, ret);
9908             }
9909             g_free(dirp);
9910         }
9911 #else
9912         {
9913             struct linux_dirent *dirp;
9914             abi_long count = arg3;
9915 
9916             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9917                 return -TARGET_EFAULT;
9918             ret = get_errno(sys_getdents(arg1, dirp, count));
9919             if (!is_error(ret)) {
9920                 struct linux_dirent *de;
9921                 int len = ret;
9922                 int reclen;
9923                 de = dirp;
9924                 while (len > 0) {
9925                     reclen = de->d_reclen;
9926                     if (reclen > len)
9927                         break;
9928                     de->d_reclen = tswap16(reclen);
9929                     tswapls(&de->d_ino);
9930                     tswapls(&de->d_off);
9931                     de = (struct linux_dirent *)((char *)de + reclen);
9932                     len -= reclen;
9933                 }
9934             }
9935             unlock_user(dirp, arg2, ret);
9936         }
9937 #endif
9938 #else
9939         /* Implement getdents in terms of getdents64 */
9940         {
9941             struct linux_dirent64 *dirp;
9942             abi_long count = arg3;
9943 
9944             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9945             if (!dirp) {
9946                 return -TARGET_EFAULT;
9947             }
9948             ret = get_errno(sys_getdents64(arg1, dirp, count));
9949             if (!is_error(ret)) {
9950                 /* Convert the dirent64 structs to target dirent.  We do this
9951                  * in-place, since we can guarantee that a target_dirent is no
9952                  * larger than a dirent64; however this means we have to be
9953                  * careful to read everything before writing in the new format.
9954                  */
9955                 struct linux_dirent64 *de;
9956                 struct target_dirent *tde;
9957                 int len = ret;
9958                 int tlen = 0;
9959 
9960                 de = dirp;
9961                 tde = (struct target_dirent *)dirp;
9962                 while (len > 0) {
9963                     int namelen, treclen;
9964                     int reclen = de->d_reclen;
9965                     uint64_t ino = de->d_ino;
9966                     int64_t off = de->d_off;
9967                     uint8_t type = de->d_type;
9968 
9969                     namelen = strlen(de->d_name);
9970                     treclen = offsetof(struct target_dirent, d_name)
9971                         + namelen + 2;
9972                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9973 
9974                     memmove(tde->d_name, de->d_name, namelen + 1);
9975                     tde->d_ino = tswapal(ino);
9976                     tde->d_off = tswapal(off);
9977                     tde->d_reclen = tswap16(treclen);
9978                     /* The target_dirent type is in what was formerly a padding
9979                      * byte at the end of the structure:
9980                      */
9981                     *(((char *)tde) + treclen - 1) = type;
9982 
9983                     de = (struct linux_dirent64 *)((char *)de + reclen);
9984                     tde = (struct target_dirent *)((char *)tde + treclen);
9985                     len -= reclen;
9986                     tlen += treclen;
9987                 }
9988                 ret = tlen;
9989             }
9990             unlock_user(dirp, arg2, ret);
9991         }
9992 #endif
9993         return ret;
9994 #endif /* TARGET_NR_getdents */
9995 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9996     case TARGET_NR_getdents64:
9997         {
9998             struct linux_dirent64 *dirp;
9999             abi_long count = arg3;
10000             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10001                 return -TARGET_EFAULT;
10002             ret = get_errno(sys_getdents64(arg1, dirp, count));
10003             if (!is_error(ret)) {
10004                 struct linux_dirent64 *de;
10005                 int len = ret;
10006                 int reclen;
10007                 de = dirp;
10008                 while (len > 0) {
10009                     reclen = de->d_reclen;
10010                     if (reclen > len)
10011                         break;
10012                     de->d_reclen = tswap16(reclen);
10013                     tswap64s((uint64_t *)&de->d_ino);
10014                     tswap64s((uint64_t *)&de->d_off);
10015                     de = (struct linux_dirent64 *)((char *)de + reclen);
10016                     len -= reclen;
10017                 }
10018             }
10019             unlock_user(dirp, arg2, ret);
10020         }
10021         return ret;
10022 #endif /* TARGET_NR_getdents64 */
10023 #if defined(TARGET_NR__newselect)
10024     case TARGET_NR__newselect:
10025         return do_select(arg1, arg2, arg3, arg4, arg5);
10026 #endif
10027 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10028 # ifdef TARGET_NR_poll
10029     case TARGET_NR_poll:
10030 # endif
10031 # ifdef TARGET_NR_ppoll
10032     case TARGET_NR_ppoll:
10033 # endif
10034         {
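              /*
               * poll and ppoll share this implementation: the guest pollfd
               * array is converted to host format and safe_ppoll() is used for
               * both, with poll's millisecond timeout converted to a timespec.
               */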
10035             struct target_pollfd *target_pfd;
10036             unsigned int nfds = arg2;
10037             struct pollfd *pfd;
10038             unsigned int i;
10039 
10040             pfd = NULL;
10041             target_pfd = NULL;
10042             if (nfds) {
10043                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10044                     return -TARGET_EINVAL;
10045                 }
10046 
10047                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10048                                        sizeof(struct target_pollfd) * nfds, 1);
10049                 if (!target_pfd) {
10050                     return -TARGET_EFAULT;
10051                 }
10052 
10053                 pfd = alloca(sizeof(struct pollfd) * nfds);
10054                 for (i = 0; i < nfds; i++) {
10055                     pfd[i].fd = tswap32(target_pfd[i].fd);
10056                     pfd[i].events = tswap16(target_pfd[i].events);
10057                 }
10058             }
10059 
10060             switch (num) {
10061 # ifdef TARGET_NR_ppoll
10062             case TARGET_NR_ppoll:
10063             {
10064                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10065                 target_sigset_t *target_set;
10066                 sigset_t _set, *set = &_set;
10067 
10068                 if (arg3) {
10069                     if (target_to_host_timespec(timeout_ts, arg3)) {
10070                         unlock_user(target_pfd, arg1, 0);
10071                         return -TARGET_EFAULT;
10072                     }
10073                 } else {
10074                     timeout_ts = NULL;
10075                 }
10076 
10077                 if (arg4) {
10078                     if (arg5 != sizeof(target_sigset_t)) {
10079                         unlock_user(target_pfd, arg1, 0);
10080                         return -TARGET_EINVAL;
10081                     }
10082 
10083                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10084                     if (!target_set) {
10085                         unlock_user(target_pfd, arg1, 0);
10086                         return -TARGET_EFAULT;
10087                     }
10088                     target_to_host_sigset(set, target_set);
10089                 } else {
10090                     set = NULL;
10091                 }
10092 
10093                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10094                                            set, SIGSET_T_SIZE));
10095 
10096                 if (!is_error(ret) && arg3) {
10097                     host_to_target_timespec(arg3, timeout_ts);
10098                 }
10099                 if (arg4) {
10100                     unlock_user(target_set, arg4, 0);
10101                 }
10102                 break;
10103             }
10104 # endif
10105 # ifdef TARGET_NR_poll
10106             case TARGET_NR_poll:
10107             {
10108                 struct timespec ts, *pts;
10109 
10110                 if (arg3 >= 0) {
10111                     /* Convert ms to secs, ns */
10112                     ts.tv_sec = arg3 / 1000;
10113                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10114                     pts = &ts;
10115                 } else {
10116                     /* A negative poll() timeout means "infinite" */
10117                     pts = NULL;
10118                 }
10119                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10120                 break;
10121             }
10122 # endif
10123             default:
10124                 g_assert_not_reached();
10125             }
10126 
10127             if (!is_error(ret)) {
10128                 for(i = 0; i < nfds; i++) {
10129                     target_pfd[i].revents = tswap16(pfd[i].revents);
10130                 }
10131             }
10132             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10133         }
10134         return ret;
10135 #endif
10136     case TARGET_NR_flock:
10137         /* NOTE: the flock constant seems to be the same for every
10138            Linux platform */
10139         return get_errno(safe_flock(arg1, arg2));
10140     case TARGET_NR_readv:
10141         {
10142             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10143             if (vec != NULL) {
10144                 ret = get_errno(safe_readv(arg1, vec, arg3));
10145                 unlock_iovec(vec, arg2, arg3, 1);
10146             } else {
10147                 ret = -host_to_target_errno(errno);
10148             }
10149         }
10150         return ret;
10151     case TARGET_NR_writev:
10152         {
10153             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10154             if (vec != NULL) {
10155                 ret = get_errno(safe_writev(arg1, vec, arg3));
10156                 unlock_iovec(vec, arg2, arg3, 0);
10157             } else {
10158                 ret = -host_to_target_errno(errno);
10159             }
10160         }
10161         return ret;
10162 #if defined(TARGET_NR_preadv)
10163     case TARGET_NR_preadv:
10164         {
10165             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10166             if (vec != NULL) {
10167                 unsigned long low, high;
10168 
10169                 target_to_host_low_high(arg4, arg5, &low, &high);
10170                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10171                 unlock_iovec(vec, arg2, arg3, 1);
10172             } else {
10173                 ret = -host_to_target_errno(errno);
10174             }
10175         }
10176         return ret;
10177 #endif
10178 #if defined(TARGET_NR_pwritev)
10179     case TARGET_NR_pwritev:
10180         {
10181             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10182             if (vec != NULL) {
10183                 unsigned long low, high;
10184 
10185                 target_to_host_low_high(arg4, arg5, &low, &high);
10186                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10187                 unlock_iovec(vec, arg2, arg3, 0);
10188             } else {
10189                 ret = -host_to_target_errno(errno);
10190             }
10191         }
10192         return ret;
10193 #endif
10194     case TARGET_NR_getsid:
10195         return get_errno(getsid(arg1));
10196 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10197     case TARGET_NR_fdatasync:
10198         return get_errno(fdatasync(arg1));
10199 #endif
10200 #ifdef TARGET_NR__sysctl
10201     case TARGET_NR__sysctl:
10202         /* We don't implement this, but ENOTDIR is always a safe
10203            return value. */
10204         return -TARGET_ENOTDIR;
10205 #endif
10206     case TARGET_NR_sched_getaffinity:
10207         {
10208             unsigned int mask_size;
10209             unsigned long *mask;
10210 
10211             /*
10212              * sched_getaffinity needs multiples of ulong, so we must take
10213              * care of mismatches between target ulong and host ulong sizes.
10214              */
10215             if (arg2 & (sizeof(abi_ulong) - 1)) {
10216                 return -TARGET_EINVAL;
10217             }
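              /*
               * Round the guest-supplied length up to a whole number of host
               * longs, since the host syscall works in host-ulong-sized chunks.
               */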
10218             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10219 
10220             mask = alloca(mask_size);
10221             memset(mask, 0, mask_size);
10222             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10223 
10224             if (!is_error(ret)) {
10225                 if (ret > arg2) {
10226                     /* More data returned than the caller's buffer will fit.
10227                      * This only happens if sizeof(abi_long) < sizeof(long)
10228                      * and the caller passed us a buffer holding an odd number
10229                      * of abi_longs. If the host kernel is actually using the
10230                      * extra 4 bytes then fail EINVAL; otherwise we can just
10231                      * ignore them and only copy the interesting part.
10232                      */
10233                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10234                     if (numcpus > arg2 * 8) {
10235                         return -TARGET_EINVAL;
10236                     }
10237                     ret = arg2;
10238                 }
10239 
10240                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10241                     return -TARGET_EFAULT;
10242                 }
10243             }
10244         }
10245         return ret;
10246     case TARGET_NR_sched_setaffinity:
10247         {
10248             unsigned int mask_size;
10249             unsigned long *mask;
10250 
10251             /*
10252              * sched_setaffinity needs multiples of ulong, so we must take
10253              * care of mismatches between target ulong and host ulong sizes.
10254              */
10255             if (arg2 & (sizeof(abi_ulong) - 1)) {
10256                 return -TARGET_EINVAL;
10257             }
10258             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10259             mask = alloca(mask_size);
10260 
10261             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10262             if (ret) {
10263                 return ret;
10264             }
10265 
10266             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10267         }
10268     case TARGET_NR_getcpu:
10269         {
10270             unsigned cpu, node;
10271             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10272                                        arg2 ? &node : NULL,
10273                                        NULL));
10274             if (is_error(ret)) {
10275                 return ret;
10276             }
10277             if (arg1 && put_user_u32(cpu, arg1)) {
10278                 return -TARGET_EFAULT;
10279             }
10280             if (arg2 && put_user_u32(node, arg2)) {
10281                 return -TARGET_EFAULT;
10282             }
10283         }
10284         return ret;
10285     case TARGET_NR_sched_setparam:
10286         {
10287             struct sched_param *target_schp;
10288             struct sched_param schp;
10289 
10290             if (arg2 == 0) {
10291                 return -TARGET_EINVAL;
10292             }
10293             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10294                 return -TARGET_EFAULT;
10295             schp.sched_priority = tswap32(target_schp->sched_priority);
10296             unlock_user_struct(target_schp, arg2, 0);
10297             return get_errno(sched_setparam(arg1, &schp));
10298         }
10299     case TARGET_NR_sched_getparam:
10300         {
10301             struct sched_param *target_schp;
10302             struct sched_param schp;
10303 
10304             if (arg2 == 0) {
10305                 return -TARGET_EINVAL;
10306             }
10307             ret = get_errno(sched_getparam(arg1, &schp));
10308             if (!is_error(ret)) {
10309                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10310                     return -TARGET_EFAULT;
10311                 target_schp->sched_priority = tswap32(schp.sched_priority);
10312                 unlock_user_struct(target_schp, arg2, 1);
10313             }
10314         }
10315         return ret;
10316     case TARGET_NR_sched_setscheduler:
10317         {
10318             struct sched_param *target_schp;
10319             struct sched_param schp;
10320             if (arg3 == 0) {
10321                 return -TARGET_EINVAL;
10322             }
10323             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10324                 return -TARGET_EFAULT;
10325             schp.sched_priority = tswap32(target_schp->sched_priority);
10326             unlock_user_struct(target_schp, arg3, 0);
10327             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10328         }
10329     case TARGET_NR_sched_getscheduler:
10330         return get_errno(sched_getscheduler(arg1));
10331     case TARGET_NR_sched_yield:
10332         return get_errno(sched_yield());
10333     case TARGET_NR_sched_get_priority_max:
10334         return get_errno(sched_get_priority_max(arg1));
10335     case TARGET_NR_sched_get_priority_min:
10336         return get_errno(sched_get_priority_min(arg1));
10337 #ifdef TARGET_NR_sched_rr_get_interval
10338     case TARGET_NR_sched_rr_get_interval:
10339         {
10340             struct timespec ts;
10341             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10342             if (!is_error(ret)) {
10343                 ret = host_to_target_timespec(arg2, &ts);
10344             }
10345         }
10346         return ret;
10347 #endif
10348 #if defined(TARGET_NR_nanosleep)
10349     case TARGET_NR_nanosleep:
10350         {
10351             struct timespec req, rem;
10352             target_to_host_timespec(&req, arg1);
10353             ret = get_errno(safe_nanosleep(&req, &rem));
10354             if (is_error(ret) && arg2) {
10355                 host_to_target_timespec(arg2, &rem);
10356             }
10357         }
10358         return ret;
10359 #endif
10360     case TARGET_NR_prctl:
10361         switch (arg1) {
10362         case PR_GET_PDEATHSIG:
10363         {
10364             int deathsig;
10365             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10366             if (!is_error(ret) && arg2
10367                 && put_user_ual(deathsig, arg2)) {
10368                 return -TARGET_EFAULT;
10369             }
10370             return ret;
10371         }
10372 #ifdef PR_GET_NAME
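          /*
           * The kernel's thread name (TASK_COMM_LEN) is 16 bytes including the
           * trailing NUL, hence the fixed-size buffers below.
           */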
10373         case PR_GET_NAME:
10374         {
10375             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10376             if (!name) {
10377                 return -TARGET_EFAULT;
10378             }
10379             ret = get_errno(prctl(arg1, (unsigned long)name,
10380                                   arg3, arg4, arg5));
10381             unlock_user(name, arg2, 16);
10382             return ret;
10383         }
10384         case PR_SET_NAME:
10385         {
10386             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10387             if (!name) {
10388                 return -TARGET_EFAULT;
10389             }
10390             ret = get_errno(prctl(arg1, (unsigned long)name,
10391                                   arg3, arg4, arg5));
10392             unlock_user(name, arg2, 0);
10393             return ret;
10394         }
10395 #endif
10396 #ifdef TARGET_MIPS
10397         case TARGET_PR_GET_FP_MODE:
10398         {
10399             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10400             ret = 0;
10401             if (env->CP0_Status & (1 << CP0St_FR)) {
10402                 ret |= TARGET_PR_FP_MODE_FR;
10403             }
10404             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10405                 ret |= TARGET_PR_FP_MODE_FRE;
10406             }
10407             return ret;
10408         }
10409         case TARGET_PR_SET_FP_MODE:
10410         {
10411             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10412             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10413             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10414             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10415             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10416 
10417             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10418                                             TARGET_PR_FP_MODE_FRE;
10419 
10420             /* If nothing to change, return right away, successfully.  */
10421             if (old_fr == new_fr && old_fre == new_fre) {
10422                 return 0;
10423             }
10424             /* Check the value is valid */
10425             if (arg2 & ~known_bits) {
10426                 return -TARGET_EOPNOTSUPP;
10427             }
10428             /* Setting FRE without FR is not supported.  */
10429             if (new_fre && !new_fr) {
10430                 return -TARGET_EOPNOTSUPP;
10431             }
10432             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10433                 /* FR1 is not supported */
10434                 return -TARGET_EOPNOTSUPP;
10435             }
10436             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10437                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10438                 /* cannot set FR=0 */
10439                 return -TARGET_EOPNOTSUPP;
10440             }
10441             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10442                 /* Cannot set FRE=1 */
10443                 return -TARGET_EOPNOTSUPP;
10444             }
10445 
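              /*
               * Changing FR alters how odd-numbered single-precision registers
               * map onto the 64-bit FPRs, so shuffle the affected 32-bit words
               * to preserve the guest's FP values across the mode switch.
               */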
10446             int i;
10447             fpr_t *fpr = env->active_fpu.fpr;
10448             for (i = 0; i < 32 ; i += 2) {
10449                 if (!old_fr && new_fr) {
10450                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10451                 } else if (old_fr && !new_fr) {
10452                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10453                 }
10454             }
10455 
10456             if (new_fr) {
10457                 env->CP0_Status |= (1 << CP0St_FR);
10458                 env->hflags |= MIPS_HFLAG_F64;
10459             } else {
10460                 env->CP0_Status &= ~(1 << CP0St_FR);
10461                 env->hflags &= ~MIPS_HFLAG_F64;
10462             }
10463             if (new_fre) {
10464                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10465                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10466                     env->hflags |= MIPS_HFLAG_FRE;
10467                 }
10468             } else {
10469                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10470                 env->hflags &= ~MIPS_HFLAG_FRE;
10471             }
10472 
10473             return 0;
10474         }
10475 #endif /* MIPS */
10476 #ifdef TARGET_AARCH64
10477         case TARGET_PR_SVE_SET_VL:
10478             /*
10479              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10480              * PR_SVE_VL_INHERIT.  Note the kernel definition
10481              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10482              * even though the current architectural maximum is VQ=16.
10483              */
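              /*
               * arg2 is the requested vector length in bytes.  Convert it to
               * quadwords (vq), clamp to the CPU's maximum, drop any state that
               * no longer fits when shrinking, and return the resulting length
               * in bytes.
               */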
10484             ret = -TARGET_EINVAL;
10485             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10486                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10487                 CPUARMState *env = cpu_env;
10488                 ARMCPU *cpu = env_archcpu(env);
10489                 uint32_t vq, old_vq;
10490 
10491                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10492                 vq = MAX(arg2 / 16, 1);
10493                 vq = MIN(vq, cpu->sve_max_vq);
10494 
10495                 if (vq < old_vq) {
10496                     aarch64_sve_narrow_vq(env, vq);
10497                 }
10498                 env->vfp.zcr_el[1] = vq - 1;
10499                 arm_rebuild_hflags(env);
10500                 ret = vq * 16;
10501             }
10502             return ret;
10503         case TARGET_PR_SVE_GET_VL:
10504             ret = -TARGET_EINVAL;
10505             {
10506                 ARMCPU *cpu = env_archcpu(cpu_env);
10507                 if (cpu_isar_feature(aa64_sve, cpu)) {
10508                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10509                 }
10510             }
10511             return ret;
10512         case TARGET_PR_PAC_RESET_KEYS:
10513             {
10514                 CPUARMState *env = cpu_env;
10515                 ARMCPU *cpu = env_archcpu(env);
10516 
10517                 if (arg3 || arg4 || arg5) {
10518                     return -TARGET_EINVAL;
10519                 }
10520                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10521                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10522                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10523                                TARGET_PR_PAC_APGAKEY);
10524                     int ret = 0;
10525                     Error *err = NULL;
10526 
10527                     if (arg2 == 0) {
10528                         arg2 = all;
10529                     } else if (arg2 & ~all) {
10530                         return -TARGET_EINVAL;
10531                     }
10532                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10533                         ret |= qemu_guest_getrandom(&env->keys.apia,
10534                                                     sizeof(ARMPACKey), &err);
10535                     }
10536                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10537                         ret |= qemu_guest_getrandom(&env->keys.apib,
10538                                                     sizeof(ARMPACKey), &err);
10539                     }
10540                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10541                         ret |= qemu_guest_getrandom(&env->keys.apda,
10542                                                     sizeof(ARMPACKey), &err);
10543                     }
10544                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10545                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10546                                                     sizeof(ARMPACKey), &err);
10547                     }
10548                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10549                         ret |= qemu_guest_getrandom(&env->keys.apga,
10550                                                     sizeof(ARMPACKey), &err);
10551                     }
10552                     if (ret != 0) {
10553                         /*
10554                          * Some unknown failure in the crypto.  The best
10555                          * we can do is log it and fail the syscall.
10556                          * The real syscall cannot fail this way.
10557                          */
10558                         qemu_log_mask(LOG_UNIMP,
10559                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10560                                       error_get_pretty(err));
10561                         error_free(err);
10562                         return -TARGET_EIO;
10563                     }
10564                     return 0;
10565                 }
10566             }
10567             return -TARGET_EINVAL;
10568 #endif /* AARCH64 */
10569         case PR_GET_SECCOMP:
10570         case PR_SET_SECCOMP:
10571             /* Disable seccomp to prevent the target from disabling
10572              * syscalls that we need. */
10573             return -TARGET_EINVAL;
10574         default:
10575             /* Most prctl options have no pointer arguments */
10576             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10577         }
10578         break;
10579 #ifdef TARGET_NR_arch_prctl
10580     case TARGET_NR_arch_prctl:
10581         return do_arch_prctl(cpu_env, arg1, arg2);
10582 #endif
10583 #ifdef TARGET_NR_pread64
10584     case TARGET_NR_pread64:
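              /*
               * On 32-bit ABIs that want 64-bit syscall arguments in aligned
               * register pairs, a padding slot precedes the offset, so its two
               * halves arrive one argument later; shift them back before
               * reassembling the 64-bit offset with target_offset64().
               */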
10585         if (regpairs_aligned(cpu_env, num)) {
10586             arg4 = arg5;
10587             arg5 = arg6;
10588         }
10589         if (arg2 == 0 && arg3 == 0) {
10590             /* Special-case NULL buffer and zero length, which should succeed */
10591             p = 0;
10592         } else {
10593             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10594             if (!p) {
10595                 return -TARGET_EFAULT;
10596             }
10597         }
10598         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10599         unlock_user(p, arg2, ret);
10600         return ret;
10601     case TARGET_NR_pwrite64:
10602         if (regpairs_aligned(cpu_env, num)) {
10603             arg4 = arg5;
10604             arg5 = arg6;
10605         }
10606         if (arg2 == 0 && arg3 == 0) {
10607             /* Special-case NULL buffer and zero length, which should succeed */
10608             p = 0;
10609         } else {
10610             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10611             if (!p) {
10612                 return -TARGET_EFAULT;
10613             }
10614         }
10615         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10616         unlock_user(p, arg2, 0);
10617         return ret;
10618 #endif
10619     case TARGET_NR_getcwd:
10620         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10621             return -TARGET_EFAULT;
10622         ret = get_errno(sys_getcwd1(p, arg2));
10623         unlock_user(p, arg1, ret);
10624         return ret;
10625     case TARGET_NR_capget:
10626     case TARGET_NR_capset:
10627     {
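              /*
               * capget() and capset() share this implementation.  The header is
               * always copied in and written back (the kernel updates the version
               * field), while the data array has one element for the legacy v1
               * format and two for version 2 and later.
               */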
10628         struct target_user_cap_header *target_header;
10629         struct target_user_cap_data *target_data = NULL;
10630         struct __user_cap_header_struct header;
10631         struct __user_cap_data_struct data[2];
10632         struct __user_cap_data_struct *dataptr = NULL;
10633         int i, target_datalen;
10634         int data_items = 1;
10635 
10636         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10637             return -TARGET_EFAULT;
10638         }
10639         header.version = tswap32(target_header->version);
10640         header.pid = tswap32(target_header->pid);
10641 
10642         if (header.version != _LINUX_CAPABILITY_VERSION) {
10643             /* Versions 2 and up take a pointer to two user_data structs */
10644             data_items = 2;
10645         }
10646 
10647         target_datalen = sizeof(*target_data) * data_items;
10648 
10649         if (arg2) {
10650             if (num == TARGET_NR_capget) {
10651                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10652             } else {
10653                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10654             }
10655             if (!target_data) {
10656                 unlock_user_struct(target_header, arg1, 0);
10657                 return -TARGET_EFAULT;
10658             }
10659 
10660             if (num == TARGET_NR_capset) {
10661                 for (i = 0; i < data_items; i++) {
10662                     data[i].effective = tswap32(target_data[i].effective);
10663                     data[i].permitted = tswap32(target_data[i].permitted);
10664                     data[i].inheritable = tswap32(target_data[i].inheritable);
10665                 }
10666             }
10667 
10668             dataptr = data;
10669         }
10670 
10671         if (num == TARGET_NR_capget) {
10672             ret = get_errno(capget(&header, dataptr));
10673         } else {
10674             ret = get_errno(capset(&header, dataptr));
10675         }
10676 
10677         /* The kernel always updates version for both capget and capset */
10678         target_header->version = tswap32(header.version);
10679         unlock_user_struct(target_header, arg1, 1);
10680 
10681         if (arg2) {
10682             if (num == TARGET_NR_capget) {
10683                 for (i = 0; i < data_items; i++) {
10684                     target_data[i].effective = tswap32(data[i].effective);
10685                     target_data[i].permitted = tswap32(data[i].permitted);
10686                     target_data[i].inheritable = tswap32(data[i].inheritable);
10687                 }
10688                 unlock_user(target_data, arg2, target_datalen);
10689             } else {
10690                 unlock_user(target_data, arg2, 0);
10691             }
10692         }
10693         return ret;
10694     }
10695     case TARGET_NR_sigaltstack:
10696         return do_sigaltstack(arg1, arg2,
10697                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10698 
10699 #ifdef CONFIG_SENDFILE
10700 #ifdef TARGET_NR_sendfile
10701     case TARGET_NR_sendfile:
10702     {
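              /*
               * The target's sendfile passes an abi_long-sized offset while
               * sendfile64 passes a full 64-bit one; both map onto the host
               * sendfile(), and the updated offset is copied back when arg3
               * was supplied.
               */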
10703         off_t *offp = NULL;
10704         off_t off;
10705         if (arg3) {
10706             ret = get_user_sal(off, arg3);
10707             if (is_error(ret)) {
10708                 return ret;
10709             }
10710             offp = &off;
10711         }
10712         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10713         if (!is_error(ret) && arg3) {
10714             abi_long ret2 = put_user_sal(off, arg3);
10715             if (is_error(ret2)) {
10716                 ret = ret2;
10717             }
10718         }
10719         return ret;
10720     }
10721 #endif
10722 #ifdef TARGET_NR_sendfile64
10723     case TARGET_NR_sendfile64:
10724     {
10725         off_t *offp = NULL;
10726         off_t off;
10727         if (arg3) {
10728             ret = get_user_s64(off, arg3);
10729             if (is_error(ret)) {
10730                 return ret;
10731             }
10732             offp = &off;
10733         }
10734         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10735         if (!is_error(ret) && arg3) {
10736             abi_long ret2 = put_user_s64(off, arg3);
10737             if (is_error(ret2)) {
10738                 ret = ret2;
10739             }
10740         }
10741         return ret;
10742     }
10743 #endif
10744 #endif
10745 #ifdef TARGET_NR_vfork
10746     case TARGET_NR_vfork:
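              /*
               * vfork is implemented via the common do_fork() path: CLONE_VM |
               * CLONE_VFORK provides the host vfork semantics, with the target's
               * SIGCHLD value as the child-exit signal.
               */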
10747         return get_errno(do_fork(cpu_env,
10748                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10749                          0, 0, 0, 0));
10750 #endif
10751 #ifdef TARGET_NR_ugetrlimit
10752     case TARGET_NR_ugetrlimit:
10753     {
10754         struct rlimit rlim;
10755         int resource = target_to_host_resource(arg1);
10756         ret = get_errno(getrlimit(resource, &rlim));
10757         if (!is_error(ret)) {
10758             struct target_rlimit *target_rlim;
10759             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10760                 return -TARGET_EFAULT;
10761             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10762             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10763             unlock_user_struct(target_rlim, arg2, 1);
10764         }
10765         return ret;
10766     }
10767 #endif
10768 #ifdef TARGET_NR_truncate64
10769     case TARGET_NR_truncate64:
10770         if (!(p = lock_user_string(arg1)))
10771             return -TARGET_EFAULT;
10772         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10773         unlock_user(p, arg1, 0);
10774         return ret;
10775 #endif
10776 #ifdef TARGET_NR_ftruncate64
10777     case TARGET_NR_ftruncate64:
10778         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10779 #endif
10780 #ifdef TARGET_NR_stat64
10781     case TARGET_NR_stat64:
10782         if (!(p = lock_user_string(arg1))) {
10783             return -TARGET_EFAULT;
10784         }
10785         ret = get_errno(stat(path(p), &st));
10786         unlock_user(p, arg1, 0);
10787         if (!is_error(ret))
10788             ret = host_to_target_stat64(cpu_env, arg2, &st);
10789         return ret;
10790 #endif
10791 #ifdef TARGET_NR_lstat64
10792     case TARGET_NR_lstat64:
10793         if (!(p = lock_user_string(arg1))) {
10794             return -TARGET_EFAULT;
10795         }
10796         ret = get_errno(lstat(path(p), &st));
10797         unlock_user(p, arg1, 0);
10798         if (!is_error(ret))
10799             ret = host_to_target_stat64(cpu_env, arg2, &st);
10800         return ret;
10801 #endif
10802 #ifdef TARGET_NR_fstat64
10803     case TARGET_NR_fstat64:
10804         ret = get_errno(fstat(arg1, &st));
10805         if (!is_error(ret))
10806             ret = host_to_target_stat64(cpu_env, arg2, &st);
10807         return ret;
10808 #endif
10809 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10810 #ifdef TARGET_NR_fstatat64
10811     case TARGET_NR_fstatat64:
10812 #endif
10813 #ifdef TARGET_NR_newfstatat
10814     case TARGET_NR_newfstatat:
10815 #endif
10816         if (!(p = lock_user_string(arg2))) {
10817             return -TARGET_EFAULT;
10818         }
10819         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10820         unlock_user(p, arg2, 0);
10821         if (!is_error(ret))
10822             ret = host_to_target_stat64(cpu_env, arg3, &st);
10823         return ret;
10824 #endif
10825 #if defined(TARGET_NR_statx)
10826     case TARGET_NR_statx:
10827         {
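                  /*
                   * Prefer the host statx() syscall when the host headers provide
                   * it; if the host kernel returns ENOSYS, fall back to fstatat()
                   * and synthesize a target_statx with the basic stat fields
                   * filled in and everything else zeroed.
                   */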
10828             struct target_statx *target_stx;
10829             int dirfd = arg1;
10830             int flags = arg3;
10831 
10832             p = lock_user_string(arg2);
10833             if (p == NULL) {
10834                 return -TARGET_EFAULT;
10835             }
10836 #if defined(__NR_statx)
10837             {
10838                 /*
10839                  * It is assumed that struct statx is architecture independent.
10840                  */
10841                 struct target_statx host_stx;
10842                 int mask = arg4;
10843 
10844                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10845                 if (!is_error(ret)) {
10846                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10847                         unlock_user(p, arg2, 0);
10848                         return -TARGET_EFAULT;
10849                     }
10850                 }
10851 
10852                 if (ret != -TARGET_ENOSYS) {
10853                     unlock_user(p, arg2, 0);
10854                     return ret;
10855                 }
10856             }
10857 #endif
10858             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10859             unlock_user(p, arg2, 0);
10860 
10861             if (!is_error(ret)) {
10862                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10863                     return -TARGET_EFAULT;
10864                 }
10865                 memset(target_stx, 0, sizeof(*target_stx));
10866                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10867                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10868                 __put_user(st.st_ino, &target_stx->stx_ino);
10869                 __put_user(st.st_mode, &target_stx->stx_mode);
10870                 __put_user(st.st_uid, &target_stx->stx_uid);
10871                 __put_user(st.st_gid, &target_stx->stx_gid);
10872                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10873                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10874                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10875                 __put_user(st.st_size, &target_stx->stx_size);
10876                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10877                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10878                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10879                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10880                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10881                 unlock_user_struct(target_stx, arg5, 1);
10882             }
10883         }
10884         return ret;
10885 #endif
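          /*
           * The syscalls below without a "32" suffix are the legacy ID variants.
           * On targets that use 16-bit UIDs/GIDs, the low2high/high2low helpers
           * widen and narrow the values; on other targets they are no-ops.
           */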
10886 #ifdef TARGET_NR_lchown
10887     case TARGET_NR_lchown:
10888         if (!(p = lock_user_string(arg1)))
10889             return -TARGET_EFAULT;
10890         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10891         unlock_user(p, arg1, 0);
10892         return ret;
10893 #endif
10894 #ifdef TARGET_NR_getuid
10895     case TARGET_NR_getuid:
10896         return get_errno(high2lowuid(getuid()));
10897 #endif
10898 #ifdef TARGET_NR_getgid
10899     case TARGET_NR_getgid:
10900         return get_errno(high2lowgid(getgid()));
10901 #endif
10902 #ifdef TARGET_NR_geteuid
10903     case TARGET_NR_geteuid:
10904         return get_errno(high2lowuid(geteuid()));
10905 #endif
10906 #ifdef TARGET_NR_getegid
10907     case TARGET_NR_getegid:
10908         return get_errno(high2lowgid(getegid()));
10909 #endif
10910     case TARGET_NR_setreuid:
10911         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10912     case TARGET_NR_setregid:
10913         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10914     case TARGET_NR_getgroups:
10915         {
10916             int gidsetsize = arg1;
10917             target_id *target_grouplist;
10918             gid_t *grouplist;
10919             int i;
10920 
10921             grouplist = alloca(gidsetsize * sizeof(gid_t));
10922             ret = get_errno(getgroups(gidsetsize, grouplist));
10923             if (gidsetsize == 0)
10924                 return ret;
10925             if (!is_error(ret)) {
10926                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10927                 if (!target_grouplist)
10928                     return -TARGET_EFAULT;
10929                 for (i = 0; i < ret; i++)
10930                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10931                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10932             }
10933         }
10934         return ret;
10935     case TARGET_NR_setgroups:
10936         {
10937             int gidsetsize = arg1;
10938             target_id *target_grouplist;
10939             gid_t *grouplist = NULL;
10940             int i;
10941             if (gidsetsize) {
10942                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10943                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10944                 if (!target_grouplist) {
10945                     return -TARGET_EFAULT;
10946                 }
10947                 for (i = 0; i < gidsetsize; i++) {
10948                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10949                 }
10950                 unlock_user(target_grouplist, arg2, 0);
10951             }
10952             return get_errno(setgroups(gidsetsize, grouplist));
10953         }
10954     case TARGET_NR_fchown:
10955         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10956 #if defined(TARGET_NR_fchownat)
10957     case TARGET_NR_fchownat:
10958         if (!(p = lock_user_string(arg2)))
10959             return -TARGET_EFAULT;
10960         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10961                                  low2highgid(arg4), arg5));
10962         unlock_user(p, arg2, 0);
10963         return ret;
10964 #endif
10965 #ifdef TARGET_NR_setresuid
10966     case TARGET_NR_setresuid:
10967         return get_errno(sys_setresuid(low2highuid(arg1),
10968                                        low2highuid(arg2),
10969                                        low2highuid(arg3)));
10970 #endif
10971 #ifdef TARGET_NR_getresuid
10972     case TARGET_NR_getresuid:
10973         {
10974             uid_t ruid, euid, suid;
10975             ret = get_errno(getresuid(&ruid, &euid, &suid));
10976             if (!is_error(ret)) {
10977                 if (put_user_id(high2lowuid(ruid), arg1)
10978                     || put_user_id(high2lowuid(euid), arg2)
10979                     || put_user_id(high2lowuid(suid), arg3))
10980                     return -TARGET_EFAULT;
10981             }
10982         }
10983         return ret;
10984 #endif
10985 #ifdef TARGET_NR_getresgid
10986     case TARGET_NR_setresgid:
10987         return get_errno(sys_setresgid(low2highgid(arg1),
10988                                        low2highgid(arg2),
10989                                        low2highgid(arg3)));
10990 #endif
10991 #ifdef TARGET_NR_getresgid
10992     case TARGET_NR_getresgid:
10993         {
10994             gid_t rgid, egid, sgid;
10995             ret = get_errno(getresgid(&rgid, &egid, &sgid));
10996             if (!is_error(ret)) {
10997                 if (put_user_id(high2lowgid(rgid), arg1)
10998                     || put_user_id(high2lowgid(egid), arg2)
10999                     || put_user_id(high2lowgid(sgid), arg3))
11000                     return -TARGET_EFAULT;
11001             }
11002         }
11003         return ret;
11004 #endif
11005 #ifdef TARGET_NR_chown
11006     case TARGET_NR_chown:
11007         if (!(p = lock_user_string(arg1)))
11008             return -TARGET_EFAULT;
11009         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11010         unlock_user(p, arg1, 0);
11011         return ret;
11012 #endif
11013     case TARGET_NR_setuid:
11014         return get_errno(sys_setuid(low2highuid(arg1)));
11015     case TARGET_NR_setgid:
11016         return get_errno(sys_setgid(low2highgid(arg1)));
11017     case TARGET_NR_setfsuid:
11018         return get_errno(setfsuid(arg1));
11019     case TARGET_NR_setfsgid:
11020         return get_errno(setfsgid(arg1));
11021 
11022 #ifdef TARGET_NR_lchown32
11023     case TARGET_NR_lchown32:
11024         if (!(p = lock_user_string(arg1)))
11025             return -TARGET_EFAULT;
11026         ret = get_errno(lchown(p, arg2, arg3));
11027         unlock_user(p, arg1, 0);
11028         return ret;
11029 #endif
11030 #ifdef TARGET_NR_getuid32
11031     case TARGET_NR_getuid32:
11032         return get_errno(getuid());
11033 #endif
11034 
11035 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11036    /* Alpha specific */
11037     case TARGET_NR_getxuid:
11038          {
11039             uid_t euid;
11040             euid = geteuid();
11041             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11042          }
11043         return get_errno(getuid());
11044 #endif
11045 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11046    /* Alpha specific */
11047     case TARGET_NR_getxgid:
11048          {
11049             gid_t egid;
11050             egid = getegid();
11051             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11052          }
11053         return get_errno(getgid());
11054 #endif
11055 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11056     /* Alpha specific */
11057     case TARGET_NR_osf_getsysinfo:
11058         ret = -TARGET_EOPNOTSUPP;
11059         switch (arg1) {
11060           case TARGET_GSI_IEEE_FP_CONTROL:
11061             {
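                      /*
                       * Rebuild the software completion control word for the guest:
                       * the trap-enable and mapping bits live in env->swcr, while
                       * the accrued status bits are kept in the hardware FPCR
                       * (35 bits higher up) and are merged back in here.
                       */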
11062                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11063                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11064 
11065                 swcr &= ~SWCR_STATUS_MASK;
11066                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11067 
11068                 if (put_user_u64(swcr, arg2))
11069                     return -TARGET_EFAULT;
11070                 ret = 0;
11071             }
11072             break;
11073 
11074           /* case GSI_IEEE_STATE_AT_SIGNAL:
11075              -- Not implemented in linux kernel.
11076              case GSI_UACPROC:
11077              -- Retrieves current unaligned access state; not much used.
11078              case GSI_PROC_TYPE:
11079              -- Retrieves implver information; surely not used.
11080              case GSI_GET_HWRPB:
11081              -- Grabs a copy of the HWRPB; surely not used.
11082           */
11083         }
11084         return ret;
11085 #endif
11086 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11087     /* Alpha specific */
11088     case TARGET_NR_osf_setsysinfo:
11089         ret = -TARGET_EOPNOTSUPP;
11090         switch (arg1) {
11091           case TARGET_SSI_IEEE_FP_CONTROL:
11092             {
11093                 uint64_t swcr, fpcr;
11094 
11095                 if (get_user_u64 (swcr, arg2)) {
11096                     return -TARGET_EFAULT;
11097                 }
11098 
11099                 /*
11100                  * The kernel calls swcr_update_status to update the
11101                  * status bits from the fpcr at every point that it
11102                  * could be queried.  Therefore, we store the status
11103                  * bits only in FPCR.
11104                  */
11105                 ((CPUAlphaState *)cpu_env)->swcr
11106                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11107 
11108                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11109                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11110                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11111                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11112                 ret = 0;
11113             }
11114             break;
11115 
11116           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11117             {
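                      /*
                       * Determine which of the requested exception bits are newly
                       * raised (not already pending in the FPCR) and enabled for
                       * trapping in swcr; all requested bits are recorded in the
                       * FPCR, and a SIGFPE with the most specific si_code we can
                       * derive is queued if any enabled exception remains.
                       */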
11118                 uint64_t exc, fpcr, fex;
11119 
11120                 if (get_user_u64(exc, arg2)) {
11121                     return -TARGET_EFAULT;
11122                 }
11123                 exc &= SWCR_STATUS_MASK;
11124                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11125 
11126                 /* Old exceptions are not signaled.  */
11127                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11128                 fex = exc & ~fex;
11129                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11130                 fex &= ((CPUArchState *)cpu_env)->swcr;
11131 
11132                 /* Update the hardware fpcr.  */
11133                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11134                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11135 
11136                 if (fex) {
11137                     int si_code = TARGET_FPE_FLTUNK;
11138                     target_siginfo_t info;
11139 
11140                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11141                         si_code = TARGET_FPE_FLTUND;
11142                     }
11143                     if (fex & SWCR_TRAP_ENABLE_INE) {
11144                         si_code = TARGET_FPE_FLTRES;
11145                     }
11146                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11147                         si_code = TARGET_FPE_FLTUND;
11148                     }
11149                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11150                         si_code = TARGET_FPE_FLTOVF;
11151                     }
11152                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11153                         si_code = TARGET_FPE_FLTDIV;
11154                     }
11155                     if (fex & SWCR_TRAP_ENABLE_INV) {
11156                         si_code = TARGET_FPE_FLTINV;
11157                     }
11158 
11159                     info.si_signo = SIGFPE;
11160                     info.si_errno = 0;
11161                     info.si_code = si_code;
11162                     info._sifields._sigfault._addr
11163                         = ((CPUArchState *)cpu_env)->pc;
11164                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11165                                  QEMU_SI_FAULT, &info);
11166                 }
11167                 ret = 0;
11168             }
11169             break;
11170 
11171           /* case SSI_NVPAIRS:
11172              -- Used with SSIN_UACPROC to enable unaligned accesses.
11173              case SSI_IEEE_STATE_AT_SIGNAL:
11174              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11175              -- Not implemented in linux kernel
11176           */
11177         }
11178         return ret;
11179 #endif
11180 #ifdef TARGET_NR_osf_sigprocmask
11181     /* Alpha specific.  */
11182     case TARGET_NR_osf_sigprocmask:
11183         {
11184             abi_ulong mask;
11185             int how;
11186             sigset_t set, oldset;
11187 
11188             switch (arg1) {
11189             case TARGET_SIG_BLOCK:
11190                 how = SIG_BLOCK;
11191                 break;
11192             case TARGET_SIG_UNBLOCK:
11193                 how = SIG_UNBLOCK;
11194                 break;
11195             case TARGET_SIG_SETMASK:
11196                 how = SIG_SETMASK;
11197                 break;
11198             default:
11199                 return -TARGET_EINVAL;
11200             }
11201             mask = arg2;
11202             target_to_host_old_sigset(&set, &mask);
11203             ret = do_sigprocmask(how, &set, &oldset);
11204             if (!ret) {
11205                 host_to_target_old_sigset(&mask, &oldset);
11206                 ret = mask;
11207             }
11208         }
11209         return ret;
11210 #endif
11211 
11212 #ifdef TARGET_NR_getgid32
11213     case TARGET_NR_getgid32:
11214         return get_errno(getgid());
11215 #endif
11216 #ifdef TARGET_NR_geteuid32
11217     case TARGET_NR_geteuid32:
11218         return get_errno(geteuid());
11219 #endif
11220 #ifdef TARGET_NR_getegid32
11221     case TARGET_NR_getegid32:
11222         return get_errno(getegid());
11223 #endif
11224 #ifdef TARGET_NR_setreuid32
11225     case TARGET_NR_setreuid32:
11226         return get_errno(setreuid(arg1, arg2));
11227 #endif
11228 #ifdef TARGET_NR_setregid32
11229     case TARGET_NR_setregid32:
11230         return get_errno(setregid(arg1, arg2));
11231 #endif
11232 #ifdef TARGET_NR_getgroups32
11233     case TARGET_NR_getgroups32:
11234         {
11235             int gidsetsize = arg1;
11236             uint32_t *target_grouplist;
11237             gid_t *grouplist;
11238             int i;
11239 
11240             grouplist = alloca(gidsetsize * sizeof(gid_t));
11241             ret = get_errno(getgroups(gidsetsize, grouplist));
11242             if (gidsetsize == 0)
11243                 return ret;
11244             if (!is_error(ret)) {
11245                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11246                 if (!target_grouplist) {
11247                     return -TARGET_EFAULT;
11248                 }
11249                 for (i = 0; i < ret; i++)
11250                     target_grouplist[i] = tswap32(grouplist[i]);
11251                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11252             }
11253         }
11254         return ret;
11255 #endif
11256 #ifdef TARGET_NR_setgroups32
11257     case TARGET_NR_setgroups32:
11258         {
11259             int gidsetsize = arg1;
11260             uint32_t *target_grouplist;
11261             gid_t *grouplist;
11262             int i;
11263 
11264             grouplist = alloca(gidsetsize * sizeof(gid_t));
11265             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11266             if (!target_grouplist) {
11267                 return -TARGET_EFAULT;
11268             }
11269             for (i = 0; i < gidsetsize; i++)
11270                 grouplist[i] = tswap32(target_grouplist[i]);
11271             unlock_user(target_grouplist, arg2, 0);
11272             return get_errno(setgroups(gidsetsize, grouplist));
11273         }
11274 #endif
11275 #ifdef TARGET_NR_fchown32
11276     case TARGET_NR_fchown32:
11277         return get_errno(fchown(arg1, arg2, arg3));
11278 #endif
11279 #ifdef TARGET_NR_setresuid32
11280     case TARGET_NR_setresuid32:
11281         return get_errno(sys_setresuid(arg1, arg2, arg3));
11282 #endif
11283 #ifdef TARGET_NR_getresuid32
11284     case TARGET_NR_getresuid32:
11285         {
11286             uid_t ruid, euid, suid;
11287             ret = get_errno(getresuid(&ruid, &euid, &suid));
11288             if (!is_error(ret)) {
11289                 if (put_user_u32(ruid, arg1)
11290                     || put_user_u32(euid, arg2)
11291                     || put_user_u32(suid, arg3))
11292                     return -TARGET_EFAULT;
11293             }
11294         }
11295         return ret;
11296 #endif
11297 #ifdef TARGET_NR_setresgid32
11298     case TARGET_NR_setresgid32:
11299         return get_errno(sys_setresgid(arg1, arg2, arg3));
11300 #endif
11301 #ifdef TARGET_NR_getresgid32
11302     case TARGET_NR_getresgid32:
11303         {
11304             gid_t rgid, egid, sgid;
11305             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11306             if (!is_error(ret)) {
11307                 if (put_user_u32(rgid, arg1)
11308                     || put_user_u32(egid, arg2)
11309                     || put_user_u32(sgid, arg3))
11310                     return -TARGET_EFAULT;
11311             }
11312         }
11313         return ret;
11314 #endif
11315 #ifdef TARGET_NR_chown32
11316     case TARGET_NR_chown32:
11317         if (!(p = lock_user_string(arg1)))
11318             return -TARGET_EFAULT;
11319         ret = get_errno(chown(p, arg2, arg3));
11320         unlock_user(p, arg1, 0);
11321         return ret;
11322 #endif
11323 #ifdef TARGET_NR_setuid32
11324     case TARGET_NR_setuid32:
11325         return get_errno(sys_setuid(arg1));
11326 #endif
11327 #ifdef TARGET_NR_setgid32
11328     case TARGET_NR_setgid32:
11329         return get_errno(sys_setgid(arg1));
11330 #endif
11331 #ifdef TARGET_NR_setfsuid32
11332     case TARGET_NR_setfsuid32:
11333         return get_errno(setfsuid(arg1));
11334 #endif
11335 #ifdef TARGET_NR_setfsgid32
11336     case TARGET_NR_setfsgid32:
11337         return get_errno(setfsgid(arg1));
11338 #endif
11339 #ifdef TARGET_NR_mincore
11340     case TARGET_NR_mincore:
11341         {
11342             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11343             if (!a) {
11344                 return -TARGET_ENOMEM;
11345             }
11346             p = lock_user_string(arg3);
11347             if (!p) {
11348                 ret = -TARGET_EFAULT;
11349             } else {
11350                 ret = get_errno(mincore(a, arg2, p));
11351                 unlock_user(p, arg3, ret);
11352             }
11353             unlock_user(a, arg1, 0);
11354         }
11355         return ret;
11356 #endif
11357 #ifdef TARGET_NR_arm_fadvise64_64
11358     case TARGET_NR_arm_fadvise64_64:
11359         /* arm_fadvise64_64 looks like fadvise64_64 but
11360          * with different argument order: fd, advice, offset, len
11361          * rather than the usual fd, offset, len, advice.
11362          * Note that offset and len are both 64-bit so appear as
11363          * pairs of 32-bit registers.
11364          */
11365         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11366                             target_offset64(arg5, arg6), arg2);
11367         return -host_to_target_errno(ret);
11368 #endif
11369 
11370 #if TARGET_ABI_BITS == 32
11371 
11372 #ifdef TARGET_NR_fadvise64_64
11373     case TARGET_NR_fadvise64_64:
11374 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11375         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11376         ret = arg2;
11377         arg2 = arg3;
11378         arg3 = arg4;
11379         arg4 = arg5;
11380         arg5 = arg6;
11381         arg6 = ret;
11382 #else
11383         /* 6 args: fd, offset (high, low), len (high, low), advice */
11384         if (regpairs_aligned(cpu_env, num)) {
11385             /* offset is in (3,4), len in (5,6) and advice in 7 */
11386             arg2 = arg3;
11387             arg3 = arg4;
11388             arg4 = arg5;
11389             arg5 = arg6;
11390             arg6 = arg7;
11391         }
11392 #endif
11393         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11394                             target_offset64(arg4, arg5), arg6);
11395         return -host_to_target_errno(ret);
11396 #endif
11397 
11398 #ifdef TARGET_NR_fadvise64
11399     case TARGET_NR_fadvise64:
11400         /* 5 args: fd, offset (high, low), len, advice */
11401         if (regpairs_aligned(cpu_env, num)) {
11402             /* offset is in (3,4), len in 5 and advice in 6 */
11403             arg2 = arg3;
11404             arg3 = arg4;
11405             arg4 = arg5;
11406             arg5 = arg6;
11407         }
11408         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11409         return -host_to_target_errno(ret);
11410 #endif
11411 
11412 #else /* not a 32-bit ABI */
11413 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11414 #ifdef TARGET_NR_fadvise64_64
11415     case TARGET_NR_fadvise64_64:
11416 #endif
11417 #ifdef TARGET_NR_fadvise64
11418     case TARGET_NR_fadvise64:
11419 #endif
11420 #ifdef TARGET_S390X
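              /*
               * s390x historically numbers POSIX_FADV_DONTNEED and
               * POSIX_FADV_NOREUSE as 6 and 7 rather than 4 and 5, so remap the
               * target values here and turn 4 and 5 into deliberately invalid
               * advice values.
               */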
11421         switch (arg4) {
11422         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11423         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11424         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11425         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11426         default: break;
11427         }
11428 #endif
11429         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11430 #endif
11431 #endif /* end of 64-bit ABI fadvise handling */
11432 
11433 #ifdef TARGET_NR_madvise
11434     case TARGET_NR_madvise:
11435         /* A straight passthrough may not be safe because qemu sometimes
11436            turns private file-backed mappings into anonymous mappings.
11437            This will break MADV_DONTNEED.
11438            This is a hint, so ignoring and returning success is ok.  */
11439         return 0;
11440 #endif
11441 #ifdef TARGET_NR_fcntl64
11442     case TARGET_NR_fcntl64:
11443     {
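              /*
               * On 32-bit targets the F_*LK64 commands use the 64-bit flock
               * layout.  ARM OABI lays out struct flock64 differently from EABI,
               * so a different pair of copy helpers is selected in that case.
               */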
11444         int cmd;
11445         struct flock64 fl;
11446         from_flock64_fn *copyfrom = copy_from_user_flock64;
11447         to_flock64_fn *copyto = copy_to_user_flock64;
11448 
11449 #ifdef TARGET_ARM
11450         if (!((CPUARMState *)cpu_env)->eabi) {
11451             copyfrom = copy_from_user_oabi_flock64;
11452             copyto = copy_to_user_oabi_flock64;
11453         }
11454 #endif
11455 
11456         cmd = target_to_host_fcntl_cmd(arg2);
11457         if (cmd == -TARGET_EINVAL) {
11458             return cmd;
11459         }
11460 
11461         switch (arg2) {
11462         case TARGET_F_GETLK64:
11463             ret = copyfrom(&fl, arg3);
11464             if (ret) {
11465                 break;
11466             }
11467             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11468             if (ret == 0) {
11469                 ret = copyto(arg3, &fl);
11470             }
11471             break;
11472 
11473         case TARGET_F_SETLK64:
11474         case TARGET_F_SETLKW64:
11475             ret = copyfrom(&fl, arg3);
11476             if (ret) {
11477                 break;
11478             }
11479             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11480             break;
11481         default:
11482             ret = do_fcntl(arg1, arg2, arg3);
11483             break;
11484         }
11485         return ret;
11486     }
11487 #endif
11488 #ifdef TARGET_NR_cacheflush
11489     case TARGET_NR_cacheflush:
11490         /* self-modifying code is handled automatically, so nothing needed */
11491         return 0;
11492 #endif
11493 #ifdef TARGET_NR_getpagesize
11494     case TARGET_NR_getpagesize:
11495         return TARGET_PAGE_SIZE;
11496 #endif
11497     case TARGET_NR_gettid:
11498         return get_errno(sys_gettid());
11499 #ifdef TARGET_NR_readahead
11500     case TARGET_NR_readahead:
11501 #if TARGET_ABI_BITS == 32
11502         if (regpairs_aligned(cpu_env, num)) {
11503             arg2 = arg3;
11504             arg3 = arg4;
11505             arg4 = arg5;
11506         }
11507         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11508 #else
11509         ret = get_errno(readahead(arg1, arg2, arg3));
11510 #endif
11511         return ret;
11512 #endif
11513 #ifdef CONFIG_ATTR
11514 #ifdef TARGET_NR_setxattr
11515     case TARGET_NR_listxattr:
11516     case TARGET_NR_llistxattr:
11517     {
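              /*
               * A NULL buffer is legitimate here: listxattr() then only reports
               * the size needed for the attribute name list, so the guest buffer
               * is locked only when one was actually supplied.
               */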
11518         void *p, *b = 0;
11519         if (arg2) {
11520             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11521             if (!b) {
11522                 return -TARGET_EFAULT;
11523             }
11524         }
11525         p = lock_user_string(arg1);
11526         if (p) {
11527             if (num == TARGET_NR_listxattr) {
11528                 ret = get_errno(listxattr(p, b, arg3));
11529             } else {
11530                 ret = get_errno(llistxattr(p, b, arg3));
11531             }
11532         } else {
11533             ret = -TARGET_EFAULT;
11534         }
11535         unlock_user(p, arg1, 0);
11536         unlock_user(b, arg2, arg3);
11537         return ret;
11538     }
11539     case TARGET_NR_flistxattr:
11540     {
11541         void *b = 0;
11542         if (arg2) {
11543             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11544             if (!b) {
11545                 return -TARGET_EFAULT;
11546             }
11547         }
11548         ret = get_errno(flistxattr(arg1, b, arg3));
11549         unlock_user(b, arg2, arg3);
11550         return ret;
11551     }
11552     case TARGET_NR_setxattr:
11553     case TARGET_NR_lsetxattr:
11554         {
11555             void *p, *n, *v = 0;
11556             if (arg3) {
11557                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11558                 if (!v) {
11559                     return -TARGET_EFAULT;
11560                 }
11561             }
11562             p = lock_user_string(arg1);
11563             n = lock_user_string(arg2);
11564             if (p && n) {
11565                 if (num == TARGET_NR_setxattr) {
11566                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11567                 } else {
11568                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11569                 }
11570             } else {
11571                 ret = -TARGET_EFAULT;
11572             }
11573             unlock_user(p, arg1, 0);
11574             unlock_user(n, arg2, 0);
11575             unlock_user(v, arg3, 0);
11576         }
11577         return ret;
11578     case TARGET_NR_fsetxattr:
11579         {
11580             void *n, *v = 0;
11581             if (arg3) {
11582                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11583                 if (!v) {
11584                     return -TARGET_EFAULT;
11585                 }
11586             }
11587             n = lock_user_string(arg2);
11588             if (n) {
11589                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11590             } else {
11591                 ret = -TARGET_EFAULT;
11592             }
11593             unlock_user(n, arg2, 0);
11594             unlock_user(v, arg3, 0);
11595         }
11596         return ret;
11597     case TARGET_NR_getxattr:
11598     case TARGET_NR_lgetxattr:
11599         {
11600             void *p, *n, *v = 0;
11601             if (arg3) {
11602                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11603                 if (!v) {
11604                     return -TARGET_EFAULT;
11605                 }
11606             }
11607             p = lock_user_string(arg1);
11608             n = lock_user_string(arg2);
11609             if (p && n) {
11610                 if (num == TARGET_NR_getxattr) {
11611                     ret = get_errno(getxattr(p, n, v, arg4));
11612                 } else {
11613                     ret = get_errno(lgetxattr(p, n, v, arg4));
11614                 }
11615             } else {
11616                 ret = -TARGET_EFAULT;
11617             }
11618             unlock_user(p, arg1, 0);
11619             unlock_user(n, arg2, 0);
11620             unlock_user(v, arg3, arg4);
11621         }
11622         return ret;
11623     case TARGET_NR_fgetxattr:
11624         {
11625             void *n, *v = 0;
11626             if (arg3) {
11627                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11628                 if (!v) {
11629                     return -TARGET_EFAULT;
11630                 }
11631             }
11632             n = lock_user_string(arg2);
11633             if (n) {
11634                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11635             } else {
11636                 ret = -TARGET_EFAULT;
11637             }
11638             unlock_user(n, arg2, 0);
11639             unlock_user(v, arg3, arg4);
11640         }
11641         return ret;
11642     case TARGET_NR_removexattr:
11643     case TARGET_NR_lremovexattr:
11644         {
11645             void *p, *n;
11646             p = lock_user_string(arg1);
11647             n = lock_user_string(arg2);
11648             if (p && n) {
11649                 if (num == TARGET_NR_removexattr) {
11650                     ret = get_errno(removexattr(p, n));
11651                 } else {
11652                     ret = get_errno(lremovexattr(p, n));
11653                 }
11654             } else {
11655                 ret = -TARGET_EFAULT;
11656             }
11657             unlock_user(p, arg1, 0);
11658             unlock_user(n, arg2, 0);
11659         }
11660         return ret;
11661     case TARGET_NR_fremovexattr:
11662         {
11663             void *n;
11664             n = lock_user_string(arg2);
11665             if (n) {
11666                 ret = get_errno(fremovexattr(arg1, n));
11667             } else {
11668                 ret = -TARGET_EFAULT;
11669             }
11670             unlock_user(n, arg2, 0);
11671         }
11672         return ret;
11673 #endif
11674 #endif /* CONFIG_ATTR */
11675 #ifdef TARGET_NR_set_thread_area
11676     case TARGET_NR_set_thread_area:
11677 #if defined(TARGET_MIPS)
11678       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11679       return 0;
11680 #elif defined(TARGET_CRIS)
11681       if (arg1 & 0xff)
11682           ret = -TARGET_EINVAL;
11683       else {
11684           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11685           ret = 0;
11686       }
11687       return ret;
11688 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11689       return do_set_thread_area(cpu_env, arg1);
11690 #elif defined(TARGET_M68K)
11691       {
11692           TaskState *ts = cpu->opaque;
11693           ts->tp_value = arg1;
11694           return 0;
11695       }
11696 #else
11697       return -TARGET_ENOSYS;
11698 #endif
11699 #endif
11700 #ifdef TARGET_NR_get_thread_area
11701     case TARGET_NR_get_thread_area:
11702 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11703         return do_get_thread_area(cpu_env, arg1);
11704 #elif defined(TARGET_M68K)
11705         {
11706             TaskState *ts = cpu->opaque;
11707             return ts->tp_value;
11708         }
11709 #else
11710         return -TARGET_ENOSYS;
11711 #endif
11712 #endif
11713 #ifdef TARGET_NR_getdomainname
11714     case TARGET_NR_getdomainname:
11715         return -TARGET_ENOSYS;
11716 #endif
11717 
11718 #ifdef TARGET_NR_clock_settime
11719     case TARGET_NR_clock_settime:
11720     {
11721         struct timespec ts;
11722 
11723         ret = target_to_host_timespec(&ts, arg2);
11724         if (!is_error(ret)) {
11725             ret = get_errno(clock_settime(arg1, &ts));
11726         }
11727         return ret;
11728     }
11729 #endif
11730 #ifdef TARGET_NR_clock_settime64
11731     case TARGET_NR_clock_settime64:
11732     {
11733         struct timespec ts;
11734 
11735         ret = target_to_host_timespec64(&ts, arg2);
11736         if (!is_error(ret)) {
11737             ret = get_errno(clock_settime(arg1, &ts));
11738         }
11739         return ret;
11740     }
11741 #endif
11742 #ifdef TARGET_NR_clock_gettime
11743     case TARGET_NR_clock_gettime:
11744     {
11745         struct timespec ts;
11746         ret = get_errno(clock_gettime(arg1, &ts));
11747         if (!is_error(ret)) {
11748             ret = host_to_target_timespec(arg2, &ts);
11749         }
11750         return ret;
11751     }
11752 #endif
11753 #ifdef TARGET_NR_clock_gettime64
11754     case TARGET_NR_clock_gettime64:
11755     {
11756         struct timespec ts;
11757         ret = get_errno(clock_gettime(arg1, &ts));
11758         if (!is_error(ret)) {
11759             ret = host_to_target_timespec64(arg2, &ts);
11760         }
11761         return ret;
11762     }
11763 #endif
11764 #ifdef TARGET_NR_clock_getres
11765     case TARGET_NR_clock_getres:
11766     {
11767         struct timespec ts;
11768         ret = get_errno(clock_getres(arg1, &ts));
11769         if (!is_error(ret)) {
11770             host_to_target_timespec(arg2, &ts);
11771         }
11772         return ret;
11773     }
11774 #endif
11775 #ifdef TARGET_NR_clock_nanosleep
11776     case TARGET_NR_clock_nanosleep:
11777     {
11778         struct timespec ts;
11779         target_to_host_timespec(&ts, arg3);
11780         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11781                                              &ts, arg4 ? &ts : NULL));
11782         if (arg4)
11783             host_to_target_timespec(arg4, &ts);
11784 
11785 #if defined(TARGET_PPC)
11786         /* clock_nanosleep is odd in that it returns positive errno values.
11787          * On PPC, CR0 bit 3 should be set in such a situation. */
11788         if (ret && ret != -TARGET_ERESTARTSYS) {
11789             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11790         }
11791 #endif
11792         return ret;
11793     }
11794 #endif
11795 
11796 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11797     case TARGET_NR_set_tid_address:
11798         return get_errno(set_tid_address((int *)g2h(arg1)));
11799 #endif
11800 
11801     case TARGET_NR_tkill:
11802         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11803 
11804     case TARGET_NR_tgkill:
11805         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11806                          target_to_host_signal(arg3)));
11807 
11808 #ifdef TARGET_NR_set_robust_list
11809     case TARGET_NR_set_robust_list:
11810     case TARGET_NR_get_robust_list:
11811         /* The ABI for supporting robust futexes has userspace pass
11812          * the kernel a pointer to a linked list which is updated by
11813          * userspace after the syscall; the list is walked by the kernel
11814          * when the thread exits. Since the linked list in QEMU guest
11815          * memory isn't a valid linked list for the host and we have
11816          * no way to reliably intercept the thread-death event, we can't
11817          * support these. Silently return ENOSYS so that guest userspace
11818          * falls back to a non-robust futex implementation (which should
11819          * be OK except in the corner case of the guest crashing while
11820          * holding a mutex that is shared with another process via
11821          * shared memory).
11822          */
11823         return -TARGET_ENOSYS;
11824 #endif
11825 
11826 #if defined(TARGET_NR_utimensat)
11827     case TARGET_NR_utimensat:
11828         {
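                  /*
                   * arg3 == 0 means "set both timestamps to the current time", and
                   * arg2 == 0 (no pathname) makes the host kernel operate on the
                   * dirfd itself, which is how futimens() is implemented on Linux.
                   */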
11829             struct timespec *tsp, ts[2];
11830             if (!arg3) {
11831                 tsp = NULL;
11832             } else {
11833                 target_to_host_timespec(ts, arg3);
11834                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11835                 tsp = ts;
11836             }
11837             if (!arg2)
11838                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11839             else {
11840                 if (!(p = lock_user_string(arg2))) {
11841                     return -TARGET_EFAULT;
11842                 }
11843                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11844                 unlock_user(p, arg2, 0);
11845             }
11846         }
11847         return ret;
11848 #endif
11849 #ifdef TARGET_NR_futex
11850     case TARGET_NR_futex:
11851         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11852 #endif
11853 #ifdef TARGET_NR_futex_time64
11854     case TARGET_NR_futex_time64:
11855         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11856 #endif
11857 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11858     case TARGET_NR_inotify_init:
11859         ret = get_errno(sys_inotify_init());
11860         if (ret >= 0) {
11861             fd_trans_register(ret, &target_inotify_trans);
11862         }
11863         return ret;
11864 #endif
11865 #ifdef CONFIG_INOTIFY1
11866 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11867     case TARGET_NR_inotify_init1:
11868         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11869                                           fcntl_flags_tbl)));
11870         if (ret >= 0) {
11871             fd_trans_register(ret, &target_inotify_trans);
11872         }
11873         return ret;
11874 #endif
11875 #endif
11876 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11877     case TARGET_NR_inotify_add_watch:
11878         p = lock_user_string(arg2);
11879         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11880         unlock_user(p, arg2, 0);
11881         return ret;
11882 #endif
11883 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11884     case TARGET_NR_inotify_rm_watch:
11885         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11886 #endif
11887 
11888 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11889     case TARGET_NR_mq_open:
11890         {
11891             struct mq_attr posix_mq_attr;
11892             struct mq_attr *pposix_mq_attr;
11893             int host_flags;
11894 
11895             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11896             pposix_mq_attr = NULL;
11897             if (arg4) {
11898                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11899                     return -TARGET_EFAULT;
11900                 }
11901                 pposix_mq_attr = &posix_mq_attr;
11902             }
11903             p = lock_user_string(arg1 - 1);
11904             if (!p) {
11905                 return -TARGET_EFAULT;
11906             }
11907             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11908             unlock_user (p, arg1, 0);
11909         }
11910         return ret;
11911 
11912     case TARGET_NR_mq_unlink:
11913         p = lock_user_string(arg1 - 1);
11914         if (!p) {
11915             return -TARGET_EFAULT;
11916         }
11917         ret = get_errno(mq_unlink(p));
11918         unlock_user (p, arg1, 0);
11919         return ret;
11920 
11921 #ifdef TARGET_NR_mq_timedsend
11922     case TARGET_NR_mq_timedsend:
11923         {
11924             struct timespec ts;
11925 
11926             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11927             if (arg5 != 0) {
11928                 target_to_host_timespec(&ts, arg5);
11929                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11930                 host_to_target_timespec(arg5, &ts);
11931             } else {
11932                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11933             }
11934             unlock_user (p, arg2, arg3);
11935         }
11936         return ret;
11937 #endif
11938 
11939 #ifdef TARGET_NR_mq_timedreceive
11940     case TARGET_NR_mq_timedreceive:
11941         {
11942             struct timespec ts;
11943             unsigned int prio;
11944 
11945             p = lock_user (VERIFY_READ, arg2, arg3, 1);
11946             if (arg5 != 0) {
11947                 target_to_host_timespec(&ts, arg5);
11948                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11949                                                      &prio, &ts));
11950                 host_to_target_timespec(arg5, &ts);
11951             } else {
11952                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11953                                                      &prio, NULL));
11954             }
11955             unlock_user (p, arg2, arg3);
11956             if (arg4 != 0)
11957                 put_user_u32(prio, arg4);
11958         }
11959         return ret;
11960 #endif
11961 
11962     /* Not implemented for now... */
11963 /*     case TARGET_NR_mq_notify: */
11964 /*         break; */
11965 
11966     case TARGET_NR_mq_getsetattr:
11967         {
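                  /*
                   * With arg2 set, install new attributes (mq_setattr also returns
                   * the previous ones); otherwise just query with mq_getattr.  The
                   * resulting attributes are copied back only when the guest
                   * supplied an output pointer in arg3.
                   */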
11968             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11969             ret = 0;
11970             if (arg2 != 0) {
11971                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11972                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11973                                            &posix_mq_attr_out));
11974             } else if (arg3 != 0) {
11975                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11976             }
11977             if (ret == 0 && arg3 != 0) {
11978                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11979             }
11980         }
11981         return ret;
11982 #endif
11983 
11984 #ifdef CONFIG_SPLICE
11985 #ifdef TARGET_NR_tee
11986     case TARGET_NR_tee:
11987         {
11988             ret = get_errno(tee(arg1, arg2, arg3, arg4));
11989         }
11990         return ret;
11991 #endif
11992 #ifdef TARGET_NR_splice
11993     case TARGET_NR_splice:
11994         {
11995             loff_t loff_in, loff_out;
11996             loff_t *ploff_in = NULL, *ploff_out = NULL;
11997             if (arg2) {
11998                 if (get_user_u64(loff_in, arg2)) {
11999                     return -TARGET_EFAULT;
12000                 }
12001                 ploff_in = &loff_in;
12002             }
12003             if (arg4) {
12004                 if (get_user_u64(loff_out, arg4)) {
12005                     return -TARGET_EFAULT;
12006                 }
12007                 ploff_out = &loff_out;
12008             }
12009             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12010             if (arg2) {
12011                 if (put_user_u64(loff_in, arg2)) {
12012                     return -TARGET_EFAULT;
12013                 }
12014             }
12015             if (arg4) {
12016                 if (put_user_u64(loff_out, arg4)) {
12017                     return -TARGET_EFAULT;
12018                 }
12019             }
12020         }
12021         return ret;
12022 #endif
12023 #ifdef TARGET_NR_vmsplice
12024     case TARGET_NR_vmsplice:
12025         {
12026             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12027             if (vec != NULL) {
12028                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12029                 unlock_iovec(vec, arg2, arg3, 0);
12030             } else {
12031                 ret = -host_to_target_errno(errno);
12032             }
12033         }
12034         return ret;
12035 #endif
12036 #endif /* CONFIG_SPLICE */
12037 #ifdef CONFIG_EVENTFD
12038 #if defined(TARGET_NR_eventfd)
12039     case TARGET_NR_eventfd:
12040         ret = get_errno(eventfd(arg1, 0));
12041         if (ret >= 0) {
12042             fd_trans_register(ret, &target_eventfd_trans);
12043         }
12044         return ret;
12045 #endif
12046 #if defined(TARGET_NR_eventfd2)
12047     case TARGET_NR_eventfd2:
12048     {
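              /*
               * Only the O_NONBLOCK and O_CLOEXEC bits are meaningful for
               * eventfd2; translate each one individually since the target and
               * host encodings of these flags may differ.
               */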
12049         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12050         if (arg2 & TARGET_O_NONBLOCK) {
12051             host_flags |= O_NONBLOCK;
12052         }
12053         if (arg2 & TARGET_O_CLOEXEC) {
12054             host_flags |= O_CLOEXEC;
12055         }
12056         ret = get_errno(eventfd(arg1, host_flags));
12057         if (ret >= 0) {
12058             fd_trans_register(ret, &target_eventfd_trans);
12059         }
12060         return ret;
12061     }
12062 #endif
12063 #endif /* CONFIG_EVENTFD  */
12064 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12065     case TARGET_NR_fallocate:
12066 #if TARGET_ABI_BITS == 32
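        /*
         * On 32-bit ABIs the 64-bit offset and length each arrive split
         * across two abi_long arguments; target_offset64() reassembles them
         * in the target's word order.  Illustrative guest call: a 32-bit
         * guest's fallocate(fd, 0, 1ULL << 32, 4096) reaches this point with
         * the offset in arg3/arg4 and the length in arg5/arg6.
         */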
12067         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12068                                   target_offset64(arg5, arg6)));
12069 #else
12070         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12071 #endif
12072         return ret;
12073 #endif
12074 #if defined(CONFIG_SYNC_FILE_RANGE)
12075 #if defined(TARGET_NR_sync_file_range)
12076     case TARGET_NR_sync_file_range:
12077 #if TARGET_ABI_BITS == 32
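        /*
         * 32-bit targets split each 64-bit offset across a register pair.
         * MIPS o32 additionally aligns 64-bit arguments to even register
         * pairs (arg2 is an alignment pad), which is why its offsets start
         * at arg3 rather than arg2 in the branch below.
         */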
12078 #if defined(TARGET_MIPS)
12079         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12080                                         target_offset64(arg5, arg6), arg7));
12081 #else
12082         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12083                                         target_offset64(arg4, arg5), arg6));
12084 #endif /* !TARGET_MIPS */
12085 #else
12086         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12087 #endif
12088         return ret;
12089 #endif
12090 #if defined(TARGET_NR_sync_file_range2) || \
12091     defined(TARGET_NR_arm_sync_file_range)
12092 #if defined(TARGET_NR_sync_file_range2)
12093     case TARGET_NR_sync_file_range2:
12094 #endif
12095 #if defined(TARGET_NR_arm_sync_file_range)
12096     case TARGET_NR_arm_sync_file_range:
12097 #endif
12098         /* This is like sync_file_range but the arguments are reordered */
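        /* sync_file_range2 exists because the natural argument order would
         * need seven slots on 32-bit targets once the two 64-bit offsets are
         * register-pair aligned; moving 'flags' up to arg2 keeps the call
         * within the available argument registers.
         */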
12099 #if TARGET_ABI_BITS == 32
12100         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12101                                         target_offset64(arg5, arg6), arg2));
12102 #else
12103         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12104 #endif
12105         return ret;
12106 #endif
12107 #endif
12108 #if defined(TARGET_NR_signalfd4)
12109     case TARGET_NR_signalfd4:
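        /* args: fd, const sigset_t *mask, size_t sizemask, int flags.
         * The guest's sizemask (arg3) is not forwarded; do_signalfd4()
         * works with the target sigset size directly and converts the
         * mask and flag bits to host values.
         */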
12110         return do_signalfd4(arg1, arg2, arg4);
12111 #endif
12112 #if defined(TARGET_NR_signalfd)
12113     case TARGET_NR_signalfd:
12114         return do_signalfd4(arg1, arg2, 0);
12115 #endif
12116 #if defined(CONFIG_EPOLL)
12117 #if defined(TARGET_NR_epoll_create)
12118     case TARGET_NR_epoll_create:
12119         return get_errno(epoll_create(arg1));
12120 #endif
12121 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12122     case TARGET_NR_epoll_create1:
12123         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12124 #endif
12125 #if defined(TARGET_NR_epoll_ctl)
12126     case TARGET_NR_epoll_ctl:
12127     {
12128         struct epoll_event ep;
        struct epoll_event *epp = NULL;
12130         if (arg4) {
12131             struct target_epoll_event *target_ep;
12132             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12133                 return -TARGET_EFAULT;
12134             }
12135             ep.events = tswap32(target_ep->events);
12136             /* The epoll_data_t union is just opaque data to the kernel,
12137              * so we transfer all 64 bits across and need not worry what
12138              * actual data type it is.
12139              */
12140             ep.data.u64 = tswap64(target_ep->data.u64);
12141             unlock_user_struct(target_ep, arg4, 0);
12142             epp = &ep;
12143         }
12144         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12145     }
12146 #endif
12147 
12148 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12149 #if defined(TARGET_NR_epoll_wait)
12150     case TARGET_NR_epoll_wait:
12151 #endif
12152 #if defined(TARGET_NR_epoll_pwait)
12153     case TARGET_NR_epoll_pwait:
12154 #endif
12155     {
12156         struct target_epoll_event *target_ep;
12157         struct epoll_event *ep;
12158         int epfd = arg1;
12159         int maxevents = arg3;
12160         int timeout = arg4;
12161 
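        /*
         * Events are gathered into a host-sized epoll_event array and then
         * byte-swapped field by field into the guest buffer, since the
         * structure packing of events/data differs between architectures.
         * maxevents is bounded first so the size calculation below cannot
         * overflow and the transient host allocation stays modest.
         */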
12162         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12163             return -TARGET_EINVAL;
12164         }
12165 
12166         target_ep = lock_user(VERIFY_WRITE, arg2,
12167                               maxevents * sizeof(struct target_epoll_event), 1);
12168         if (!target_ep) {
12169             return -TARGET_EFAULT;
12170         }
12171 
12172         ep = g_try_new(struct epoll_event, maxevents);
12173         if (!ep) {
12174             unlock_user(target_ep, arg2, 0);
12175             return -TARGET_ENOMEM;
12176         }
12177 
12178         switch (num) {
12179 #if defined(TARGET_NR_epoll_pwait)
12180         case TARGET_NR_epoll_pwait:
12181         {
12182             target_sigset_t *target_set;
12183             sigset_t _set, *set = &_set;
12184 
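            /*
             * For epoll_pwait the guest may supply a signal mask; it is
             * converted to the host representation and handed to the
             * safe_epoll_pwait wrapper together with the host sigset size,
             * so signals are unblocked atomically around the wait.
             */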
12185             if (arg5) {
12186                 if (arg6 != sizeof(target_sigset_t)) {
12187                     ret = -TARGET_EINVAL;
12188                     break;
12189                 }
12190 
12191                 target_set = lock_user(VERIFY_READ, arg5,
12192                                        sizeof(target_sigset_t), 1);
12193                 if (!target_set) {
12194                     ret = -TARGET_EFAULT;
12195                     break;
12196                 }
12197                 target_to_host_sigset(set, target_set);
12198                 unlock_user(target_set, arg5, 0);
12199             } else {
12200                 set = NULL;
12201             }
12202 
12203             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12204                                              set, SIGSET_T_SIZE));
12205             break;
12206         }
12207 #endif
12208 #if defined(TARGET_NR_epoll_wait)
12209         case TARGET_NR_epoll_wait:
12210             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12211                                              NULL, 0));
12212             break;
12213 #endif
12214         default:
12215             ret = -TARGET_ENOSYS;
12216         }
12217         if (!is_error(ret)) {
12218             int i;
12219             for (i = 0; i < ret; i++) {
12220                 target_ep[i].events = tswap32(ep[i].events);
12221                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12222             }
12223             unlock_user(target_ep, arg2,
12224                         ret * sizeof(struct target_epoll_event));
12225         } else {
12226             unlock_user(target_ep, arg2, 0);
12227         }
12228         g_free(ep);
12229         return ret;
12230     }
12231 #endif
12232 #endif
12233 #ifdef TARGET_NR_prlimit64
12234     case TARGET_NR_prlimit64:
12235     {
12236         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12237         struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = NULL;
12239         int resource = target_to_host_resource(arg2);
12240 
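        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * deliberately not forwarded to the host (see the filter below):
         * QEMU allocates host memory on the guest's behalf, so applying
         * those guest limits could starve the emulator itself.  The old
         * limits are still read back through arg4 in either case.
         */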
12241         if (arg3 && (resource != RLIMIT_AS &&
12242                      resource != RLIMIT_DATA &&
12243                      resource != RLIMIT_STACK)) {
12244             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12245                 return -TARGET_EFAULT;
12246             }
12247             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12248             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12249             unlock_user_struct(target_rnew, arg3, 0);
12250             rnewp = &rnew;
12251         }
12252 
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : NULL));
12254         if (!is_error(ret) && arg4) {
12255             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12256                 return -TARGET_EFAULT;
12257             }
12258             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12259             target_rold->rlim_max = tswap64(rold.rlim_max);
12260             unlock_user_struct(target_rold, arg4, 1);
12261         }
12262         return ret;
12263     }
12264 #endif
12265 #ifdef TARGET_NR_gethostname
12266     case TARGET_NR_gethostname:
12267     {
12268         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12269         if (name) {
12270             ret = get_errno(gethostname(name, arg2));
12271             unlock_user(name, arg1, arg2);
12272         } else {
12273             ret = -TARGET_EFAULT;
12274         }
12275         return ret;
12276     }
12277 #endif
12278 #ifdef TARGET_NR_atomic_cmpxchg_32
12279     case TARGET_NR_atomic_cmpxchg_32:
12280     {
12281         /* should use start_exclusive from main.c */
12282         abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /* mem_value is not valid after a faulting read; return a
             * poison value instead of using it below.
             */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
12297     }
12298 #endif
12299 #ifdef TARGET_NR_atomic_barrier
12300     case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, treat this as a no-op. */
12303         return 0;
12304 #endif
12305 
12306 #ifdef TARGET_NR_timer_create
12307     case TARGET_NR_timer_create:
12308     {
12309         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12310 
12311         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12312 
12313         int clkid = arg1;
12314         int timer_index = next_free_host_timer();
12315 
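        /*
         * Host timer handles live in the g_posix_timers[] table; the guest
         * never sees the raw timer_t.  Instead it gets back an id of the
         * form TIMER_MAGIC | index, which get_timer_id() later validates
         * and maps back to the table slot in the other timer_* syscalls.
         */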
12316         if (timer_index < 0) {
12317             ret = -TARGET_EAGAIN;
12318         } else {
            timer_t *phtimer = g_posix_timers + timer_index;
12320 
12321             if (arg2) {
12322                 phost_sevp = &host_sevp;
12323                 ret = target_to_host_sigevent(phost_sevp, arg2);
12324                 if (ret != 0) {
12325                     return ret;
12326                 }
12327             }
12328 
12329             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12330             if (ret) {
12331                 phtimer = NULL;
12332             } else {
12333                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12334                     return -TARGET_EFAULT;
12335                 }
12336             }
12337         }
12338         return ret;
12339     }
12340 #endif
12341 
12342 #ifdef TARGET_NR_timer_settime
12343     case TARGET_NR_timer_settime:
12344     {
12345         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12346          * struct itimerspec * old_value */
12347         target_timer_t timerid = get_timer_id(arg1);
12348 
12349         if (timerid < 0) {
12350             ret = timerid;
12351         } else if (arg3 == 0) {
12352             ret = -TARGET_EINVAL;
12353         } else {
12354             timer_t htimer = g_posix_timers[timerid];
12355             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12356 
12357             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12358                 return -TARGET_EFAULT;
12359             }
12360             ret = get_errno(
12361                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12362             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12363                 return -TARGET_EFAULT;
12364             }
12365         }
12366         return ret;
12367     }
12368 #endif
12369 
12370 #ifdef TARGET_NR_timer_gettime
12371     case TARGET_NR_timer_gettime:
12372     {
12373         /* args: timer_t timerid, struct itimerspec *curr_value */
12374         target_timer_t timerid = get_timer_id(arg1);
12375 
12376         if (timerid < 0) {
12377             ret = timerid;
12378         } else if (!arg2) {
12379             ret = -TARGET_EFAULT;
12380         } else {
12381             timer_t htimer = g_posix_timers[timerid];
12382             struct itimerspec hspec;
12383             ret = get_errno(timer_gettime(htimer, &hspec));
12384 
12385             if (host_to_target_itimerspec(arg2, &hspec)) {
12386                 ret = -TARGET_EFAULT;
12387             }
12388         }
12389         return ret;
12390     }
12391 #endif
12392 
12393 #ifdef TARGET_NR_timer_getoverrun
12394     case TARGET_NR_timer_getoverrun:
12395     {
12396         /* args: timer_t timerid */
12397         target_timer_t timerid = get_timer_id(arg1);
12398 
12399         if (timerid < 0) {
12400             ret = timerid;
12401         } else {
12402             timer_t htimer = g_posix_timers[timerid];
12403             ret = get_errno(timer_getoverrun(htimer));
12404         }
12405         return ret;
12406     }
12407 #endif
12408 
12409 #ifdef TARGET_NR_timer_delete
12410     case TARGET_NR_timer_delete:
12411     {
12412         /* args: timer_t timerid */
12413         target_timer_t timerid = get_timer_id(arg1);
12414 
12415         if (timerid < 0) {
12416             ret = timerid;
12417         } else {
12418             timer_t htimer = g_posix_timers[timerid];
12419             ret = get_errno(timer_delete(htimer));
12420             g_posix_timers[timerid] = 0;
12421         }
12422         return ret;
12423     }
12424 #endif
12425 
12426 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12427     case TARGET_NR_timerfd_create:
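        /* TFD_CLOEXEC and TFD_NONBLOCK share their values with O_CLOEXEC
         * and O_NONBLOCK, so the generic fcntl_flags_tbl bitmask
         * translation below also covers the timerfd creation flags.
         */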
12428         return get_errno(timerfd_create(arg1,
12429                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12430 #endif
12431 
12432 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12433     case TARGET_NR_timerfd_gettime:
12434         {
12435             struct itimerspec its_curr;
12436 
12437             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12438 
12439             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12440                 return -TARGET_EFAULT;
12441             }
12442         }
12443         return ret;
12444 #endif
12445 
12446 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12447     case TARGET_NR_timerfd_settime:
12448         {
12449             struct itimerspec its_new, its_old, *p_new;
12450 
12451             if (arg3) {
12452                 if (target_to_host_itimerspec(&its_new, arg3)) {
12453                     return -TARGET_EFAULT;
12454                 }
12455                 p_new = &its_new;
12456             } else {
12457                 p_new = NULL;
12458             }
12459 
12460             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12461 
12462             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12463                 return -TARGET_EFAULT;
12464             }
12465         }
12466         return ret;
12467 #endif
12468 
12469 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12470     case TARGET_NR_ioprio_get:
12471         return get_errno(ioprio_get(arg1, arg2));
12472 #endif
12473 
12474 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12475     case TARGET_NR_ioprio_set:
12476         return get_errno(ioprio_set(arg1, arg2, arg3));
12477 #endif
12478 
12479 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12480     case TARGET_NR_setns:
12481         return get_errno(setns(arg1, arg2));
12482 #endif
12483 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12484     case TARGET_NR_unshare:
12485         return get_errno(unshare(arg1));
12486 #endif
12487 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12488     case TARGET_NR_kcmp:
12489         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12490 #endif
12491 #ifdef TARGET_NR_swapcontext
12492     case TARGET_NR_swapcontext:
12493         /* PowerPC specific.  */
12494         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12495 #endif
12496 #ifdef TARGET_NR_memfd_create
12497     case TARGET_NR_memfd_create:
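        /* arg1 is a guest pointer to the name string; the MFD_* flags in
         * arg2 are architecture-independent and passed through unchanged.
         * Any fd translator left over from a previously closed descriptor
         * with the same number is dropped for the new fd.
         */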
12498         p = lock_user_string(arg1);
12499         if (!p) {
12500             return -TARGET_EFAULT;
12501         }
12502         ret = get_errno(memfd_create(p, arg2));
12503         fd_trans_unregister(ret);
12504         unlock_user(p, arg1, 0);
12505         return ret;
12506 #endif
12507 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12508     case TARGET_NR_membarrier:
12509         return get_errno(membarrier(arg1, arg2));
12510 #endif
12511 
12512     default:
12513         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12514         return -TARGET_ENOSYS;
12515     }
12516     return ret;
12517 }
12518 
12519 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12520                     abi_long arg2, abi_long arg3, abi_long arg4,
12521                     abi_long arg5, abi_long arg6, abi_long arg7,
12522                     abi_long arg8)
12523 {
12524     CPUState *cpu = env_cpu(cpu_env);
12525     abi_long ret;
12526 
12527 #ifdef DEBUG_ERESTARTSYS
12528     /* Debug-only code for exercising the syscall-restart code paths
12529      * in the per-architecture cpu main loops: restart every syscall
12530      * the guest makes once before letting it through.
12531      */
12532     {
12533         static bool flag;
12534         flag = !flag;
12535         if (flag) {
12536             return -TARGET_ERESTARTSYS;
12537         }
12538     }
12539 #endif
12540 
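    /*
     * Trace and logging hooks bracket the real dispatch in do_syscall1():
     * record_syscall_start/return feed the syscall trace backends, and
     * -strace style output is produced via print_syscall{,_ret} when the
     * LOG_STRACE log mask is enabled.
     */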
12541     record_syscall_start(cpu, num, arg1,
12542                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12543 
12544     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12545         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12546     }
12547 
12548     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12549                       arg5, arg6, arg7, arg8);
12550 
12551     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12552         print_syscall_ret(num, ret, arg1, arg2, arg3, arg4, arg5, arg6);
12553     }
12554 
12555     record_syscall_return(cpu, num, ret);
12556     return ret;
12557 }
12558