xref: /openbmc/qemu/linux-user/syscall.c (revision 4bc08c61)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
121 
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
129 
130 #ifndef CLONE_IO
131 #define CLONE_IO                0x80000000      /* Clone io context */
132 #endif
133 
134 /* We can't directly call the host clone syscall, because this will
135  * badly confuse libc (breaking mutexes, for example). So we must
136  * divide clone flags into:
137  *  * flag combinations that look like pthread_create()
138  *  * flag combinations that look like fork()
139  *  * flags we can implement within QEMU itself
140  *  * flags we can't support and will return an error for
141  */
142 /* For thread creation, all these flags must be present; for
143  * fork, none must be present.
144  */
145 #define CLONE_THREAD_FLAGS                              \
146     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
147      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 
149 /* These flags are ignored:
150  * CLONE_DETACHED is now ignored by the kernel;
151  * CLONE_IO is just an optimisation hint to the I/O scheduler
152  */
153 #define CLONE_IGNORED_FLAGS                     \
154     (CLONE_DETACHED | CLONE_IO)
155 
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS               \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
163     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
164      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 
166 #define CLONE_INVALID_FORK_FLAGS                                        \
167     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 
169 #define CLONE_INVALID_THREAD_FLAGS                                      \
170     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
171        CLONE_IGNORED_FLAGS))
172 
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174  * have almost all been allocated. We cannot support any of
175  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177  * The checks against the invalid thread masks above will catch these.
178  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
179  */
180 
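/*
 * Illustration (an assumption about typical libc behaviour, not something
 * this file relies on): NPTL pthread_create() normally issues clone() with
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus a subset of
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the thread-mask checks above,
 * while a plain fork()-style clone() carrying only an exit signal in
 * CSIGNAL passes the fork-mask checks.
 */
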
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182  * once. This exercises the codepaths for restart.
183  */
184 //#define DEBUG_ERESTARTSYS
185 
186 //#include <linux/msdos_fs.h>
187 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
188 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
189 
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
197 
198 #define _syscall0(type,name)		\
199 static type name (void)			\
200 {					\
201 	return syscall(__NR_##name);	\
202 }
203 
204 #define _syscall1(type,name,type1,arg1)		\
205 static type name (type1 arg1)			\
206 {						\
207 	return syscall(__NR_##name, arg1);	\
208 }
209 
210 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
211 static type name (type1 arg1,type2 arg2)		\
212 {							\
213 	return syscall(__NR_##name, arg1, arg2);	\
214 }
215 
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3)		\
218 {								\
219 	return syscall(__NR_##name, arg1, arg2, arg3);		\
220 }
221 
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
226 }
227 
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
229 		  type5,arg5)							\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
233 }
234 
235 
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
237 		  type5,arg5,type6,arg6)					\
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
239                   type6 arg6)							\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
242 }
243 
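/*
 * Example expansion (for illustration): with the __NR_sys_* aliases below,
 *   _syscall0(int, sys_gettid)
 * produces
 *   static int sys_gettid(void) { return syscall(__NR_sys_gettid); }
 * i.e. a thin wrapper that invokes the host syscall directly, independent
 * of whatever wrapper (if any) the host libc provides.
 */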
244 
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
263 
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
267 
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
272 
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
275 
276 /* For the 64-bit guest on 32-bit host case we must emulate
277  * getdents using getdents64, because otherwise the host
278  * might hand us back more dirent records than we can fit
279  * into the guest buffer after structure format conversion.
280  * Otherwise we emulate getdents with getdents if the host has it.
281  */
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
285 
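/*
 * For example, a 64-bit guest on a 32-bit host (TARGET_ABI_BITS 64,
 * HOST_LONG_BITS 32) leaves EMULATE_GETDENTS_WITH_GETDENTS undefined,
 * so TARGET_NR_getdents is serviced via the sys_getdents64 wrapper below.
 */
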
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
296           loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300           siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310           const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314           const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318           unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321           unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325           void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327           struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329           struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
339 
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342           unsigned long, idx1, unsigned long, idx2)
343 #endif
344 
345 /*
346  * It is assumed that struct statx is architecture independent.
347  */
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350           unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
355 
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
358   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
359   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
360   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
361   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
362   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
363   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
364   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
365   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
366   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
367   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
368   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
370 #if defined(O_DIRECT)
371   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
372 #endif
373 #if defined(O_NOATIME)
374   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
375 #endif
376 #if defined(O_CLOEXEC)
377   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
378 #endif
379 #if defined(O_PATH)
380   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
381 #endif
382 #if defined(O_TMPFILE)
383   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
384 #endif
385   /* Don't terminate the list prematurely on 64-bit host+guest.  */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389   { 0, 0, 0, 0 }
390 };
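/*
 * Each row above is { target_mask, target_bits, host_mask, host_bits }:
 * the bitmask translation helpers (target_to_host_bitmask() and friends)
 * set host_bits whenever (flags & target_mask) == target_bits, and the
 * reverse direction uses the host pair.  That is why the O_ACCMODE rows
 * compare a two-bit field against a value instead of testing single bits.
 */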
391 
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
393 
394 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398           const struct timespec *,tsp,int,flags)
399 #else
400 static int sys_utimensat(int dirfd, const char *pathname,
401                          const struct timespec times[2], int flags)
402 {
403     errno = ENOSYS;
404     return -1;
405 }
406 #endif
407 #endif /* TARGET_NR_utimensat */
408 
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413           const char *, new, unsigned int, flags)
414 #else
415 static int sys_renameat2(int oldfd, const char *old,
416                          int newfd, const char *new, int flags)
417 {
418     if (flags == 0) {
419         return renameat(oldfd, old, newfd, new);
420     }
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_renameat2 */
426 
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
429 
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
431 static int sys_inotify_init(void)
432 {
433   return (inotify_init());
434 }
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
437 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
438 {
439   return (inotify_add_watch(fd, pathname, mask));
440 }
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
443 static int sys_inotify_rm_watch(int fd, int32_t wd)
444 {
445   return (inotify_rm_watch(fd, wd));
446 }
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
450 static int sys_inotify_init1(int flags)
451 {
452   return (inotify_init1(flags));
453 }
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive at runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY  */
463 
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not match the one used by the underlying syscall */
470 struct host_rlimit64 {
471     uint64_t rlim_cur;
472     uint64_t rlim_max;
473 };
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475           const struct host_rlimit64 *, new_limit,
476           struct host_rlimit64 *, old_limit)
477 #endif
478 
479 
480 #if defined(TARGET_NR_timer_create)
481 /* Maximum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, };
483 
484 static inline int next_free_host_timer(void)
485 {
486     int k;
487     /* FIXME: Does finding the next free slot require a lock? */
488     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489         if (g_posix_timers[k] == 0) {
490             g_posix_timers[k] = (timer_t) 1;
491             return k;
492         }
493     }
494     return -1;
495 }
496 #endif
497 
498 #define ERRNO_TABLE_SIZE 1200
499 
500 /* target_to_host_errno_table[] is initialized from
501  * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
503 };
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERFKILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
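/*
 * Example (illustrative; exact numbers are host/target specific): most
 * targets use the generic errno layout, so host EAGAIN (11) maps straight
 * to TARGET_EAGAIN, but targets such as Alpha or MIPS renumber many of the
 * values listed above, which is why the tables are consulted before
 * falling back to passing the value through unchanged.
 */
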
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
671 { \
672     return safe_syscall(__NR_##name); \
673 }
674 
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
677 { \
678     return safe_syscall(__NR_##name, arg1); \
679 }
680 
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
683 { \
684     return safe_syscall(__NR_##name, arg1, arg2); \
685 }
686 
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
691 }
692 
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694     type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
698 }
699 
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703     type5 arg5) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
706 }
707 
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711     type5 arg5, type6 arg6) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
714 }
715 
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719               int, flags, mode_t, mode)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 #endif
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728     defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
730               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
731 #endif
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734               struct timespec *, tsp, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 #endif
737 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
738               int, maxevents, int, timeout, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
742               const struct timespec *,timeout,int *,uaddr2,int,val3)
743 #endif
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755               unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759               socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769               const struct timespec *, uts, size_t, sigsetsize)
770 #endif
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772               int, flags)
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #endif
777 #if defined(TARGET_NR_clock_nanosleep) || \
778     defined(TARGET_NR_clock_nanosleep_time64)
779 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
780               const struct timespec *, req, struct timespec *, rem)
781 #endif
782 #ifdef __NR_ipc
783 #ifdef __s390x__
784 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
785               void *, ptr)
786 #else
787 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
788               void *, ptr, long, fifth)
789 #endif
790 #endif
791 #ifdef __NR_msgsnd
792 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
793               int, flags)
794 #endif
795 #ifdef __NR_msgrcv
796 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
797               long, msgtype, int, flags)
798 #endif
799 #ifdef __NR_semtimedop
800 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
801               unsigned, nsops, const struct timespec *, timeout)
802 #endif
803 #if defined(TARGET_NR_mq_timedsend) || \
804     defined(TARGET_NR_mq_timedsend_time64)
805 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
806               size_t, len, unsigned, prio, const struct timespec *, timeout)
807 #endif
808 #if defined(TARGET_NR_mq_timedreceive) || \
809     defined(TARGET_NR_mq_timedreceive_time64)
810 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
811               size_t, len, unsigned *, prio, const struct timespec *, timeout)
812 #endif
813 /* We do ioctl like this rather than via safe_syscall3 to preserve the
814  * "third argument might be integer or pointer or not present" behaviour of
815  * the libc function.
816  */
817 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
818 /* Similarly for fcntl. Note that callers must always:
819  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
820  *  use the flock64 struct rather than unsuffixed flock
821  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
822  */
823 #ifdef __NR_fcntl64
824 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
825 #else
826 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
827 #endif
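/*
 * Usage sketch (illustrative only): callers convert guest arguments first
 * and then issue e.g.
 *     ret = get_errno(safe_fcntl(fd, F_SETLKW64, &fl64));
 * The safe_* wrappers exist so that a guest signal arriving around a
 * blocking syscall is not lost: the call either completes or reports an
 * ERESTARTSYS-style restart for the signal handling code to deal with.
 */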
828 
829 static inline int host_to_target_sock_type(int host_type)
830 {
831     int target_type;
832 
833     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
834     case SOCK_DGRAM:
835         target_type = TARGET_SOCK_DGRAM;
836         break;
837     case SOCK_STREAM:
838         target_type = TARGET_SOCK_STREAM;
839         break;
840     default:
841         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
842         break;
843     }
844 
845 #if defined(SOCK_CLOEXEC)
846     if (host_type & SOCK_CLOEXEC) {
847         target_type |= TARGET_SOCK_CLOEXEC;
848     }
849 #endif
850 
851 #if defined(SOCK_NONBLOCK)
852     if (host_type & SOCK_NONBLOCK) {
853         target_type |= TARGET_SOCK_NONBLOCK;
854     }
855 #endif
856 
857     return target_type;
858 }
859 
860 static abi_ulong target_brk;
861 static abi_ulong target_original_brk;
862 static abi_ulong brk_page;
863 
864 void target_set_brk(abi_ulong new_brk)
865 {
866     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
867     brk_page = HOST_PAGE_ALIGN(target_brk);
868 }
869 
870 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
871 #define DEBUGF_BRK(message, args...)
872 
873 /* do_brk() must return target values and target errnos. */
874 abi_long do_brk(abi_ulong new_brk)
875 {
876     abi_long mapped_addr;
877     abi_ulong new_alloc_size;
878 
879     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
880 
881     if (!new_brk) {
882         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
883         return target_brk;
884     }
885     if (new_brk < target_original_brk) {
886         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
887                    target_brk);
888         return target_brk;
889     }
890 
891     /* If the new brk is less than the highest page reserved to the
892      * target heap allocation, set it and we're almost done...  */
893     if (new_brk <= brk_page) {
894         /* Heap contents are initialized to zero, as for anonymous
895          * mapped pages.  */
896         if (new_brk > target_brk) {
897             memset(g2h(target_brk), 0, new_brk - target_brk);
898         }
899         target_brk = new_brk;
900         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
901         return target_brk;
902     }
903 
904     /* We need to allocate more memory after the brk... Note that
905      * we don't use MAP_FIXED because that will map over the top of
906      * any existing mapping (like the one with the host libc or qemu
907      * itself); instead we treat "mapped but at wrong address" as
908      * a failure and unmap again.
909      */
910     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
911     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
912                                         PROT_READ|PROT_WRITE,
913                                         MAP_ANON|MAP_PRIVATE, 0, 0));
914 
915     if (mapped_addr == brk_page) {
916         /* Heap contents are initialized to zero, as for anonymous
917          * mapped pages.  Technically the new pages are already
918          * initialized to zero since they *are* anonymous mapped
919          * pages, however we have to take care with the contents that
920          * come from the remaining part of the previous page: it may
921      * contain garbage data due to previous heap usage (grown
922          * then shrunken).  */
923         memset(g2h(target_brk), 0, brk_page - target_brk);
924 
925         target_brk = new_brk;
926         brk_page = HOST_PAGE_ALIGN(target_brk);
927         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
928             target_brk);
929         return target_brk;
930     } else if (mapped_addr != -1) {
931         /* Mapped but at wrong address, meaning there wasn't actually
932          * enough space for this brk.
933          */
934         target_munmap(mapped_addr, new_alloc_size);
935         mapped_addr = -1;
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
937     }
938     else {
939         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
940     }
941 
942 #if defined(TARGET_ALPHA)
943     /* We (partially) emulate OSF/1 on Alpha, which requires we
944        return a proper errno, not an unchanged brk value.  */
945     return -TARGET_ENOMEM;
946 #endif
947     /* For everything else, return the previous break. */
948     return target_brk;
949 }
950 
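/*
 * Worked example for do_brk() above (assuming a 4 KiB host page size):
 * with brk_page at 0x500000, a request for new_brk = 0x502800 gives
 * new_alloc_size = HOST_PAGE_ALIGN(0x2800) = 0x3000, and target_mmap()
 * is asked to place those pages at 0x500000.  Only if the mapping lands
 * exactly there is the brk considered grown; otherwise it is unmapped
 * again and the old break (or -TARGET_ENOMEM on Alpha) is returned.
 */
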
951 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
952     defined(TARGET_NR_pselect6)
953 static inline abi_long copy_from_user_fdset(fd_set *fds,
954                                             abi_ulong target_fds_addr,
955                                             int n)
956 {
957     int i, nw, j, k;
958     abi_ulong b, *target_fds;
959 
960     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
961     if (!(target_fds = lock_user(VERIFY_READ,
962                                  target_fds_addr,
963                                  sizeof(abi_ulong) * nw,
964                                  1)))
965         return -TARGET_EFAULT;
966 
967     FD_ZERO(fds);
968     k = 0;
969     for (i = 0; i < nw; i++) {
970         /* grab the abi_ulong */
971         __get_user(b, &target_fds[i]);
972         for (j = 0; j < TARGET_ABI_BITS; j++) {
973             /* check the bit inside the abi_ulong */
974             if ((b >> j) & 1)
975                 FD_SET(k, fds);
976             k++;
977         }
978     }
979 
980     unlock_user(target_fds, target_fds_addr, 0);
981 
982     return 0;
983 }
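/*
 * Worked example for copy_from_user_fdset() above: with TARGET_ABI_BITS
 * of 32 and n == 35, nw = DIV_ROUND_UP(35, 32) = 2 abi_ulong words are
 * locked, and each set bit j of word i marks host descriptor
 * k = i * 32 + j via FD_SET(); the whole last word is scanned even
 * though only bits below n are meaningful.
 */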
984 
985 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
986                                                  abi_ulong target_fds_addr,
987                                                  int n)
988 {
989     if (target_fds_addr) {
990         if (copy_from_user_fdset(fds, target_fds_addr, n))
991             return -TARGET_EFAULT;
992         *fds_ptr = fds;
993     } else {
994         *fds_ptr = NULL;
995     }
996     return 0;
997 }
998 
999 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1000                                           const fd_set *fds,
1001                                           int n)
1002 {
1003     int i, nw, j, k;
1004     abi_long v;
1005     abi_ulong *target_fds;
1006 
1007     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1008     if (!(target_fds = lock_user(VERIFY_WRITE,
1009                                  target_fds_addr,
1010                                  sizeof(abi_ulong) * nw,
1011                                  0)))
1012         return -TARGET_EFAULT;
1013 
1014     k = 0;
1015     for (i = 0; i < nw; i++) {
1016         v = 0;
1017         for (j = 0; j < TARGET_ABI_BITS; j++) {
1018             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1019             k++;
1020         }
1021         __put_user(v, &target_fds[i]);
1022     }
1023 
1024     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1025 
1026     return 0;
1027 }
1028 #endif
1029 
1030 #if defined(__alpha__)
1031 #define HOST_HZ 1024
1032 #else
1033 #define HOST_HZ 100
1034 #endif
1035 
1036 static inline abi_long host_to_target_clock_t(long ticks)
1037 {
1038 #if HOST_HZ == TARGET_HZ
1039     return ticks;
1040 #else
1041     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1042 #endif
1043 }
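/*
 * Example: on an Alpha host HOST_HZ is 1024, so for a target using the
 * common TARGET_HZ of 100, 2048 host ticks become
 * (2048 * 100) / 1024 = 200 target ticks; when the rates match, the
 * value is passed through unscaled.
 */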
1044 
1045 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1046                                              const struct rusage *rusage)
1047 {
1048     struct target_rusage *target_rusage;
1049 
1050     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1051         return -TARGET_EFAULT;
1052     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1053     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1054     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1055     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1056     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1057     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1058     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1059     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1060     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1061     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1062     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1063     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1064     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1065     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1066     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1067     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1068     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1069     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1070     unlock_user_struct(target_rusage, target_addr, 1);
1071 
1072     return 0;
1073 }
1074 
1075 #ifdef TARGET_NR_setrlimit
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1077 {
1078     abi_ulong target_rlim_swap;
1079     rlim_t result;
1080 
1081     target_rlim_swap = tswapal(target_rlim);
1082     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083         return RLIM_INFINITY;
1084 
1085     result = target_rlim_swap;
1086     if (target_rlim_swap != (rlim_t)result)
1087         return RLIM_INFINITY;
1088 
1089     return result;
1090 }
1091 #endif
1092 
1093 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1094 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1095 {
1096     abi_ulong target_rlim_swap;
1097     abi_ulong result;
1098 
1099     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1100         target_rlim_swap = TARGET_RLIM_INFINITY;
1101     else
1102         target_rlim_swap = rlim;
1103     result = tswapal(target_rlim_swap);
1104 
1105     return result;
1106 }
1107 #endif
1108 
1109 static inline int target_to_host_resource(int code)
1110 {
1111     switch (code) {
1112     case TARGET_RLIMIT_AS:
1113         return RLIMIT_AS;
1114     case TARGET_RLIMIT_CORE:
1115         return RLIMIT_CORE;
1116     case TARGET_RLIMIT_CPU:
1117         return RLIMIT_CPU;
1118     case TARGET_RLIMIT_DATA:
1119         return RLIMIT_DATA;
1120     case TARGET_RLIMIT_FSIZE:
1121         return RLIMIT_FSIZE;
1122     case TARGET_RLIMIT_LOCKS:
1123         return RLIMIT_LOCKS;
1124     case TARGET_RLIMIT_MEMLOCK:
1125         return RLIMIT_MEMLOCK;
1126     case TARGET_RLIMIT_MSGQUEUE:
1127         return RLIMIT_MSGQUEUE;
1128     case TARGET_RLIMIT_NICE:
1129         return RLIMIT_NICE;
1130     case TARGET_RLIMIT_NOFILE:
1131         return RLIMIT_NOFILE;
1132     case TARGET_RLIMIT_NPROC:
1133         return RLIMIT_NPROC;
1134     case TARGET_RLIMIT_RSS:
1135         return RLIMIT_RSS;
1136     case TARGET_RLIMIT_RTPRIO:
1137         return RLIMIT_RTPRIO;
1138     case TARGET_RLIMIT_SIGPENDING:
1139         return RLIMIT_SIGPENDING;
1140     case TARGET_RLIMIT_STACK:
1141         return RLIMIT_STACK;
1142     default:
1143         return code;
1144     }
1145 }
1146 
1147 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1148                                               abi_ulong target_tv_addr)
1149 {
1150     struct target_timeval *target_tv;
1151 
1152     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1153         return -TARGET_EFAULT;
1154     }
1155 
1156     __get_user(tv->tv_sec, &target_tv->tv_sec);
1157     __get_user(tv->tv_usec, &target_tv->tv_usec);
1158 
1159     unlock_user_struct(target_tv, target_tv_addr, 0);
1160 
1161     return 0;
1162 }
1163 
1164 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1165                                             const struct timeval *tv)
1166 {
1167     struct target_timeval *target_tv;
1168 
1169     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1170         return -TARGET_EFAULT;
1171     }
1172 
1173     __put_user(tv->tv_sec, &target_tv->tv_sec);
1174     __put_user(tv->tv_usec, &target_tv->tv_usec);
1175 
1176     unlock_user_struct(target_tv, target_tv_addr, 1);
1177 
1178     return 0;
1179 }
1180 
1181 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1182 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1183                                                 abi_ulong target_tv_addr)
1184 {
1185     struct target__kernel_sock_timeval *target_tv;
1186 
1187     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1188         return -TARGET_EFAULT;
1189     }
1190 
1191     __get_user(tv->tv_sec, &target_tv->tv_sec);
1192     __get_user(tv->tv_usec, &target_tv->tv_usec);
1193 
1194     unlock_user_struct(target_tv, target_tv_addr, 0);
1195 
1196     return 0;
1197 }
1198 #endif
1199 
1200 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1201                                               const struct timeval *tv)
1202 {
1203     struct target__kernel_sock_timeval *target_tv;
1204 
1205     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1206         return -TARGET_EFAULT;
1207     }
1208 
1209     __put_user(tv->tv_sec, &target_tv->tv_sec);
1210     __put_user(tv->tv_usec, &target_tv->tv_usec);
1211 
1212     unlock_user_struct(target_tv, target_tv_addr, 1);
1213 
1214     return 0;
1215 }
1216 
1217 #if defined(TARGET_NR_futex) || \
1218     defined(TARGET_NR_pselect6) || \
1219     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1220     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1221     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1222     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1223     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1224     defined(TARGET_NR_timer_settime) || \
1225     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1226 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1227                                                abi_ulong target_addr)
1228 {
1229     struct target_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 0);
1237     return 0;
1238 }
1239 #endif
1240 
1241 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1242     defined(TARGET_NR_timer_settime64) || \
1243     defined(TARGET_NR_mq_timedsend_time64) || \
1244     defined(TARGET_NR_mq_timedreceive_time64) || \
1245     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1246     defined(TARGET_NR_clock_nanosleep_time64) || \
1247     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1248     defined(TARGET_NR_utimensat) || \
1249     defined(TARGET_NR_utimensat_time64) || \
1250     defined(TARGET_NR_semtimedop_time64)
1251 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1252                                                  abi_ulong target_addr)
1253 {
1254     struct target__kernel_timespec *target_ts;
1255 
1256     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1257         return -TARGET_EFAULT;
1258     }
1259     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1260     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1261     /* In 32-bit mode this drops the padding in the 64-bit tv_nsec field. */
1262     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1263     unlock_user_struct(target_ts, target_addr, 0);
1264     return 0;
1265 }
1266 #endif
1267 
1268 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1269                                                struct timespec *host_ts)
1270 {
1271     struct target_timespec *target_ts;
1272 
1273     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1274         return -TARGET_EFAULT;
1275     }
1276     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1277     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1278     unlock_user_struct(target_ts, target_addr, 1);
1279     return 0;
1280 }
1281 
1282 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1283                                                  struct timespec *host_ts)
1284 {
1285     struct target__kernel_timespec *target_ts;
1286 
1287     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1288         return -TARGET_EFAULT;
1289     }
1290     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1291     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1292     unlock_user_struct(target_ts, target_addr, 1);
1293     return 0;
1294 }
1295 
1296 #if defined(TARGET_NR_gettimeofday)
1297 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1298                                              struct timezone *tz)
1299 {
1300     struct target_timezone *target_tz;
1301 
1302     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1303         return -TARGET_EFAULT;
1304     }
1305 
1306     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1307     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1308 
1309     unlock_user_struct(target_tz, target_tz_addr, 1);
1310 
1311     return 0;
1312 }
1313 #endif
1314 
1315 #if defined(TARGET_NR_settimeofday)
1316 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1317                                                abi_ulong target_tz_addr)
1318 {
1319     struct target_timezone *target_tz;
1320 
1321     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1322         return -TARGET_EFAULT;
1323     }
1324 
1325     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1326     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1327 
1328     unlock_user_struct(target_tz, target_tz_addr, 0);
1329 
1330     return 0;
1331 }
1332 #endif
1333 
1334 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1335 #include <mqueue.h>
1336 
1337 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1338                                               abi_ulong target_mq_attr_addr)
1339 {
1340     struct target_mq_attr *target_mq_attr;
1341 
1342     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1343                           target_mq_attr_addr, 1))
1344         return -TARGET_EFAULT;
1345 
1346     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1347     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1348     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1349     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1350 
1351     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1352 
1353     return 0;
1354 }
1355 
1356 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1357                                             const struct mq_attr *attr)
1358 {
1359     struct target_mq_attr *target_mq_attr;
1360 
1361     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1362                           target_mq_attr_addr, 0))
1363         return -TARGET_EFAULT;
1364 
1365     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1366     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1367     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1368     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1369 
1370     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1371 
1372     return 0;
1373 }
1374 #endif
1375 
1376 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1377 /* do_select() must return target values and target errnos. */
1378 static abi_long do_select(int n,
1379                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1380                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1381 {
1382     fd_set rfds, wfds, efds;
1383     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1384     struct timeval tv;
1385     struct timespec ts, *ts_ptr;
1386     abi_long ret;
1387 
1388     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389     if (ret) {
1390         return ret;
1391     }
1392     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393     if (ret) {
1394         return ret;
1395     }
1396     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1397     if (ret) {
1398         return ret;
1399     }
1400 
1401     if (target_tv_addr) {
1402         if (copy_from_user_timeval(&tv, target_tv_addr))
1403             return -TARGET_EFAULT;
1404         ts.tv_sec = tv.tv_sec;
1405         ts.tv_nsec = tv.tv_usec * 1000;
1406         ts_ptr = &ts;
1407     } else {
1408         ts_ptr = NULL;
1409     }
1410 
1411     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1412                                   ts_ptr, NULL));
1413 
1414     if (!is_error(ret)) {
1415         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1416             return -TARGET_EFAULT;
1417         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1418             return -TARGET_EFAULT;
1419         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1420             return -TARGET_EFAULT;
1421 
1422         if (target_tv_addr) {
1423             tv.tv_sec = ts.tv_sec;
1424             tv.tv_usec = ts.tv_nsec / 1000;
1425             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1426                 return -TARGET_EFAULT;
1427             }
1428         }
1429     }
1430 
1431     return ret;
1432 }
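/*
 * Example for do_select() above: a guest timeout of
 * { .tv_sec = 1, .tv_usec = 500000 } becomes a 1.5 s timespec for
 * safe_pselect6(), and on success the kernel-updated remaining time is
 * converted back to a timeval and written to the guest, mirroring the
 * Linux select() behaviour of modifying the timeout argument.
 */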
1433 
1434 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1435 static abi_long do_old_select(abi_ulong arg1)
1436 {
1437     struct target_sel_arg_struct *sel;
1438     abi_ulong inp, outp, exp, tvp;
1439     long nsel;
1440 
1441     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1442         return -TARGET_EFAULT;
1443     }
1444 
1445     nsel = tswapal(sel->n);
1446     inp = tswapal(sel->inp);
1447     outp = tswapal(sel->outp);
1448     exp = tswapal(sel->exp);
1449     tvp = tswapal(sel->tvp);
1450 
1451     unlock_user_struct(sel, arg1, 0);
1452 
1453     return do_select(nsel, inp, outp, exp, tvp);
1454 }
1455 #endif
1456 #endif
1457 
1458 static abi_long do_pipe2(int host_pipe[], int flags)
1459 {
1460 #ifdef CONFIG_PIPE2
1461     return pipe2(host_pipe, flags);
1462 #else
1463     return -ENOSYS;
1464 #endif
1465 }
1466 
1467 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1468                         int flags, int is_pipe2)
1469 {
1470     int host_pipe[2];
1471     abi_long ret;
1472     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1473 
1474     if (is_error(ret))
1475         return get_errno(ret);
1476 
1477     /* Several targets have special calling conventions for the original
1478        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1479     if (!is_pipe2) {
1480 #if defined(TARGET_ALPHA)
1481         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1482         return host_pipe[0];
1483 #elif defined(TARGET_MIPS)
1484         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1485         return host_pipe[0];
1486 #elif defined(TARGET_SH4)
1487         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1488         return host_pipe[0];
1489 #elif defined(TARGET_SPARC)
1490         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1491         return host_pipe[0];
1492 #endif
1493     }
1494 
1495     if (put_user_s32(host_pipe[0], pipedes)
1496         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1497         return -TARGET_EFAULT;
1498     return get_errno(ret);
1499 }
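/*
 * Illustration: on the targets special-cased above (Alpha, MIPS, SH4,
 * SPARC) the original pipe syscall returns the read end as the normal
 * return value and the write end in a second register, so nothing is
 * written to the guest pipedes buffer; pipe2(), and pipe on all other
 * targets, use the memory-based interface instead.
 */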
1500 
1501 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1502                                               abi_ulong target_addr,
1503                                               socklen_t len)
1504 {
1505     struct target_ip_mreqn *target_smreqn;
1506 
1507     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1508     if (!target_smreqn)
1509         return -TARGET_EFAULT;
1510     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1511     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1512     if (len == sizeof(struct target_ip_mreqn))
1513         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1514     unlock_user(target_smreqn, target_addr, 0);
1515 
1516     return 0;
1517 }
1518 
1519 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1520                                                abi_ulong target_addr,
1521                                                socklen_t len)
1522 {
1523     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1524     sa_family_t sa_family;
1525     struct target_sockaddr *target_saddr;
1526 
1527     if (fd_trans_target_to_host_addr(fd)) {
1528         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1529     }
1530 
1531     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1532     if (!target_saddr)
1533         return -TARGET_EFAULT;
1534 
1535     sa_family = tswap16(target_saddr->sa_family);
1536 
1537     /* Oops. The caller might send an incomplete sun_path; sun_path
1538      * must be terminated by \0 (see the manual page), but
1539      * unfortunately it is quite common to specify the sockaddr_un
1540      * length as "strlen(x->sun_path)" when it should be
1541      * "strlen(...) + 1". We fix that up here if needed.
1542      * The Linux kernel has a similar workaround.
1543      */
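         /*
          * Editorial example (hypothetical guest values): a guest that calls
          *     connect(fd, (struct sockaddr *)&sun,
          *             offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock"));
          * passes a length that stops one byte short of the terminating NUL.
          * The check below sees cp[len - 1] != '\0' while cp[len] == '\0' and
          * widens len by one so the host kernel receives a terminated path.
          */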
1544 
1545     if (sa_family == AF_UNIX) {
1546         if (len < unix_maxlen && len > 0) {
1547             char *cp = (char *)target_saddr;
1548 
1549             if (cp[len - 1] && !cp[len])
1550                 len++;
1551         }
1552         if (len > unix_maxlen)
1553             len = unix_maxlen;
1554     }
1555 
1556     memcpy(addr, target_saddr, len);
1557     addr->sa_family = sa_family;
1558     if (sa_family == AF_NETLINK) {
1559         struct sockaddr_nl *nladdr;
1560 
1561         nladdr = (struct sockaddr_nl *)addr;
1562         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1563         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1564     } else if (sa_family == AF_PACKET) {
1565         struct target_sockaddr_ll *lladdr;
1566 
1567         lladdr = (struct target_sockaddr_ll *)addr;
1568         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1569         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1570     }
1571     unlock_user(target_saddr, target_addr, 0);
1572 
1573     return 0;
1574 }
1575 
1576 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1577                                                struct sockaddr *addr,
1578                                                socklen_t len)
1579 {
1580     struct target_sockaddr *target_saddr;
1581 
1582     if (len == 0) {
1583         return 0;
1584     }
1585     assert(addr);
1586 
1587     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1588     if (!target_saddr)
1589         return -TARGET_EFAULT;
1590     memcpy(target_saddr, addr, len);
1591     if (len >= offsetof(struct target_sockaddr, sa_family) +
1592         sizeof(target_saddr->sa_family)) {
1593         target_saddr->sa_family = tswap16(addr->sa_family);
1594     }
1595     if (addr->sa_family == AF_NETLINK &&
1596         len >= sizeof(struct target_sockaddr_nl)) {
1597         struct target_sockaddr_nl *target_nl =
1598                (struct target_sockaddr_nl *)target_saddr;
1599         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1600         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1601     } else if (addr->sa_family == AF_PACKET) {
1602         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1603         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1604         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1605     } else if (addr->sa_family == AF_INET6 &&
1606                len >= sizeof(struct target_sockaddr_in6)) {
1607         struct target_sockaddr_in6 *target_in6 =
1608                (struct target_sockaddr_in6 *)target_saddr;
1609         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1610     }
1611     unlock_user(target_saddr, target_addr, len);
1612 
1613     return 0;
1614 }
1615 
1616 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1617                                            struct target_msghdr *target_msgh)
1618 {
1619     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1620     abi_long msg_controllen;
1621     abi_ulong target_cmsg_addr;
1622     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1623     socklen_t space = 0;
1624 
1625     msg_controllen = tswapal(target_msgh->msg_controllen);
1626     if (msg_controllen < sizeof (struct target_cmsghdr))
1627         goto the_end;
1628     target_cmsg_addr = tswapal(target_msgh->msg_control);
1629     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1630     target_cmsg_start = target_cmsg;
1631     if (!target_cmsg)
1632         return -TARGET_EFAULT;
1633 
1634     while (cmsg && target_cmsg) {
1635         void *data = CMSG_DATA(cmsg);
1636         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1637 
1638         int len = tswapal(target_cmsg->cmsg_len)
1639             - sizeof(struct target_cmsghdr);
1640 
1641         space += CMSG_SPACE(len);
1642         if (space > msgh->msg_controllen) {
1643             space -= CMSG_SPACE(len);
1644             /* This is a QEMU bug, since we allocated the payload
1645              * area ourselves (unlike overflow in host-to-target
1646              * conversion, which is just the guest giving us a buffer
1647              * that's too small). It can't happen for the payload types
1648              * we currently support; if it becomes an issue in future
1649              * we would need to improve our allocation strategy to
1650              * something more intelligent than "twice the size of the
1651              * target buffer we're reading from".
1652              */
1653             qemu_log_mask(LOG_UNIMP,
1654                           ("Unsupported ancillary data %d/%d: "
1655                            "unhandled msg size\n"),
1656                           tswap32(target_cmsg->cmsg_level),
1657                           tswap32(target_cmsg->cmsg_type));
1658             break;
1659         }
1660 
1661         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1662             cmsg->cmsg_level = SOL_SOCKET;
1663         } else {
1664             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1665         }
1666         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1667         cmsg->cmsg_len = CMSG_LEN(len);
1668 
1669         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1670             int *fd = (int *)data;
1671             int *target_fd = (int *)target_data;
1672             int i, numfds = len / sizeof(int);
1673 
1674             for (i = 0; i < numfds; i++) {
1675                 __get_user(fd[i], target_fd + i);
1676             }
1677         } else if (cmsg->cmsg_level == SOL_SOCKET
1678                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1679             struct ucred *cred = (struct ucred *)data;
1680             struct target_ucred *target_cred =
1681                 (struct target_ucred *)target_data;
1682 
1683             __get_user(cred->pid, &target_cred->pid);
1684             __get_user(cred->uid, &target_cred->uid);
1685             __get_user(cred->gid, &target_cred->gid);
1686         } else {
1687             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1688                           cmsg->cmsg_level, cmsg->cmsg_type);
1689             memcpy(data, target_data, len);
1690         }
1691 
1692         cmsg = CMSG_NXTHDR(msgh, cmsg);
1693         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1694                                          target_cmsg_start);
1695     }
1696     unlock_user(target_cmsg, target_cmsg_addr, 0);
1697  the_end:
1698     msgh->msg_controllen = space;
1699     return 0;
1700 }
1701 
1702 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1703                                            struct msghdr *msgh)
1704 {
1705     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1706     abi_long msg_controllen;
1707     abi_ulong target_cmsg_addr;
1708     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1709     socklen_t space = 0;
1710 
1711     msg_controllen = tswapal(target_msgh->msg_controllen);
1712     if (msg_controllen < sizeof (struct target_cmsghdr))
1713         goto the_end;
1714     target_cmsg_addr = tswapal(target_msgh->msg_control);
1715     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1716     target_cmsg_start = target_cmsg;
1717     if (!target_cmsg)
1718         return -TARGET_EFAULT;
1719 
1720     while (cmsg && target_cmsg) {
1721         void *data = CMSG_DATA(cmsg);
1722         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1723 
1724         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1725         int tgt_len, tgt_space;
1726 
1727         /* We never copy a half-header but may copy half-data;
1728          * this is Linux's behaviour in put_cmsg(). Note that
1729          * truncation here is a guest problem (which we report
1730          * to the guest via the CTRUNC bit), unlike truncation
1731          * in target_to_host_cmsg, which is a QEMU bug.
1732          */
1733         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1734             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1735             break;
1736         }
1737 
1738         if (cmsg->cmsg_level == SOL_SOCKET) {
1739             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1740         } else {
1741             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1742         }
1743         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1744 
1745         /* Payload types which need a different size of payload on
1746          * the target must adjust tgt_len here.
1747          */
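             /* Editorial example: SO_TIMESTAMP below is exactly such a case.
              * The host hands us a struct timeval, but the guest expects a
              * struct target_timeval whose field widths depend on the target
              * ABI, so tgt_len is set to sizeof(struct target_timeval) before
              * the copy-and-convert step.
              */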
1748         tgt_len = len;
1749         switch (cmsg->cmsg_level) {
1750         case SOL_SOCKET:
1751             switch (cmsg->cmsg_type) {
1752             case SO_TIMESTAMP:
1753                 tgt_len = sizeof(struct target_timeval);
1754                 break;
1755             default:
1756                 break;
1757             }
1758             break;
1759         default:
1760             break;
1761         }
1762 
1763         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1764             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1765             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1766         }
1767 
1768         /* We must now copy-and-convert len bytes of payload
1769          * into tgt_len bytes of destination space. Bear in mind
1770          * that in both source and destination we may be dealing
1771          * with a truncated value!
1772          */
1773         switch (cmsg->cmsg_level) {
1774         case SOL_SOCKET:
1775             switch (cmsg->cmsg_type) {
1776             case SCM_RIGHTS:
1777             {
1778                 int *fd = (int *)data;
1779                 int *target_fd = (int *)target_data;
1780                 int i, numfds = tgt_len / sizeof(int);
1781 
1782                 for (i = 0; i < numfds; i++) {
1783                     __put_user(fd[i], target_fd + i);
1784                 }
1785                 break;
1786             }
1787             case SO_TIMESTAMP:
1788             {
1789                 struct timeval *tv = (struct timeval *)data;
1790                 struct target_timeval *target_tv =
1791                     (struct target_timeval *)target_data;
1792 
1793                 if (len != sizeof(struct timeval) ||
1794                     tgt_len != sizeof(struct target_timeval)) {
1795                     goto unimplemented;
1796                 }
1797 
1798                 /* copy struct timeval to target */
1799                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1800                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1801                 break;
1802             }
1803             case SCM_CREDENTIALS:
1804             {
1805                 struct ucred *cred = (struct ucred *)data;
1806                 struct target_ucred *target_cred =
1807                     (struct target_ucred *)target_data;
1808 
1809                 __put_user(cred->pid, &target_cred->pid);
1810                 __put_user(cred->uid, &target_cred->uid);
1811                 __put_user(cred->gid, &target_cred->gid);
1812                 break;
1813             }
1814             default:
1815                 goto unimplemented;
1816             }
1817             break;
1818 
1819         case SOL_IP:
1820             switch (cmsg->cmsg_type) {
1821             case IP_TTL:
1822             {
1823                 uint32_t *v = (uint32_t *)data;
1824                 uint32_t *t_int = (uint32_t *)target_data;
1825 
1826                 if (len != sizeof(uint32_t) ||
1827                     tgt_len != sizeof(uint32_t)) {
1828                     goto unimplemented;
1829                 }
1830                 __put_user(*v, t_int);
1831                 break;
1832             }
1833             case IP_RECVERR:
1834             {
1835                 struct errhdr_t {
1836                    struct sock_extended_err ee;
1837                    struct sockaddr_in offender;
1838                 };
1839                 struct errhdr_t *errh = (struct errhdr_t *)data;
1840                 struct errhdr_t *target_errh =
1841                     (struct errhdr_t *)target_data;
1842 
1843                 if (len != sizeof(struct errhdr_t) ||
1844                     tgt_len != sizeof(struct errhdr_t)) {
1845                     goto unimplemented;
1846                 }
1847                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1848                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1849                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1850                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1851                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1852                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1853                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1854                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1855                     (void *) &errh->offender, sizeof(errh->offender));
1856                 break;
1857             }
1858             default:
1859                 goto unimplemented;
1860             }
1861             break;
1862 
1863         case SOL_IPV6:
1864             switch (cmsg->cmsg_type) {
1865             case IPV6_HOPLIMIT:
1866             {
1867                 uint32_t *v = (uint32_t *)data;
1868                 uint32_t *t_int = (uint32_t *)target_data;
1869 
1870                 if (len != sizeof(uint32_t) ||
1871                     tgt_len != sizeof(uint32_t)) {
1872                     goto unimplemented;
1873                 }
1874                 __put_user(*v, t_int);
1875                 break;
1876             }
1877             case IPV6_RECVERR:
1878             {
1879                 struct errhdr6_t {
1880                    struct sock_extended_err ee;
1881                    struct sockaddr_in6 offender;
1882                 };
1883                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1884                 struct errhdr6_t *target_errh =
1885                     (struct errhdr6_t *)target_data;
1886 
1887                 if (len != sizeof(struct errhdr6_t) ||
1888                     tgt_len != sizeof(struct errhdr6_t)) {
1889                     goto unimplemented;
1890                 }
1891                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1892                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1893                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1894                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1895                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1896                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1897                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1898                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1899                     (void *) &errh->offender, sizeof(errh->offender));
1900                 break;
1901             }
1902             default:
1903                 goto unimplemented;
1904             }
1905             break;
1906 
1907         default:
1908         unimplemented:
1909             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1910                           cmsg->cmsg_level, cmsg->cmsg_type);
1911             memcpy(target_data, data, MIN(len, tgt_len));
1912             if (tgt_len > len) {
1913                 memset(target_data + len, 0, tgt_len - len);
1914             }
1915         }
1916 
1917         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1918         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1919         if (msg_controllen < tgt_space) {
1920             tgt_space = msg_controllen;
1921         }
1922         msg_controllen -= tgt_space;
1923         space += tgt_space;
1924         cmsg = CMSG_NXTHDR(msgh, cmsg);
1925         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1926                                          target_cmsg_start);
1927     }
1928     unlock_user(target_cmsg, target_cmsg_addr, space);
1929  the_end:
1930     target_msgh->msg_controllen = tswapal(space);
1931     return 0;
1932 }
1933 
1934 /* do_setsockopt() Must return target values and target errnos. */
1935 static abi_long do_setsockopt(int sockfd, int level, int optname,
1936                               abi_ulong optval_addr, socklen_t optlen)
1937 {
1938     abi_long ret;
1939     int val;
1940     struct ip_mreqn *ip_mreq;
1941     struct ip_mreq_source *ip_mreq_source;
1942 
1943     switch(level) {
1944     case SOL_TCP:
1945         /* TCP options all take an 'int' value.  */
1946         if (optlen < sizeof(uint32_t))
1947             return -TARGET_EINVAL;
1948 
1949         if (get_user_u32(val, optval_addr))
1950             return -TARGET_EFAULT;
1951         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1952         break;
1953     case SOL_IP:
1954         switch(optname) {
1955         case IP_TOS:
1956         case IP_TTL:
1957         case IP_HDRINCL:
1958         case IP_ROUTER_ALERT:
1959         case IP_RECVOPTS:
1960         case IP_RETOPTS:
1961         case IP_PKTINFO:
1962         case IP_MTU_DISCOVER:
1963         case IP_RECVERR:
1964         case IP_RECVTTL:
1965         case IP_RECVTOS:
1966 #ifdef IP_FREEBIND
1967         case IP_FREEBIND:
1968 #endif
1969         case IP_MULTICAST_TTL:
1970         case IP_MULTICAST_LOOP:
1971             val = 0;
1972             if (optlen >= sizeof(uint32_t)) {
1973                 if (get_user_u32(val, optval_addr))
1974                     return -TARGET_EFAULT;
1975             } else if (optlen >= 1) {
1976                 if (get_user_u8(val, optval_addr))
1977                     return -TARGET_EFAULT;
1978             }
1979             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1980             break;
1981         case IP_ADD_MEMBERSHIP:
1982         case IP_DROP_MEMBERSHIP:
1983             if (optlen < sizeof (struct target_ip_mreq) ||
1984                 optlen > sizeof (struct target_ip_mreqn))
1985                 return -TARGET_EINVAL;
1986 
1987             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1988             ret = target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
                 if (ret) {
                     return ret;
                 }
1989             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1990             break;
1991 
1992         case IP_BLOCK_SOURCE:
1993         case IP_UNBLOCK_SOURCE:
1994         case IP_ADD_SOURCE_MEMBERSHIP:
1995         case IP_DROP_SOURCE_MEMBERSHIP:
1996             if (optlen != sizeof (struct target_ip_mreq_source))
1997                 return -TARGET_EINVAL;
1998 
1999             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
2000             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2001             unlock_user(ip_mreq_source, optval_addr, 0);
2002             break;
2003 
2004         default:
2005             goto unimplemented;
2006         }
2007         break;
2008     case SOL_IPV6:
2009         switch (optname) {
2010         case IPV6_MTU_DISCOVER:
2011         case IPV6_MTU:
2012         case IPV6_V6ONLY:
2013         case IPV6_RECVPKTINFO:
2014         case IPV6_UNICAST_HOPS:
2015         case IPV6_MULTICAST_HOPS:
2016         case IPV6_MULTICAST_LOOP:
2017         case IPV6_RECVERR:
2018         case IPV6_RECVHOPLIMIT:
2019         case IPV6_2292HOPLIMIT:
2020         case IPV6_CHECKSUM:
2021         case IPV6_ADDRFORM:
2022         case IPV6_2292PKTINFO:
2023         case IPV6_RECVTCLASS:
2024         case IPV6_RECVRTHDR:
2025         case IPV6_2292RTHDR:
2026         case IPV6_RECVHOPOPTS:
2027         case IPV6_2292HOPOPTS:
2028         case IPV6_RECVDSTOPTS:
2029         case IPV6_2292DSTOPTS:
2030         case IPV6_TCLASS:
2031 #ifdef IPV6_RECVPATHMTU
2032         case IPV6_RECVPATHMTU:
2033 #endif
2034 #ifdef IPV6_TRANSPARENT
2035         case IPV6_TRANSPARENT:
2036 #endif
2037 #ifdef IPV6_FREEBIND
2038         case IPV6_FREEBIND:
2039 #endif
2040 #ifdef IPV6_RECVORIGDSTADDR
2041         case IPV6_RECVORIGDSTADDR:
2042 #endif
2043             val = 0;
2044             if (optlen < sizeof(uint32_t)) {
2045                 return -TARGET_EINVAL;
2046             }
2047             if (get_user_u32(val, optval_addr)) {
2048                 return -TARGET_EFAULT;
2049             }
2050             ret = get_errno(setsockopt(sockfd, level, optname,
2051                                        &val, sizeof(val)));
2052             break;
2053         case IPV6_PKTINFO:
2054         {
2055             struct in6_pktinfo pki;
2056 
2057             if (optlen < sizeof(pki)) {
2058                 return -TARGET_EINVAL;
2059             }
2060 
2061             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2062                 return -TARGET_EFAULT;
2063             }
2064 
2065             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2066 
2067             ret = get_errno(setsockopt(sockfd, level, optname,
2068                                        &pki, sizeof(pki)));
2069             break;
2070         }
2071         case IPV6_ADD_MEMBERSHIP:
2072         case IPV6_DROP_MEMBERSHIP:
2073         {
2074             struct ipv6_mreq ipv6mreq;
2075 
2076             if (optlen < sizeof(ipv6mreq)) {
2077                 return -TARGET_EINVAL;
2078             }
2079 
2080             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2081                 return -TARGET_EFAULT;
2082             }
2083 
2084             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2085 
2086             ret = get_errno(setsockopt(sockfd, level, optname,
2087                                        &ipv6mreq, sizeof(ipv6mreq)));
2088             break;
2089         }
2090         default:
2091             goto unimplemented;
2092         }
2093         break;
2094     case SOL_ICMPV6:
2095         switch (optname) {
2096         case ICMPV6_FILTER:
2097         {
2098             struct icmp6_filter icmp6f;
2099 
2100             if (optlen > sizeof(icmp6f)) {
2101                 optlen = sizeof(icmp6f);
2102             }
2103 
2104             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2105                 return -TARGET_EFAULT;
2106             }
2107 
2108             for (val = 0; val < 8; val++) {
2109                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2110             }
2111 
2112             ret = get_errno(setsockopt(sockfd, level, optname,
2113                                        &icmp6f, optlen));
2114             break;
2115         }
2116         default:
2117             goto unimplemented;
2118         }
2119         break;
2120     case SOL_RAW:
2121         switch (optname) {
2122         case ICMP_FILTER:
2123         case IPV6_CHECKSUM:
2124             /* these take a u32 value */
2125             if (optlen < sizeof(uint32_t)) {
2126                 return -TARGET_EINVAL;
2127             }
2128 
2129             if (get_user_u32(val, optval_addr)) {
2130                 return -TARGET_EFAULT;
2131             }
2132             ret = get_errno(setsockopt(sockfd, level, optname,
2133                                        &val, sizeof(val)));
2134             break;
2135 
2136         default:
2137             goto unimplemented;
2138         }
2139         break;
2140 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2141     case SOL_ALG:
2142         switch (optname) {
2143         case ALG_SET_KEY:
2144         {
2145             char *alg_key = g_malloc(optlen);
2146 
2147             if (!alg_key) {
2148                 return -TARGET_ENOMEM;
2149             }
2150             if (copy_from_user(alg_key, optval_addr, optlen)) {
2151                 g_free(alg_key);
2152                 return -TARGET_EFAULT;
2153             }
2154             ret = get_errno(setsockopt(sockfd, level, optname,
2155                                        alg_key, optlen));
2156             g_free(alg_key);
2157             break;
2158         }
2159         case ALG_SET_AEAD_AUTHSIZE:
2160         {
2161             ret = get_errno(setsockopt(sockfd, level, optname,
2162                                        NULL, optlen));
2163             break;
2164         }
2165         default:
2166             goto unimplemented;
2167         }
2168         break;
2169 #endif
2170     case TARGET_SOL_SOCKET:
2171         switch (optname) {
2172         case TARGET_SO_RCVTIMEO:
2173         {
2174                 struct timeval tv;
2175 
2176                 optname = SO_RCVTIMEO;
2177 
2178 set_timeout:
2179                 if (optlen != sizeof(struct target_timeval)) {
2180                     return -TARGET_EINVAL;
2181                 }
2182 
2183                 if (copy_from_user_timeval(&tv, optval_addr)) {
2184                     return -TARGET_EFAULT;
2185                 }
2186 
2187                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2188                                 &tv, sizeof(tv)));
2189                 return ret;
2190         }
2191         case TARGET_SO_SNDTIMEO:
2192                 optname = SO_SNDTIMEO;
2193                 goto set_timeout;
2194         case TARGET_SO_ATTACH_FILTER:
2195         {
2196                 struct target_sock_fprog *tfprog;
2197                 struct target_sock_filter *tfilter;
2198                 struct sock_fprog fprog;
2199                 struct sock_filter *filter;
2200                 int i;
2201 
2202                 if (optlen != sizeof(*tfprog)) {
2203                     return -TARGET_EINVAL;
2204                 }
2205                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2206                     return -TARGET_EFAULT;
2207                 }
2208                 if (!lock_user_struct(VERIFY_READ, tfilter,
2209                                       tswapal(tfprog->filter), 0)) {
2210                     unlock_user_struct(tfprog, optval_addr, 1);
2211                     return -TARGET_EFAULT;
2212                 }
2213 
2214                 fprog.len = tswap16(tfprog->len);
2215                 filter = g_try_new(struct sock_filter, fprog.len);
2216                 if (filter == NULL) {
2217                     unlock_user_struct(tfilter, tfprog->filter, 1);
2218                     unlock_user_struct(tfprog, optval_addr, 1);
2219                     return -TARGET_ENOMEM;
2220                 }
2221                 for (i = 0; i < fprog.len; i++) {
2222                     filter[i].code = tswap16(tfilter[i].code);
2223                     filter[i].jt = tfilter[i].jt;
2224                     filter[i].jf = tfilter[i].jf;
2225                     filter[i].k = tswap32(tfilter[i].k);
2226                 }
2227                 fprog.filter = filter;
2228 
2229                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2230                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2231                 g_free(filter);
2232 
2233                 unlock_user_struct(tfilter, tfprog->filter, 1);
2234                 unlock_user_struct(tfprog, optval_addr, 1);
2235                 return ret;
2236         }
2237 	case TARGET_SO_BINDTODEVICE:
2238 	{
2239 		char *dev_ifname, *addr_ifname;
2240 
2241 		if (optlen > IFNAMSIZ - 1) {
2242 		    optlen = IFNAMSIZ - 1;
2243 		}
2244 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2245 		if (!dev_ifname) {
2246 		    return -TARGET_EFAULT;
2247 		}
2248 		optname = SO_BINDTODEVICE;
2249 		addr_ifname = alloca(IFNAMSIZ);
2250 		memcpy(addr_ifname, dev_ifname, optlen);
2251 		addr_ifname[optlen] = 0;
2252 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2253                                            addr_ifname, optlen));
2254 		unlock_user (dev_ifname, optval_addr, 0);
2255 		return ret;
2256 	}
2257         case TARGET_SO_LINGER:
2258         {
2259                 struct linger lg;
2260                 struct target_linger *tlg;
2261 
2262                 if (optlen != sizeof(struct target_linger)) {
2263                     return -TARGET_EINVAL;
2264                 }
2265                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2266                     return -TARGET_EFAULT;
2267                 }
2268                 __get_user(lg.l_onoff, &tlg->l_onoff);
2269                 __get_user(lg.l_linger, &tlg->l_linger);
2270                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2271                                 &lg, sizeof(lg)));
2272                 unlock_user_struct(tlg, optval_addr, 0);
2273                 return ret;
2274         }
2275             /* Options with 'int' argument.  */
2276         case TARGET_SO_DEBUG:
2277 		optname = SO_DEBUG;
2278 		break;
2279         case TARGET_SO_REUSEADDR:
2280 		optname = SO_REUSEADDR;
2281 		break;
2282 #ifdef SO_REUSEPORT
2283         case TARGET_SO_REUSEPORT:
2284                 optname = SO_REUSEPORT;
2285                 break;
2286 #endif
2287         case TARGET_SO_TYPE:
2288 		optname = SO_TYPE;
2289 		break;
2290         case TARGET_SO_ERROR:
2291 		optname = SO_ERROR;
2292 		break;
2293         case TARGET_SO_DONTROUTE:
2294 		optname = SO_DONTROUTE;
2295 		break;
2296         case TARGET_SO_BROADCAST:
2297 		optname = SO_BROADCAST;
2298 		break;
2299         case TARGET_SO_SNDBUF:
2300 		optname = SO_SNDBUF;
2301 		break;
2302         case TARGET_SO_SNDBUFFORCE:
2303                 optname = SO_SNDBUFFORCE;
2304                 break;
2305         case TARGET_SO_RCVBUF:
2306 		optname = SO_RCVBUF;
2307 		break;
2308         case TARGET_SO_RCVBUFFORCE:
2309                 optname = SO_RCVBUFFORCE;
2310                 break;
2311         case TARGET_SO_KEEPALIVE:
2312 		optname = SO_KEEPALIVE;
2313 		break;
2314         case TARGET_SO_OOBINLINE:
2315 		optname = SO_OOBINLINE;
2316 		break;
2317         case TARGET_SO_NO_CHECK:
2318 		optname = SO_NO_CHECK;
2319 		break;
2320         case TARGET_SO_PRIORITY:
2321 		optname = SO_PRIORITY;
2322 		break;
2323 #ifdef SO_BSDCOMPAT
2324         case TARGET_SO_BSDCOMPAT:
2325 		optname = SO_BSDCOMPAT;
2326 		break;
2327 #endif
2328         case TARGET_SO_PASSCRED:
2329 		optname = SO_PASSCRED;
2330 		break;
2331         case TARGET_SO_PASSSEC:
2332                 optname = SO_PASSSEC;
2333                 break;
2334         case TARGET_SO_TIMESTAMP:
2335 		optname = SO_TIMESTAMP;
2336 		break;
2337         case TARGET_SO_RCVLOWAT:
2338 		optname = SO_RCVLOWAT;
2339 		break;
2340         default:
2341             goto unimplemented;
2342         }
2343 	if (optlen < sizeof(uint32_t))
2344             return -TARGET_EINVAL;
2345 
2346 	if (get_user_u32(val, optval_addr))
2347             return -TARGET_EFAULT;
2348 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2349         break;
2350 #ifdef SOL_NETLINK
2351     case SOL_NETLINK:
2352         switch (optname) {
2353         case NETLINK_PKTINFO:
2354         case NETLINK_ADD_MEMBERSHIP:
2355         case NETLINK_DROP_MEMBERSHIP:
2356         case NETLINK_BROADCAST_ERROR:
2357         case NETLINK_NO_ENOBUFS:
2358 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2359         case NETLINK_LISTEN_ALL_NSID:
2360         case NETLINK_CAP_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2363         case NETLINK_EXT_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2366         case NETLINK_GET_STRICT_CHK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2368             break;
2369         default:
2370             goto unimplemented;
2371         }
2372         val = 0;
2373         if (optlen < sizeof(uint32_t)) {
2374             return -TARGET_EINVAL;
2375         }
2376         if (get_user_u32(val, optval_addr)) {
2377             return -TARGET_EFAULT;
2378         }
2379         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2380                                    sizeof(val)));
2381         break;
2382 #endif /* SOL_NETLINK */
2383     default:
2384     unimplemented:
2385         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2386                       level, optname);
2387         ret = -TARGET_ENOPROTOOPT;
2388     }
2389     return ret;
2390 }
2391 
2392 /* do_getsockopt() Must return target values and target errnos. */
2393 static abi_long do_getsockopt(int sockfd, int level, int optname,
2394                               abi_ulong optval_addr, abi_ulong optlen)
2395 {
2396     abi_long ret;
2397     int len, val;
2398     socklen_t lv;
2399 
2400     switch(level) {
2401     case TARGET_SOL_SOCKET:
2402         level = SOL_SOCKET;
2403         switch (optname) {
2404         /* These don't just return a single integer */
2405         case TARGET_SO_PEERNAME:
2406             goto unimplemented;
2407         case TARGET_SO_RCVTIMEO: {
2408             struct timeval tv;
2409             socklen_t tvlen;
2410 
2411             optname = SO_RCVTIMEO;
2412 
2413 get_timeout:
2414             if (get_user_u32(len, optlen)) {
2415                 return -TARGET_EFAULT;
2416             }
2417             if (len < 0) {
2418                 return -TARGET_EINVAL;
2419             }
2420 
2421             tvlen = sizeof(tv);
2422             ret = get_errno(getsockopt(sockfd, level, optname,
2423                                        &tv, &tvlen));
2424             if (ret < 0) {
2425                 return ret;
2426             }
2427             if (len > sizeof(struct target_timeval)) {
2428                 len = sizeof(struct target_timeval);
2429             }
2430             if (copy_to_user_timeval(optval_addr, &tv)) {
2431                 return -TARGET_EFAULT;
2432             }
2433             if (put_user_u32(len, optlen)) {
2434                 return -TARGET_EFAULT;
2435             }
2436             break;
2437         }
2438         case TARGET_SO_SNDTIMEO:
2439             optname = SO_SNDTIMEO;
2440             goto get_timeout;
2441         case TARGET_SO_PEERCRED: {
2442             struct ucred cr;
2443             socklen_t crlen;
2444             struct target_ucred *tcr;
2445 
2446             if (get_user_u32(len, optlen)) {
2447                 return -TARGET_EFAULT;
2448             }
2449             if (len < 0) {
2450                 return -TARGET_EINVAL;
2451             }
2452 
2453             crlen = sizeof(cr);
2454             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2455                                        &cr, &crlen));
2456             if (ret < 0) {
2457                 return ret;
2458             }
2459             if (len > crlen) {
2460                 len = crlen;
2461             }
2462             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2463                 return -TARGET_EFAULT;
2464             }
2465             __put_user(cr.pid, &tcr->pid);
2466             __put_user(cr.uid, &tcr->uid);
2467             __put_user(cr.gid, &tcr->gid);
2468             unlock_user_struct(tcr, optval_addr, 1);
2469             if (put_user_u32(len, optlen)) {
2470                 return -TARGET_EFAULT;
2471             }
2472             break;
2473         }
2474         case TARGET_SO_PEERSEC: {
2475             char *name;
2476 
2477             if (get_user_u32(len, optlen)) {
2478                 return -TARGET_EFAULT;
2479             }
2480             if (len < 0) {
2481                 return -TARGET_EINVAL;
2482             }
2483             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2484             if (!name) {
2485                 return -TARGET_EFAULT;
2486             }
2487             lv = len;
2488             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2489                                        name, &lv));
2490             if (put_user_u32(lv, optlen)) {
2491                 ret = -TARGET_EFAULT;
2492             }
2493             unlock_user(name, optval_addr, lv);
2494             break;
2495         }
2496         case TARGET_SO_LINGER:
2497         {
2498             struct linger lg;
2499             socklen_t lglen;
2500             struct target_linger *tlg;
2501 
2502             if (get_user_u32(len, optlen)) {
2503                 return -TARGET_EFAULT;
2504             }
2505             if (len < 0) {
2506                 return -TARGET_EINVAL;
2507             }
2508 
2509             lglen = sizeof(lg);
2510             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2511                                        &lg, &lglen));
2512             if (ret < 0) {
2513                 return ret;
2514             }
2515             if (len > lglen) {
2516                 len = lglen;
2517             }
2518             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2519                 return -TARGET_EFAULT;
2520             }
2521             __put_user(lg.l_onoff, &tlg->l_onoff);
2522             __put_user(lg.l_linger, &tlg->l_linger);
2523             unlock_user_struct(tlg, optval_addr, 1);
2524             if (put_user_u32(len, optlen)) {
2525                 return -TARGET_EFAULT;
2526             }
2527             break;
2528         }
2529         /* Options with 'int' argument.  */
2530         case TARGET_SO_DEBUG:
2531             optname = SO_DEBUG;
2532             goto int_case;
2533         case TARGET_SO_REUSEADDR:
2534             optname = SO_REUSEADDR;
2535             goto int_case;
2536 #ifdef SO_REUSEPORT
2537         case TARGET_SO_REUSEPORT:
2538             optname = SO_REUSEPORT;
2539             goto int_case;
2540 #endif
2541         case TARGET_SO_TYPE:
2542             optname = SO_TYPE;
2543             goto int_case;
2544         case TARGET_SO_ERROR:
2545             optname = SO_ERROR;
2546             goto int_case;
2547         case TARGET_SO_DONTROUTE:
2548             optname = SO_DONTROUTE;
2549             goto int_case;
2550         case TARGET_SO_BROADCAST:
2551             optname = SO_BROADCAST;
2552             goto int_case;
2553         case TARGET_SO_SNDBUF:
2554             optname = SO_SNDBUF;
2555             goto int_case;
2556         case TARGET_SO_RCVBUF:
2557             optname = SO_RCVBUF;
2558             goto int_case;
2559         case TARGET_SO_KEEPALIVE:
2560             optname = SO_KEEPALIVE;
2561             goto int_case;
2562         case TARGET_SO_OOBINLINE:
2563             optname = SO_OOBINLINE;
2564             goto int_case;
2565         case TARGET_SO_NO_CHECK:
2566             optname = SO_NO_CHECK;
2567             goto int_case;
2568         case TARGET_SO_PRIORITY:
2569             optname = SO_PRIORITY;
2570             goto int_case;
2571 #ifdef SO_BSDCOMPAT
2572         case TARGET_SO_BSDCOMPAT:
2573             optname = SO_BSDCOMPAT;
2574             goto int_case;
2575 #endif
2576         case TARGET_SO_PASSCRED:
2577             optname = SO_PASSCRED;
2578             goto int_case;
2579         case TARGET_SO_TIMESTAMP:
2580             optname = SO_TIMESTAMP;
2581             goto int_case;
2582         case TARGET_SO_RCVLOWAT:
2583             optname = SO_RCVLOWAT;
2584             goto int_case;
2585         case TARGET_SO_ACCEPTCONN:
2586             optname = SO_ACCEPTCONN;
2587             goto int_case;
2588         default:
2589             goto int_case;
2590         }
2591         break;
2592     case SOL_TCP:
2593         /* TCP options all take an 'int' value.  */
2594     int_case:
2595         if (get_user_u32(len, optlen))
2596             return -TARGET_EFAULT;
2597         if (len < 0)
2598             return -TARGET_EINVAL;
2599         lv = sizeof(lv);
2600         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2601         if (ret < 0)
2602             return ret;
2603         if (optname == SO_TYPE) {
2604             val = host_to_target_sock_type(val);
2605         }
2606         if (len > lv)
2607             len = lv;
2608         if (len == 4) {
2609             if (put_user_u32(val, optval_addr))
2610                 return -TARGET_EFAULT;
2611         } else {
2612             if (put_user_u8(val, optval_addr))
2613                 return -TARGET_EFAULT;
2614         }
2615         if (put_user_u32(len, optlen))
2616             return -TARGET_EFAULT;
2617         break;
2618     case SOL_IP:
2619         switch(optname) {
2620         case IP_TOS:
2621         case IP_TTL:
2622         case IP_HDRINCL:
2623         case IP_ROUTER_ALERT:
2624         case IP_RECVOPTS:
2625         case IP_RETOPTS:
2626         case IP_PKTINFO:
2627         case IP_MTU_DISCOVER:
2628         case IP_RECVERR:
2629         case IP_RECVTOS:
2630 #ifdef IP_FREEBIND
2631         case IP_FREEBIND:
2632 #endif
2633         case IP_MULTICAST_TTL:
2634         case IP_MULTICAST_LOOP:
2635             if (get_user_u32(len, optlen))
2636                 return -TARGET_EFAULT;
2637             if (len < 0)
2638                 return -TARGET_EINVAL;
2639             lv = sizeof(lv);
2640             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2641             if (ret < 0)
2642                 return ret;
2643             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2644                 len = 1;
2645                 if (put_user_u32(len, optlen)
2646                     || put_user_u8(val, optval_addr))
2647                     return -TARGET_EFAULT;
2648             } else {
2649                 if (len > sizeof(int))
2650                     len = sizeof(int);
2651                 if (put_user_u32(len, optlen)
2652                     || put_user_u32(val, optval_addr))
2653                     return -TARGET_EFAULT;
2654             }
2655             break;
2656         default:
2657             ret = -TARGET_ENOPROTOOPT;
2658             break;
2659         }
2660         break;
2661     case SOL_IPV6:
2662         switch (optname) {
2663         case IPV6_MTU_DISCOVER:
2664         case IPV6_MTU:
2665         case IPV6_V6ONLY:
2666         case IPV6_RECVPKTINFO:
2667         case IPV6_UNICAST_HOPS:
2668         case IPV6_MULTICAST_HOPS:
2669         case IPV6_MULTICAST_LOOP:
2670         case IPV6_RECVERR:
2671         case IPV6_RECVHOPLIMIT:
2672         case IPV6_2292HOPLIMIT:
2673         case IPV6_CHECKSUM:
2674         case IPV6_ADDRFORM:
2675         case IPV6_2292PKTINFO:
2676         case IPV6_RECVTCLASS:
2677         case IPV6_RECVRTHDR:
2678         case IPV6_2292RTHDR:
2679         case IPV6_RECVHOPOPTS:
2680         case IPV6_2292HOPOPTS:
2681         case IPV6_RECVDSTOPTS:
2682         case IPV6_2292DSTOPTS:
2683         case IPV6_TCLASS:
2684 #ifdef IPV6_RECVPATHMTU
2685         case IPV6_RECVPATHMTU:
2686 #endif
2687 #ifdef IPV6_TRANSPARENT
2688         case IPV6_TRANSPARENT:
2689 #endif
2690 #ifdef IPV6_FREEBIND
2691         case IPV6_FREEBIND:
2692 #endif
2693 #ifdef IPV6_RECVORIGDSTADDR
2694         case IPV6_RECVORIGDSTADDR:
2695 #endif
2696             if (get_user_u32(len, optlen))
2697                 return -TARGET_EFAULT;
2698             if (len < 0)
2699                 return -TARGET_EINVAL;
2700             lv = sizeof(lv);
2701             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2702             if (ret < 0)
2703                 return ret;
2704             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2705                 len = 1;
2706                 if (put_user_u32(len, optlen)
2707                     || put_user_u8(val, optval_addr))
2708                     return -TARGET_EFAULT;
2709             } else {
2710                 if (len > sizeof(int))
2711                     len = sizeof(int);
2712                 if (put_user_u32(len, optlen)
2713                     || put_user_u32(val, optval_addr))
2714                     return -TARGET_EFAULT;
2715             }
2716             break;
2717         default:
2718             ret = -TARGET_ENOPROTOOPT;
2719             break;
2720         }
2721         break;
2722 #ifdef SOL_NETLINK
2723     case SOL_NETLINK:
2724         switch (optname) {
2725         case NETLINK_PKTINFO:
2726         case NETLINK_BROADCAST_ERROR:
2727         case NETLINK_NO_ENOBUFS:
2728 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2729         case NETLINK_LISTEN_ALL_NSID:
2730         case NETLINK_CAP_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2733         case NETLINK_EXT_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2736         case NETLINK_GET_STRICT_CHK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2738             if (get_user_u32(len, optlen)) {
2739                 return -TARGET_EFAULT;
2740             }
2741             if (len != sizeof(val)) {
2742                 return -TARGET_EINVAL;
2743             }
2744             lv = len;
2745             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2746             if (ret < 0) {
2747                 return ret;
2748             }
2749             if (put_user_u32(lv, optlen)
2750                 || put_user_u32(val, optval_addr)) {
2751                 return -TARGET_EFAULT;
2752             }
2753             break;
2754 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2755         case NETLINK_LIST_MEMBERSHIPS:
2756         {
2757             uint32_t *results;
2758             int i;
2759             if (get_user_u32(len, optlen)) {
2760                 return -TARGET_EFAULT;
2761             }
2762             if (len < 0) {
2763                 return -TARGET_EINVAL;
2764             }
2765             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2766             if (!results) {
2767                 return -TARGET_EFAULT;
2768             }
2769             lv = len;
2770             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2771             if (ret < 0) {
2772                 unlock_user(results, optval_addr, 0);
2773                 return ret;
2774             }
2775             /* swap host endianness to target endianness. */
2776             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2777                 results[i] = tswap32(results[i]);
2778             }
2779             if (put_user_u32(lv, optlen)) {
                     unlock_user(results, optval_addr, 0);
2780                 return -TARGET_EFAULT;
2781             }
2782             unlock_user(results, optval_addr, 0);
2783             break;
2784         }
2785 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2786         default:
2787             goto unimplemented;
2788         }
2789         break;
2790 #endif /* SOL_NETLINK */
2791     default:
2792     unimplemented:
2793         qemu_log_mask(LOG_UNIMP,
2794                       "getsockopt level=%d optname=%d not yet supported\n",
2795                       level, optname);
2796         ret = -TARGET_EOPNOTSUPP;
2797         break;
2798     }
2799     return ret;
2800 }
2801 
2802 /* Convert target low/high pair representing file offset into the host
2803  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2804  * as the kernel doesn't handle them either.
2805  */
2806 static void target_to_host_low_high(abi_ulong tlow,
2807                                     abi_ulong thigh,
2808                                     unsigned long *hlow,
2809                                     unsigned long *hhigh)
2810 {
2811     uint64_t off = tlow |
2812         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2813         TARGET_LONG_BITS / 2;
2814 
2815     *hlow = off;
2816     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2817 }
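     /*
      * Worked example (editorial; assumes a 32-bit target): with
      * tlow = 0x89abcdef and thigh = 0x01234567 the combined offset is
      * off = 0x0123456789abcdef.  On a 64-bit host the whole value fits in
      * *hlow and *hhigh becomes 0; on a 32-bit host it splits back into
      * *hlow = 0x89abcdef and *hhigh = 0x01234567.
      */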
2818 
2819 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2820                                 abi_ulong count, int copy)
2821 {
2822     struct target_iovec *target_vec;
2823     struct iovec *vec;
2824     abi_ulong total_len, max_len;
2825     int i;
2826     int err = 0;
2827     bool bad_address = false;
2828 
2829     if (count == 0) {
2830         errno = 0;
2831         return NULL;
2832     }
2833     if (count > IOV_MAX) {
2834         errno = EINVAL;
2835         return NULL;
2836     }
2837 
2838     vec = g_try_new0(struct iovec, count);
2839     if (vec == NULL) {
2840         errno = ENOMEM;
2841         return NULL;
2842     }
2843 
2844     target_vec = lock_user(VERIFY_READ, target_addr,
2845                            count * sizeof(struct target_iovec), 1);
2846     if (target_vec == NULL) {
2847         err = EFAULT;
2848         goto fail2;
2849     }
2850 
2851     /* ??? If host page size > target page size, this will result in a
2852        value larger than what we can actually support.  */
2853     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2854     total_len = 0;
2855 
2856     for (i = 0; i < count; i++) {
2857         abi_ulong base = tswapal(target_vec[i].iov_base);
2858         abi_long len = tswapal(target_vec[i].iov_len);
2859 
2860         if (len < 0) {
2861             err = EINVAL;
2862             goto fail;
2863         } else if (len == 0) {
2864             /* Zero length pointer is ignored.  */
2865             vec[i].iov_base = 0;
2866         } else {
2867             vec[i].iov_base = lock_user(type, base, len, copy);
2868             /* If the first buffer pointer is bad, this is a fault.  But
2869              * subsequent bad buffers will result in a partial write; this
2870              * is realized by filling the vector with null pointers and
2871              * zero lengths. */
2872             if (!vec[i].iov_base) {
2873                 if (i == 0) {
2874                     err = EFAULT;
2875                     goto fail;
2876                 } else {
2877                     bad_address = true;
2878                 }
2879             }
2880             if (bad_address) {
2881                 len = 0;
2882             }
2883             if (len > max_len - total_len) {
2884                 len = max_len - total_len;
2885             }
2886         }
2887         vec[i].iov_len = len;
2888         total_len += len;
2889     }
2890 
2891     unlock_user(target_vec, target_addr, 0);
2892     return vec;
2893 
2894  fail:
2895     while (--i >= 0) {
2896         if (tswapal(target_vec[i].iov_len) > 0) {
2897             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2898         }
2899     }
2900     unlock_user(target_vec, target_addr, 0);
2901  fail2:
2902     g_free(vec);
2903     errno = err;
2904     return NULL;
2905 }
2906 
2907 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2908                          abi_ulong count, int copy)
2909 {
2910     struct target_iovec *target_vec;
2911     int i;
2912 
2913     target_vec = lock_user(VERIFY_READ, target_addr,
2914                            count * sizeof(struct target_iovec), 1);
2915     if (target_vec) {
2916         for (i = 0; i < count; i++) {
2917             abi_ulong base = tswapal(target_vec[i].iov_base);
2918             abi_long len = tswapal(target_vec[i].iov_len);
2919             if (len < 0) {
2920                 break;
2921             }
2922             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2923         }
2924         unlock_user(target_vec, target_addr, 0);
2925     }
2926 
2927     g_free(vec);
2928 }
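     /*
      * Editorial sketch of the intended pairing (mirrors the readv/writev
      * style paths later in this file; fd and count are placeholders):
      *
      *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_vec, count, 0);
      *     if (vec == NULL) {
      *         return -host_to_target_errno(errno);
      *     }
      *     ret = get_errno(safe_readv(fd, vec, count));
      *     unlock_iovec(vec, target_vec, count, 1);
      *
      * The final "copy" argument controls whether the locked buffers are
      * written back to guest memory: 1 after a read-style call, 0 after a
      * write-style call.
      */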
2929 
2930 static inline int target_to_host_sock_type(int *type)
2931 {
2932     int host_type = 0;
2933     int target_type = *type;
2934 
2935     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2936     case TARGET_SOCK_DGRAM:
2937         host_type = SOCK_DGRAM;
2938         break;
2939     case TARGET_SOCK_STREAM:
2940         host_type = SOCK_STREAM;
2941         break;
2942     default:
2943         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2944         break;
2945     }
2946     if (target_type & TARGET_SOCK_CLOEXEC) {
2947 #if defined(SOCK_CLOEXEC)
2948         host_type |= SOCK_CLOEXEC;
2949 #else
2950         return -TARGET_EINVAL;
2951 #endif
2952     }
2953     if (target_type & TARGET_SOCK_NONBLOCK) {
2954 #if defined(SOCK_NONBLOCK)
2955         host_type |= SOCK_NONBLOCK;
2956 #elif !defined(O_NONBLOCK)
2957         return -TARGET_EINVAL;
2958 #endif
2959     }
2960     *type = host_type;
2961     return 0;
2962 }
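     /*
      * Editorial example: a guest asking for TARGET_SOCK_STREAM |
      * TARGET_SOCK_CLOEXEC ends up with host SOCK_STREAM | SOCK_CLOEXEC here;
      * if the host lacks SOCK_NONBLOCK, TARGET_SOCK_NONBLOCK is left for
      * sock_flags_fixup() below to emulate with fcntl(O_NONBLOCK).
      */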
2963 
2964 /* Try to emulate socket type flags after socket creation.  */
2965 static int sock_flags_fixup(int fd, int target_type)
2966 {
2967 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2968     if (target_type & TARGET_SOCK_NONBLOCK) {
2969         int flags = fcntl(fd, F_GETFL);
2970         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2971             close(fd);
2972             return -TARGET_EINVAL;
2973         }
2974     }
2975 #endif
2976     return fd;
2977 }
2978 
2979 /* do_socket() Must return target values and target errnos. */
2980 static abi_long do_socket(int domain, int type, int protocol)
2981 {
2982     int target_type = type;
2983     int ret;
2984 
2985     ret = target_to_host_sock_type(&type);
2986     if (ret) {
2987         return ret;
2988     }
2989 
2990     if (domain == PF_NETLINK && !(
2991 #ifdef CONFIG_RTNETLINK
2992          protocol == NETLINK_ROUTE ||
2993 #endif
2994          protocol == NETLINK_KOBJECT_UEVENT ||
2995          protocol == NETLINK_AUDIT)) {
2996         return -TARGET_EPROTONOSUPPORT;
2997     }
2998 
2999     if (domain == AF_PACKET ||
3000         (domain == AF_INET && type == SOCK_PACKET)) {
3001         protocol = tswap16(protocol);
3002     }
3003 
3004     ret = get_errno(socket(domain, type, protocol));
3005     if (ret >= 0) {
3006         ret = sock_flags_fixup(ret, target_type);
3007         if (type == SOCK_PACKET) {
3008             /* Handle an obsolete case: SOCK_PACKET sockets are
3009              * bound by interface name rather than by sockaddr_ll.
3010              */
3011             fd_trans_register(ret, &target_packet_trans);
3012         } else if (domain == PF_NETLINK) {
3013             switch (protocol) {
3014 #ifdef CONFIG_RTNETLINK
3015             case NETLINK_ROUTE:
3016                 fd_trans_register(ret, &target_netlink_route_trans);
3017                 break;
3018 #endif
3019             case NETLINK_KOBJECT_UEVENT:
3020                 /* nothing to do: messages are strings */
3021                 break;
3022             case NETLINK_AUDIT:
3023                 fd_trans_register(ret, &target_netlink_audit_trans);
3024                 break;
3025             default:
3026                 g_assert_not_reached();
3027             }
3028         }
3029     }
3030     return ret;
3031 }
3032 
3033 /* do_bind() Must return target values and target errnos. */
3034 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3035                         socklen_t addrlen)
3036 {
3037     void *addr;
3038     abi_long ret;
3039 
3040     if ((int)addrlen < 0) {
3041         return -TARGET_EINVAL;
3042     }
3043 
3044     addr = alloca(addrlen+1);
3045 
3046     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3047     if (ret)
3048         return ret;
3049 
3050     return get_errno(bind(sockfd, addr, addrlen));
3051 }
3052 
3053 /* do_connect() Must return target values and target errnos. */
3054 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3055                            socklen_t addrlen)
3056 {
3057     void *addr;
3058     abi_long ret;
3059 
3060     if ((int)addrlen < 0) {
3061         return -TARGET_EINVAL;
3062     }
3063 
3064     addr = alloca(addrlen+1);
3065 
3066     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3067     if (ret)
3068         return ret;
3069 
3070     return get_errno(safe_connect(sockfd, addr, addrlen));
3071 }
3072 
3073 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3074 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3075                                       int flags, int send)
3076 {
3077     abi_long ret, len;
3078     struct msghdr msg;
3079     abi_ulong count;
3080     struct iovec *vec;
3081     abi_ulong target_vec;
3082 
3083     if (msgp->msg_name) {
3084         msg.msg_namelen = tswap32(msgp->msg_namelen);
3085         msg.msg_name = alloca(msg.msg_namelen+1);
3086         ret = target_to_host_sockaddr(fd, msg.msg_name,
3087                                       tswapal(msgp->msg_name),
3088                                       msg.msg_namelen);
3089         if (ret == -TARGET_EFAULT) {
3090             /* For connected sockets msg_name and msg_namelen must
3091              * be ignored, so returning EFAULT immediately is wrong.
3092              * Instead, pass a bad msg_name to the host kernel, and
3093              * let it decide whether to return EFAULT or not.
3094              */
3095             msg.msg_name = (void *)-1;
3096         } else if (ret) {
3097             goto out2;
3098         }
3099     } else {
3100         msg.msg_name = NULL;
3101         msg.msg_namelen = 0;
3102     }
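         /* Host control messages may need more room than the target's
          * (larger headers and alignment), so allow twice the target
          * controllen. */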
3103     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3104     msg.msg_control = alloca(msg.msg_controllen);
3105     memset(msg.msg_control, 0, msg.msg_controllen);
3106 
3107     msg.msg_flags = tswap32(msgp->msg_flags);
3108 
3109     count = tswapal(msgp->msg_iovlen);
3110     target_vec = tswapal(msgp->msg_iov);
3111 
3112     if (count > IOV_MAX) {
3113         /* sendmsg/recvmsg return a different errno for this condition than
3114          * readv/writev, so we must catch it here before lock_iovec() does.
3115          */
3116         ret = -TARGET_EMSGSIZE;
3117         goto out2;
3118     }
3119 
3120     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3121                      target_vec, count, send);
3122     if (vec == NULL) {
3123         ret = -host_to_target_errno(errno);
3124         goto out2;
3125     }
3126     msg.msg_iovlen = count;
3127     msg.msg_iov = vec;
3128 
3129     if (send) {
3130         if (fd_trans_target_to_host_data(fd)) {
3131             void *host_msg;
3132 
3133             host_msg = g_malloc(msg.msg_iov->iov_len);
3134             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3135             ret = fd_trans_target_to_host_data(fd)(host_msg,
3136                                                    msg.msg_iov->iov_len);
3137             if (ret >= 0) {
3138                 msg.msg_iov->iov_base = host_msg;
3139                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3140             }
3141             g_free(host_msg);
3142         } else {
3143             ret = target_to_host_cmsg(&msg, msgp);
3144             if (ret == 0) {
3145                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3146             }
3147         }
3148     } else {
3149         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3150         if (!is_error(ret)) {
3151             len = ret;
3152             if (fd_trans_host_to_target_data(fd)) {
3153                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3154                                                MIN(msg.msg_iov->iov_len, len));
3155             } else {
3156                 ret = host_to_target_cmsg(msgp, &msg);
3157             }
3158             if (!is_error(ret)) {
3159                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3160                 msgp->msg_flags = tswap32(msg.msg_flags);
3161                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3162                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3163                                     msg.msg_name, msg.msg_namelen);
3164                     if (ret) {
3165                         goto out;
3166                     }
3167                 }
3168 
3169                 ret = len;
3170             }
3171         }
3172     }
3173 
3174 out:
3175     unlock_iovec(vec, target_vec, count, !send);
3176 out2:
3177     return ret;
3178 }
3179 
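 /* do_sendrecvmsg() Must return target values and target errnos. */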
3180 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3181                                int flags, int send)
3182 {
3183     abi_long ret;
3184     struct target_msghdr *msgp;
3185 
3186     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3187                           msgp,
3188                           target_msg,
3189                           send ? 1 : 0)) {
3190         return -TARGET_EFAULT;
3191     }
3192     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3193     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3194     return ret;
3195 }
3196 
3197 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3198  * so it might not have this *mmsg-specific flag either.
3199  */
3200 #ifndef MSG_WAITFORONE
3201 #define MSG_WAITFORONE 0x10000
3202 #endif
3203 
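 /* do_sendrecvmmsg() Must return target values and target errnos.
  * Emulates sendmmsg/recvmmsg by calling do_sendrecvmsg_locked() once per
  * target_mmsghdr; returns the number of datagrams handled if any succeeded,
  * otherwise the error.
  */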
3204 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3205                                 unsigned int vlen, unsigned int flags,
3206                                 int send)
3207 {
3208     struct target_mmsghdr *mmsgp;
3209     abi_long ret = 0;
3210     int i;
3211 
3212     if (vlen > UIO_MAXIOV) {
3213         vlen = UIO_MAXIOV;
3214     }
3215 
3216     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3217     if (!mmsgp) {
3218         return -TARGET_EFAULT;
3219     }
3220 
3221     for (i = 0; i < vlen; i++) {
3222         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3223         if (is_error(ret)) {
3224             break;
3225         }
3226         mmsgp[i].msg_len = tswap32(ret);
3227         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3228         if (flags & MSG_WAITFORONE) {
3229             flags |= MSG_DONTWAIT;
3230         }
3231     }
3232 
3233     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3234 
3235     /* Return the number of datagrams sent or received if we handled
3236      * any at all; otherwise return the error.
3237      */
3238     if (i) {
3239         return i;
3240     }
3241     return ret;
3242 }
3243 
3244 /* do_accept4() Must return target values and target errnos. */
3245 static abi_long do_accept4(int fd, abi_ulong target_addr,
3246                            abi_ulong target_addrlen_addr, int flags)
3247 {
3248     socklen_t addrlen, ret_addrlen;
3249     void *addr;
3250     abi_long ret;
3251     int host_flags;
3252 
3253     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3254 
3255     if (target_addr == 0) {
3256         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3257     }
3258 
3259     /* Linux returns EINVAL if the addrlen pointer is invalid */
3260     if (get_user_u32(addrlen, target_addrlen_addr))
3261         return -TARGET_EINVAL;
3262 
3263     if ((int)addrlen < 0) {
3264         return -TARGET_EINVAL;
3265     }
3266 
3267     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3268         return -TARGET_EINVAL;
3269 
3270     addr = alloca(addrlen);
3271 
3272     ret_addrlen = addrlen;
3273     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3274     if (!is_error(ret)) {
3275         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3276         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3277             ret = -TARGET_EFAULT;
3278         }
3279     }
3280     return ret;
3281 }
3282 
3283 /* do_getpeername() Must return target values and target errnos. */
3284 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3285                                abi_ulong target_addrlen_addr)
3286 {
3287     socklen_t addrlen, ret_addrlen;
3288     void *addr;
3289     abi_long ret;
3290 
3291     if (get_user_u32(addrlen, target_addrlen_addr))
3292         return -TARGET_EFAULT;
3293 
3294     if ((int)addrlen < 0) {
3295         return -TARGET_EINVAL;
3296     }
3297 
3298     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3299         return -TARGET_EFAULT;
3300 
3301     addr = alloca(addrlen);
3302 
3303     ret_addrlen = addrlen;
3304     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3305     if (!is_error(ret)) {
3306         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3307         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3308             ret = -TARGET_EFAULT;
3309         }
3310     }
3311     return ret;
3312 }
3313 
3314 /* do_getsockname() Must return target values and target errnos. */
3315 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3316                                abi_ulong target_addrlen_addr)
3317 {
3318     socklen_t addrlen, ret_addrlen;
3319     void *addr;
3320     abi_long ret;
3321 
3322     if (get_user_u32(addrlen, target_addrlen_addr))
3323         return -TARGET_EFAULT;
3324 
3325     if ((int)addrlen < 0) {
3326         return -TARGET_EINVAL;
3327     }
3328 
3329     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3330         return -TARGET_EFAULT;
3331 
3332     addr = alloca(addrlen);
3333 
3334     ret_addrlen = addrlen;
3335     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3336     if (!is_error(ret)) {
3337         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3338         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3339             ret = -TARGET_EFAULT;
3340         }
3341     }
3342     return ret;
3343 }
3344 
3345 /* do_socketpair() Must return target values and target errnos. */
3346 static abi_long do_socketpair(int domain, int type, int protocol,
3347                               abi_ulong target_tab_addr)
3348 {
3349     int tab[2];
3350     abi_long ret;
3351 
3352     target_to_host_sock_type(&type);
3353 
3354     ret = get_errno(socketpair(domain, type, protocol, tab));
3355     if (!is_error(ret)) {
3356         if (put_user_s32(tab[0], target_tab_addr)
3357             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3358             ret = -TARGET_EFAULT;
3359     }
3360     return ret;
3361 }
3362 
3363 /* do_sendto() Must return target values and target errnos. */
3364 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3365                           abi_ulong target_addr, socklen_t addrlen)
3366 {
3367     void *addr;
3368     void *host_msg;
3369     void *copy_msg = NULL;
3370     abi_long ret;
3371 
3372     if ((int)addrlen < 0) {
3373         return -TARGET_EINVAL;
3374     }
3375 
3376     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3377     if (!host_msg)
3378         return -TARGET_EFAULT;
3379     if (fd_trans_target_to_host_data(fd)) {
3380         copy_msg = host_msg;
3381         host_msg = g_malloc(len);
3382         memcpy(host_msg, copy_msg, len);
3383         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3384         if (ret < 0) {
3385             goto fail;
3386         }
3387     }
3388     if (target_addr) {
3389         addr = alloca(addrlen+1);
3390         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3391         if (ret) {
3392             goto fail;
3393         }
3394         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3395     } else {
3396         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3397     }
3398 fail:
3399     if (copy_msg) {
3400         g_free(host_msg);
3401         host_msg = copy_msg;
3402     }
3403     unlock_user(host_msg, msg, 0);
3404     return ret;
3405 }
3406 
3407 /* do_recvfrom() Must return target values and target errnos. */
3408 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3409                             abi_ulong target_addr,
3410                             abi_ulong target_addrlen)
3411 {
3412     socklen_t addrlen, ret_addrlen;
3413     void *addr;
3414     void *host_msg;
3415     abi_long ret;
3416 
3417     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3418     if (!host_msg)
3419         return -TARGET_EFAULT;
3420     if (target_addr) {
3421         if (get_user_u32(addrlen, target_addrlen)) {
3422             ret = -TARGET_EFAULT;
3423             goto fail;
3424         }
3425         if ((int)addrlen < 0) {
3426             ret = -TARGET_EINVAL;
3427             goto fail;
3428         }
3429         addr = alloca(addrlen);
3430         ret_addrlen = addrlen;
3431         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3432                                       addr, &ret_addrlen));
3433     } else {
3434         addr = NULL; /* To keep compiler quiet.  */
3435         addrlen = 0; /* To keep compiler quiet.  */
3436         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3437     }
3438     if (!is_error(ret)) {
3439         if (fd_trans_host_to_target_data(fd)) {
3440             abi_long trans;
3441             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3442             if (is_error(trans)) {
3443                 ret = trans;
3444                 goto fail;
3445             }
3446         }
3447         if (target_addr) {
3448             host_to_target_sockaddr(target_addr, addr,
3449                                     MIN(addrlen, ret_addrlen));
3450             if (put_user_u32(ret_addrlen, target_addrlen)) {
3451                 ret = -TARGET_EFAULT;
3452                 goto fail;
3453             }
3454         }
3455         unlock_user(host_msg, msg, len);
3456     } else {
3457 fail:
3458         unlock_user(host_msg, msg, 0);
3459     }
3460     return ret;
3461 }
3462 
3463 #ifdef TARGET_NR_socketcall
3464 /* do_socketcall() must return target values and target errnos. */
3465 static abi_long do_socketcall(int num, abi_ulong vptr)
3466 {
3467     static const unsigned nargs[] = { /* number of arguments per operation */
3468         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3469         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3470         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3471         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3472         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3473         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3474         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3475         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3476         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3477         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3478         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3479         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3480         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3481         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3482         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3483         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3484         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3485         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3486         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3487         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3488     };
3489     abi_long a[6]; /* max 6 args */
3490     unsigned i;
3491 
3492     /* check the range of the first argument num */
3493     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3494     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3495         return -TARGET_EINVAL;
3496     }
3497     /* ensure we have space for args */
3498     if (nargs[num] > ARRAY_SIZE(a)) {
3499         return -TARGET_EINVAL;
3500     }
3501     /* collect the arguments in a[] according to nargs[] */
3502     for (i = 0; i < nargs[num]; ++i) {
3503         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3504             return -TARGET_EFAULT;
3505         }
3506     }
3507     /* now when we have the args, invoke the appropriate underlying function */
3508     switch (num) {
3509     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3510         return do_socket(a[0], a[1], a[2]);
3511     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3512         return do_bind(a[0], a[1], a[2]);
3513     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3514         return do_connect(a[0], a[1], a[2]);
3515     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3516         return get_errno(listen(a[0], a[1]));
3517     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3518         return do_accept4(a[0], a[1], a[2], 0);
3519     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3520         return do_getsockname(a[0], a[1], a[2]);
3521     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3522         return do_getpeername(a[0], a[1], a[2]);
3523     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3524         return do_socketpair(a[0], a[1], a[2], a[3]);
3525     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3526         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3527     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3528         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3529     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3530         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3531     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3532         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3533     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3534         return get_errno(shutdown(a[0], a[1]));
3535     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3536         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3537     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3538         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3539     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3540         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3541     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3542         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3543     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3544         return do_accept4(a[0], a[1], a[2], a[3]);
3545     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3546         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3547     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3548         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3549     default:
3550         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3551         return -TARGET_EINVAL;
3552     }
3553 }
3554 #endif
3555 
3556 #define N_SHM_REGIONS	32
3557 
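 /* Table of guest shmat() attachments, so that do_shmdt() can find the size
  * of a segment and clear its page flags on detach. */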
3558 static struct shm_region {
3559     abi_ulong start;
3560     abi_ulong size;
3561     bool in_use;
3562 } shm_regions[N_SHM_REGIONS];
3563 
3564 #ifndef TARGET_SEMID64_DS
3565 /* asm-generic version of this struct */
3566 struct target_semid64_ds
3567 {
3568   struct target_ipc_perm sem_perm;
3569   abi_ulong sem_otime;
3570 #if TARGET_ABI_BITS == 32
3571   abi_ulong __unused1;
3572 #endif
3573   abi_ulong sem_ctime;
3574 #if TARGET_ABI_BITS == 32
3575   abi_ulong __unused2;
3576 #endif
3577   abi_ulong sem_nsems;
3578   abi_ulong __unused3;
3579   abi_ulong __unused4;
3580 };
3581 #endif
3582 
3583 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3584                                                abi_ulong target_addr)
3585 {
3586     struct target_ipc_perm *target_ip;
3587     struct target_semid64_ds *target_sd;
3588 
3589     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3590         return -TARGET_EFAULT;
3591     target_ip = &(target_sd->sem_perm);
3592     host_ip->__key = tswap32(target_ip->__key);
3593     host_ip->uid = tswap32(target_ip->uid);
3594     host_ip->gid = tswap32(target_ip->gid);
3595     host_ip->cuid = tswap32(target_ip->cuid);
3596     host_ip->cgid = tswap32(target_ip->cgid);
3597 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3598     host_ip->mode = tswap32(target_ip->mode);
3599 #else
3600     host_ip->mode = tswap16(target_ip->mode);
3601 #endif
3602 #if defined(TARGET_PPC)
3603     host_ip->__seq = tswap32(target_ip->__seq);
3604 #else
3605     host_ip->__seq = tswap16(target_ip->__seq);
3606 #endif
3607     unlock_user_struct(target_sd, target_addr, 0);
3608     return 0;
3609 }
3610 
3611 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3612                                                struct ipc_perm *host_ip)
3613 {
3614     struct target_ipc_perm *target_ip;
3615     struct target_semid64_ds *target_sd;
3616 
3617     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3618         return -TARGET_EFAULT;
3619     target_ip = &(target_sd->sem_perm);
3620     target_ip->__key = tswap32(host_ip->__key);
3621     target_ip->uid = tswap32(host_ip->uid);
3622     target_ip->gid = tswap32(host_ip->gid);
3623     target_ip->cuid = tswap32(host_ip->cuid);
3624     target_ip->cgid = tswap32(host_ip->cgid);
3625 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3626     target_ip->mode = tswap32(host_ip->mode);
3627 #else
3628     target_ip->mode = tswap16(host_ip->mode);
3629 #endif
3630 #if defined(TARGET_PPC)
3631     target_ip->__seq = tswap32(host_ip->__seq);
3632 #else
3633     target_ip->__seq = tswap16(host_ip->__seq);
3634 #endif
3635     unlock_user_struct(target_sd, target_addr, 1);
3636     return 0;
3637 }
3638 
3639 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3640                                                abi_ulong target_addr)
3641 {
3642     struct target_semid64_ds *target_sd;
3643 
3644     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3645         return -TARGET_EFAULT;
3646     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3647         return -TARGET_EFAULT;
3648     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3649     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3650     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3651     unlock_user_struct(target_sd, target_addr, 0);
3652     return 0;
3653 }
3654 
3655 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3656                                                struct semid_ds *host_sd)
3657 {
3658     struct target_semid64_ds *target_sd;
3659 
3660     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3661         return -TARGET_EFAULT;
3662     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3663         return -TARGET_EFAULT;
3664     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3665     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3666     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3667     unlock_user_struct(target_sd, target_addr, 1);
3668     return 0;
3669 }
3670 
3671 struct target_seminfo {
3672     int semmap;
3673     int semmni;
3674     int semmns;
3675     int semmnu;
3676     int semmsl;
3677     int semopm;
3678     int semume;
3679     int semusz;
3680     int semvmx;
3681     int semaem;
3682 };
3683 
3684 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3685                                               struct seminfo *host_seminfo)
3686 {
3687     struct target_seminfo *target_seminfo;
3688     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3689         return -TARGET_EFAULT;
3690     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3691     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3692     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3693     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3694     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3695     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3696     __put_user(host_seminfo->semume, &target_seminfo->semume);
3697     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3698     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3699     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3700     unlock_user_struct(target_seminfo, target_addr, 1);
3701     return 0;
3702 }
3703 
3704 union semun {
3705 	int val;
3706 	struct semid_ds *buf;
3707 	unsigned short *array;
3708 	struct seminfo *__buf;
3709 };
3710 
3711 union target_semun {
3712 	int val;
3713 	abi_ulong buf;
3714 	abi_ulong array;
3715 	abi_ulong __buf;
3716 };
3717 
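 /* Copy the guest's array of semaphore values (for SETALL) into a newly
  * allocated host array; host_to_target_semarray() frees it afterwards. */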
3718 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3719                                                abi_ulong target_addr)
3720 {
3721     int nsems;
3722     unsigned short *array;
3723     union semun semun;
3724     struct semid_ds semid_ds;
3725     int i, ret;
3726 
3727     semun.buf = &semid_ds;
3728 
3729     ret = semctl(semid, 0, IPC_STAT, semun);
3730     if (ret == -1)
3731         return get_errno(ret);
3732 
3733     nsems = semid_ds.sem_nsems;
3734 
3735     *host_array = g_try_new(unsigned short, nsems);
3736     if (!*host_array) {
3737         return -TARGET_ENOMEM;
3738     }
3739     array = lock_user(VERIFY_READ, target_addr,
3740                       nsems*sizeof(unsigned short), 1);
3741     if (!array) {
3742         g_free(*host_array);
3743         return -TARGET_EFAULT;
3744     }
3745 
3746     for(i=0; i<nsems; i++) {
3747         __get_user((*host_array)[i], &array[i]);
3748     }
3749     unlock_user(array, target_addr, 0);
3750 
3751     return 0;
3752 }
3753 
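 /* Copy the host semaphore value array back to the guest (for GETALL) and
  * free the array allocated by target_to_host_semarray(). */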
3754 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3755                                                unsigned short **host_array)
3756 {
3757     int nsems;
3758     unsigned short *array;
3759     union semun semun;
3760     struct semid_ds semid_ds;
3761     int i, ret;
3762 
3763     semun.buf = &semid_ds;
3764 
3765     ret = semctl(semid, 0, IPC_STAT, semun);
3766     if (ret == -1)
3767         return get_errno(ret);
3768 
3769     nsems = semid_ds.sem_nsems;
3770 
3771     array = lock_user(VERIFY_WRITE, target_addr,
3772                       nsems*sizeof(unsigned short), 0);
3773     if (!array)
3774         return -TARGET_EFAULT;
3775 
3776     for(i=0; i<nsems; i++) {
3777         __put_user((*host_array)[i], &array[i]);
3778     }
3779     g_free(*host_array);
3780     unlock_user(array, target_addr, 1);
3781 
3782     return 0;
3783 }
3784 
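 /* do_semctl() Must return target values and target errnos.
  * cmd is masked with 0xff to strip flags such as IPC_64 from the command. */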
3785 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3786                                  abi_ulong target_arg)
3787 {
3788     union target_semun target_su = { .buf = target_arg };
3789     union semun arg;
3790     struct semid_ds dsarg;
3791     unsigned short *array = NULL;
3792     struct seminfo seminfo;
3793     abi_long ret = -TARGET_EINVAL;
3794     abi_long err;
3795     cmd &= 0xff;
3796 
3797     switch( cmd ) {
3798 	case GETVAL:
3799 	case SETVAL:
3800             /* In 64 bit cross-endian situations, we will erroneously pick up
3801              * the wrong half of the union for the "val" element.  To rectify
3802              * this, the entire 8-byte structure is byteswapped, followed by
3803              * a swap of the 4 byte val field. In other cases, the data is
3804              * already in proper host byte order. */
3805 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3806 		target_su.buf = tswapal(target_su.buf);
3807 		arg.val = tswap32(target_su.val);
3808 	    } else {
3809 		arg.val = target_su.val;
3810 	    }
3811             ret = get_errno(semctl(semid, semnum, cmd, arg));
3812             break;
3813 	case GETALL:
3814 	case SETALL:
3815             err = target_to_host_semarray(semid, &array, target_su.array);
3816             if (err)
3817                 return err;
3818             arg.array = array;
3819             ret = get_errno(semctl(semid, semnum, cmd, arg));
3820             err = host_to_target_semarray(semid, target_su.array, &array);
3821             if (err)
3822                 return err;
3823             break;
3824 	case IPC_STAT:
3825 	case IPC_SET:
3826 	case SEM_STAT:
3827             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3828             if (err)
3829                 return err;
3830             arg.buf = &dsarg;
3831             ret = get_errno(semctl(semid, semnum, cmd, arg));
3832             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3833             if (err)
3834                 return err;
3835             break;
3836 	case IPC_INFO:
3837 	case SEM_INFO:
3838             arg.__buf = &seminfo;
3839             ret = get_errno(semctl(semid, semnum, cmd, arg));
3840             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3841             if (err)
3842                 return err;
3843             break;
3844 	case IPC_RMID:
3845 	case GETPID:
3846 	case GETNCNT:
3847 	case GETZCNT:
3848             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3849             break;
3850     }
3851 
3852     return ret;
3853 }
3854 
3855 struct target_sembuf {
3856     unsigned short sem_num;
3857     short sem_op;
3858     short sem_flg;
3859 };
3860 
3861 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3862                                              abi_ulong target_addr,
3863                                              unsigned nsops)
3864 {
3865     struct target_sembuf *target_sembuf;
3866     int i;
3867 
3868     target_sembuf = lock_user(VERIFY_READ, target_addr,
3869                               nsops*sizeof(struct target_sembuf), 1);
3870     if (!target_sembuf)
3871         return -TARGET_EFAULT;
3872 
3873     for(i=0; i<nsops; i++) {
3874         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3875         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3876         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3877     }
3878 
3879     unlock_user(target_sembuf, target_addr, 0);
3880 
3881     return 0;
3882 }
3883 
3884 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3885     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
3886 
3887 /*
3888  * This macro is required to handle the s390 variant, which passes the
3889  * arguments in a different order than the default.
3890  */
3891 #ifdef __s390x__
3892 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3893   (__nsops), (__timeout), (__sops)
3894 #else
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896   (__nsops), 0, (__sops), (__timeout)
3897 #endif
3898 
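 /* Emulate semop()/semtimedop(): 'timeout' is a guest pointer to a target
  * timespec (64-bit layout when time64 is true), or 0 for no timeout. */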
3899 static inline abi_long do_semtimedop(int semid,
3900                                      abi_long ptr,
3901                                      unsigned nsops,
3902                                      abi_long timeout, bool time64)
3903 {
3904     struct sembuf *sops;
3905     struct timespec ts, *pts = NULL;
3906     abi_long ret;
3907 
3908     if (timeout) {
3909         pts = &ts;
3910         if (time64) {
3911             if (target_to_host_timespec64(pts, timeout)) {
3912                 return -TARGET_EFAULT;
3913             }
3914         } else {
3915             if (target_to_host_timespec(pts, timeout)) {
3916                 return -TARGET_EFAULT;
3917             }
3918         }
3919     }
3920 
3921     if (nsops > TARGET_SEMOPM) {
3922         return -TARGET_E2BIG;
3923     }
3924 
3925     sops = g_new(struct sembuf, nsops);
3926 
3927     if (target_to_host_sembuf(sops, ptr, nsops)) {
3928         g_free(sops);
3929         return -TARGET_EFAULT;
3930     }
3931 
3932     ret = -TARGET_ENOSYS;
3933 #ifdef __NR_semtimedop
3934     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3935 #endif
3936 #ifdef __NR_ipc
3937     if (ret == -TARGET_ENOSYS) {
3938         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3939                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3940     }
3941 #endif
3942     g_free(sops);
3943     return ret;
3944 }
3945 #endif
3946 
3947 struct target_msqid_ds
3948 {
3949     struct target_ipc_perm msg_perm;
3950     abi_ulong msg_stime;
3951 #if TARGET_ABI_BITS == 32
3952     abi_ulong __unused1;
3953 #endif
3954     abi_ulong msg_rtime;
3955 #if TARGET_ABI_BITS == 32
3956     abi_ulong __unused2;
3957 #endif
3958     abi_ulong msg_ctime;
3959 #if TARGET_ABI_BITS == 32
3960     abi_ulong __unused3;
3961 #endif
3962     abi_ulong __msg_cbytes;
3963     abi_ulong msg_qnum;
3964     abi_ulong msg_qbytes;
3965     abi_ulong msg_lspid;
3966     abi_ulong msg_lrpid;
3967     abi_ulong __unused4;
3968     abi_ulong __unused5;
3969 };
3970 
3971 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3972                                                abi_ulong target_addr)
3973 {
3974     struct target_msqid_ds *target_md;
3975 
3976     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3977         return -TARGET_EFAULT;
3978     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3979         return -TARGET_EFAULT;
3980     host_md->msg_stime = tswapal(target_md->msg_stime);
3981     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3982     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3983     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3984     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3985     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3986     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3987     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3988     unlock_user_struct(target_md, target_addr, 0);
3989     return 0;
3990 }
3991 
3992 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3993                                                struct msqid_ds *host_md)
3994 {
3995     struct target_msqid_ds *target_md;
3996 
3997     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3998         return -TARGET_EFAULT;
3999     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4000         return -TARGET_EFAULT;
4001     target_md->msg_stime = tswapal(host_md->msg_stime);
4002     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4003     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4004     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4005     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4006     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4007     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4008     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4009     unlock_user_struct(target_md, target_addr, 1);
4010     return 0;
4011 }
4012 
4013 struct target_msginfo {
4014     int msgpool;
4015     int msgmap;
4016     int msgmax;
4017     int msgmnb;
4018     int msgmni;
4019     int msgssz;
4020     int msgtql;
4021     unsigned short int msgseg;
4022 };
4023 
4024 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4025                                               struct msginfo *host_msginfo)
4026 {
4027     struct target_msginfo *target_msginfo;
4028     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4029         return -TARGET_EFAULT;
4030     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4031     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4032     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4033     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4034     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4035     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4036     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4037     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4038     unlock_user_struct(target_msginfo, target_addr, 1);
4039     return 0;
4040 }
4041 
4042 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
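 /* do_msgctl() Must return target values and target errnos. */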
4043 {
4044     struct msqid_ds dsarg;
4045     struct msginfo msginfo;
4046     abi_long ret = -TARGET_EINVAL;
4047 
4048     cmd &= 0xff;
4049 
4050     switch (cmd) {
4051     case IPC_STAT:
4052     case IPC_SET:
4053     case MSG_STAT:
4054         if (target_to_host_msqid_ds(&dsarg,ptr))
4055             return -TARGET_EFAULT;
4056         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4057         if (host_to_target_msqid_ds(ptr,&dsarg))
4058             return -TARGET_EFAULT;
4059         break;
4060     case IPC_RMID:
4061         ret = get_errno(msgctl(msgid, cmd, NULL));
4062         break;
4063     case IPC_INFO:
4064     case MSG_INFO:
4065         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4066         if (host_to_target_msginfo(ptr, &msginfo))
4067             return -TARGET_EFAULT;
4068         break;
4069     }
4070 
4071     return ret;
4072 }
4073 
4074 struct target_msgbuf {
4075     abi_long mtype;
4076     char	mtext[1];
4077 };
4078 
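 /* Copy the guest message into a host msgbuf and send it, preferring the
  * direct msgsnd syscall and falling back to the ipc multiplexer. */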
4079 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4080                                  ssize_t msgsz, int msgflg)
4081 {
4082     struct target_msgbuf *target_mb;
4083     struct msgbuf *host_mb;
4084     abi_long ret = 0;
4085 
4086     if (msgsz < 0) {
4087         return -TARGET_EINVAL;
4088     }
4089 
4090     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4091         return -TARGET_EFAULT;
4092     host_mb = g_try_malloc(msgsz + sizeof(long));
4093     if (!host_mb) {
4094         unlock_user_struct(target_mb, msgp, 0);
4095         return -TARGET_ENOMEM;
4096     }
4097     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4098     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4099     ret = -TARGET_ENOSYS;
4100 #ifdef __NR_msgsnd
4101     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4102 #endif
4103 #ifdef __NR_ipc
4104     if (ret == -TARGET_ENOSYS) {
4105 #ifdef __s390x__
4106         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4107                                  host_mb));
4108 #else
4109         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4110                                  host_mb, 0));
4111 #endif
4112     }
4113 #endif
4114     g_free(host_mb);
4115     unlock_user_struct(target_mb, msgp, 0);
4116 
4117     return ret;
4118 }
4119 
4120 #ifdef __NR_ipc
4121 #if defined(__sparc__)
4122 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4123 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4124 #elif defined(__s390x__)
4125 /* The s390 sys_ipc variant has only five parameters.  */
4126 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4127     ((long int[]){(long int)__msgp, __msgtyp})
4128 #else
4129 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4130     ((long int[]){(long int)__msgp, __msgtyp}), 0
4131 #endif
4132 #endif
4133 
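 /* Receive into a host msgbuf, then copy the message type and text back
  * into the guest's buffer. */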
4134 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4135                                  ssize_t msgsz, abi_long msgtyp,
4136                                  int msgflg)
4137 {
4138     struct target_msgbuf *target_mb;
4139     char *target_mtext;
4140     struct msgbuf *host_mb;
4141     abi_long ret = 0;
4142 
4143     if (msgsz < 0) {
4144         return -TARGET_EINVAL;
4145     }
4146 
4147     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4148         return -TARGET_EFAULT;
4149 
4150     host_mb = g_try_malloc(msgsz + sizeof(long));
4151     if (!host_mb) {
4152         ret = -TARGET_ENOMEM;
4153         goto end;
4154     }
4155     ret = -TARGET_ENOSYS;
4156 #ifdef __NR_msgrcv
4157     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4158 #endif
4159 #ifdef __NR_ipc
4160     if (ret == -TARGET_ENOSYS) {
4161         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4162                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4163     }
4164 #endif
4165 
4166     if (ret > 0) {
4167         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4168         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4169         if (!target_mtext) {
4170             ret = -TARGET_EFAULT;
4171             goto end;
4172         }
4173         memcpy(target_mb->mtext, host_mb->mtext, ret);
4174         unlock_user(target_mtext, target_mtext_addr, ret);
4175     }
4176 
4177     target_mb->mtype = tswapal(host_mb->mtype);
4178 
4179 end:
4180     if (target_mb)
4181         unlock_user_struct(target_mb, msgp, 1);
4182     g_free(host_mb);
4183     return ret;
4184 }
4185 
4186 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4187                                                abi_ulong target_addr)
4188 {
4189     struct target_shmid_ds *target_sd;
4190 
4191     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4192         return -TARGET_EFAULT;
4193     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4194         return -TARGET_EFAULT;
4195     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4196     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4197     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4198     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4199     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4200     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4201     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4202     unlock_user_struct(target_sd, target_addr, 0);
4203     return 0;
4204 }
4205 
4206 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4207                                                struct shmid_ds *host_sd)
4208 {
4209     struct target_shmid_ds *target_sd;
4210 
4211     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4212         return -TARGET_EFAULT;
4213     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4214         return -TARGET_EFAULT;
4215     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4216     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4217     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4218     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4219     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4220     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4221     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4222     unlock_user_struct(target_sd, target_addr, 1);
4223     return 0;
4224 }
4225 
4226 struct  target_shminfo {
4227     abi_ulong shmmax;
4228     abi_ulong shmmin;
4229     abi_ulong shmmni;
4230     abi_ulong shmseg;
4231     abi_ulong shmall;
4232 };
4233 
4234 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4235                                               struct shminfo *host_shminfo)
4236 {
4237     struct target_shminfo *target_shminfo;
4238     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4239         return -TARGET_EFAULT;
4240     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4241     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4242     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4243     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4244     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4245     unlock_user_struct(target_shminfo, target_addr, 1);
4246     return 0;
4247 }
4248 
4249 struct target_shm_info {
4250     int used_ids;
4251     abi_ulong shm_tot;
4252     abi_ulong shm_rss;
4253     abi_ulong shm_swp;
4254     abi_ulong swap_attempts;
4255     abi_ulong swap_successes;
4256 };
4257 
4258 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4259                                                struct shm_info *host_shm_info)
4260 {
4261     struct target_shm_info *target_shm_info;
4262     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4263         return -TARGET_EFAULT;
4264     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4265     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4266     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4267     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4268     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4269     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4270     unlock_user_struct(target_shm_info, target_addr, 1);
4271     return 0;
4272 }
4273 
4274 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
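 /* do_shmctl() Must return target values and target errnos. */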
4275 {
4276     struct shmid_ds dsarg;
4277     struct shminfo shminfo;
4278     struct shm_info shm_info;
4279     abi_long ret = -TARGET_EINVAL;
4280 
4281     cmd &= 0xff;
4282 
4283     switch(cmd) {
4284     case IPC_STAT:
4285     case IPC_SET:
4286     case SHM_STAT:
4287         if (target_to_host_shmid_ds(&dsarg, buf))
4288             return -TARGET_EFAULT;
4289         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4290         if (host_to_target_shmid_ds(buf, &dsarg))
4291             return -TARGET_EFAULT;
4292         break;
4293     case IPC_INFO:
4294         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4295         if (host_to_target_shminfo(buf, &shminfo))
4296             return -TARGET_EFAULT;
4297         break;
4298     case SHM_INFO:
4299         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4300         if (host_to_target_shm_info(buf, &shm_info))
4301             return -TARGET_EFAULT;
4302         break;
4303     case IPC_RMID:
4304     case SHM_LOCK:
4305     case SHM_UNLOCK:
4306         ret = get_errno(shmctl(shmid, cmd, NULL));
4307         break;
4308     }
4309 
4310     return ret;
4311 }
4312 
4313 #ifndef TARGET_FORCE_SHMLBA
4314 /* For most architectures, SHMLBA is the same as the page size;
4315  * some architectures have larger values, in which case they should
4316  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4317  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4318  * and defining its own value for SHMLBA.
4319  *
4320  * The kernel also permits SHMLBA to be set by the architecture to a
4321  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4322  * this means that addresses are rounded to the large size if
4323  * SHM_RND is set but addresses not aligned to that size are not rejected
4324  * as long as they are at least page-aligned. Since the only architecture
4325  * which uses this is ia64 this code doesn't provide for that oddity.
4326  */
4327 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4328 {
4329     return TARGET_PAGE_SIZE;
4330 }
4331 #endif
4332 
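 /* Attach a SysV shared memory segment into the guest address space,
  * honoring the target SHMLBA, and record the mapping in shm_regions[] so
  * that do_shmdt() can undo it later. */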
4333 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4334                                  int shmid, abi_ulong shmaddr, int shmflg)
4335 {
4336     abi_long raddr;
4337     void *host_raddr;
4338     struct shmid_ds shm_info;
4339     int i,ret;
4340     abi_ulong shmlba;
4341 
4342     /* find out the length of the shared memory segment */
4343     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4344     if (is_error(ret)) {
4345         /* can't get length, bail out */
4346         return ret;
4347     }
4348 
4349     shmlba = target_shmlba(cpu_env);
4350 
4351     if (shmaddr & (shmlba - 1)) {
4352         if (shmflg & SHM_RND) {
4353             shmaddr &= ~(shmlba - 1);
4354         } else {
4355             return -TARGET_EINVAL;
4356         }
4357     }
4358     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4359         return -TARGET_EINVAL;
4360     }
4361 
4362     mmap_lock();
4363 
4364     if (shmaddr)
4365         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4366     else {
4367         abi_ulong mmap_start;
4368 
4369         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4370         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4371 
4372         if (mmap_start == -1) {
4373             errno = ENOMEM;
4374             host_raddr = (void *)-1;
4375         } else
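                 /* mmap_find_vma() may pick a range inside QEMU's reserved
                  * guest address space, so SHM_REMAP is needed to be able to
                  * map over it. */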
4376             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4377     }
4378 
4379     if (host_raddr == (void *)-1) {
4380         mmap_unlock();
4381         return get_errno((long)host_raddr);
4382     }
4383     raddr=h2g((unsigned long)host_raddr);
4384 
4385     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4386                    PAGE_VALID | PAGE_READ |
4387                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4388 
4389     for (i = 0; i < N_SHM_REGIONS; i++) {
4390         if (!shm_regions[i].in_use) {
4391             shm_regions[i].in_use = true;
4392             shm_regions[i].start = raddr;
4393             shm_regions[i].size = shm_info.shm_segsz;
4394             break;
4395         }
4396     }
4397 
4398     mmap_unlock();
4399     return raddr;
4400 
4401 }
4402 
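 /* Detach a segment previously attached with do_shmat(), clearing its page
  * flags and dropping it from shm_regions[]. */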
4403 static inline abi_long do_shmdt(abi_ulong shmaddr)
4404 {
4405     int i;
4406     abi_long rv;
4407 
4408     mmap_lock();
4409 
4410     for (i = 0; i < N_SHM_REGIONS; ++i) {
4411         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4412             shm_regions[i].in_use = false;
4413             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4414             break;
4415         }
4416     }
4417     rv = get_errno(shmdt(g2h(shmaddr)));
4418 
4419     mmap_unlock();
4420 
4421     return rv;
4422 }
4423 
4424 #ifdef TARGET_NR_ipc
4425 /* ??? This only works with linear mappings.  */
4426 /* do_ipc() must return target values and target errnos. */
4427 static abi_long do_ipc(CPUArchState *cpu_env,
4428                        unsigned int call, abi_long first,
4429                        abi_long second, abi_long third,
4430                        abi_long ptr, abi_long fifth)
4431 {
4432     int version;
4433     abi_long ret = 0;
4434 
4435     version = call >> 16;
4436     call &= 0xffff;
4437 
4438     switch (call) {
4439     case IPCOP_semop:
4440         ret = do_semtimedop(first, ptr, second, 0, false);
4441         break;
4442     case IPCOP_semtimedop:
4443     /*
4444      * The s390 sys_ipc variant has only five parameters instead of six
4445      * (as in the default variant); the only difference is the handling of
4446      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4447      * to a struct timespec while the generic variant uses the fifth parameter.
4448      */
4449 #if defined(TARGET_S390X)
4450         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4451 #else
4452         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4453 #endif
4454         break;
4455 
4456     case IPCOP_semget:
4457         ret = get_errno(semget(first, second, third));
4458         break;
4459 
4460     case IPCOP_semctl: {
4461         /* The semun argument to semctl is passed by value, so dereference the
4462          * ptr argument. */
4463         abi_ulong atptr;
4464         get_user_ual(atptr, ptr);
4465         ret = do_semctl(first, second, third, atptr);
4466         break;
4467     }
4468 
4469     case IPCOP_msgget:
4470         ret = get_errno(msgget(first, second));
4471         break;
4472 
4473     case IPCOP_msgsnd:
4474         ret = do_msgsnd(first, ptr, second, third);
4475         break;
4476 
4477     case IPCOP_msgctl:
4478         ret = do_msgctl(first, second, ptr);
4479         break;
4480 
4481     case IPCOP_msgrcv:
4482         switch (version) {
4483         case 0:
4484             {
4485                 struct target_ipc_kludge {
4486                     abi_long msgp;
4487                     abi_long msgtyp;
4488                 } *tmp;
4489 
4490                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4491                     ret = -TARGET_EFAULT;
4492                     break;
4493                 }
4494 
4495                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4496 
4497                 unlock_user_struct(tmp, ptr, 0);
4498                 break;
4499             }
4500         default:
4501             ret = do_msgrcv(first, ptr, second, fifth, third);
4502         }
4503         break;
4504 
4505     case IPCOP_shmat:
4506         switch (version) {
4507         default:
4508         {
4509             abi_ulong raddr;
4510             raddr = do_shmat(cpu_env, first, ptr, second);
4511             if (is_error(raddr))
4512                 return get_errno(raddr);
4513             if (put_user_ual(raddr, third))
4514                 return -TARGET_EFAULT;
4515             break;
4516         }
4517         case 1:
4518             ret = -TARGET_EINVAL;
4519             break;
4520         }
4521 	break;
4522     case IPCOP_shmdt:
4523         ret = do_shmdt(ptr);
4524 	break;
4525 
4526     case IPCOP_shmget:
4527         /* IPC_* flag values are the same on all Linux platforms */
4528 	ret = get_errno(shmget(first, second, third));
4529 	break;
4530 
4531     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4532     case IPCOP_shmctl:
4533         ret = do_shmctl(first, second, ptr);
4534         break;
4535     default:
4536         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4537                       call, version);
4538 	ret = -TARGET_ENOSYS;
4539 	break;
4540     }
4541     return ret;
4542 }
4543 #endif
4544 
4545 /* kernel structure types definitions */
4546 
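 /* syscall_types.h is included twice below: once to build the STRUCT_* enum
  * of known structure layouts, and once to emit a struct_<name>_def argtype
  * description array for each of them (STRUCT_SPECIAL entries get no
  * generated array). */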
4547 #define STRUCT(name, ...) STRUCT_ ## name,
4548 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4549 enum {
4550 #include "syscall_types.h"
4551 STRUCT_MAX
4552 };
4553 #undef STRUCT
4554 #undef STRUCT_SPECIAL
4555 
4556 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4557 #define STRUCT_SPECIAL(name)
4558 #include "syscall_types.h"
4559 #undef STRUCT
4560 #undef STRUCT_SPECIAL
4561 
4562 #define MAX_STRUCT_SIZE 4096
4563 
4564 #ifdef CONFIG_FIEMAP
4565 /* So fiemap access checks don't overflow on 32 bit systems.
4566  * This is very slightly smaller than the limit imposed by
4567  * the underlying kernel.
4568  */
4569 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4570                             / sizeof(struct fiemap_extent))
4571 
4572 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4573                                        int fd, int cmd, abi_long arg)
4574 {
4575     /* The parameter for this ioctl is a struct fiemap followed
4576      * by an array of struct fiemap_extent whose size is set
4577      * in fiemap->fm_extent_count. The array is filled in by the
4578      * ioctl.
4579      */
4580     int target_size_in, target_size_out;
4581     struct fiemap *fm;
4582     const argtype *arg_type = ie->arg_type;
4583     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4584     void *argptr, *p;
4585     abi_long ret;
4586     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4587     uint32_t outbufsz;
4588     int free_fm = 0;
4589 
4590     assert(arg_type[0] == TYPE_PTR);
4591     assert(ie->access == IOC_RW);
4592     arg_type++;
4593     target_size_in = thunk_type_size(arg_type, 0);
4594     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4595     if (!argptr) {
4596         return -TARGET_EFAULT;
4597     }
4598     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4599     unlock_user(argptr, arg, 0);
4600     fm = (struct fiemap *)buf_temp;
4601     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4602         return -TARGET_EINVAL;
4603     }
4604 
4605     outbufsz = sizeof (*fm) +
4606         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4607 
4608     if (outbufsz > MAX_STRUCT_SIZE) {
4609         /* We can't fit all the extents into the fixed size buffer.
4610          * Allocate one that is large enough and use it instead.
4611          */
4612         fm = g_try_malloc(outbufsz);
4613         if (!fm) {
4614             return -TARGET_ENOMEM;
4615         }
4616         memcpy(fm, buf_temp, sizeof(struct fiemap));
4617         free_fm = 1;
4618     }
4619     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4620     if (!is_error(ret)) {
4621         target_size_out = target_size_in;
4622         /* An extent_count of 0 means we were only counting the extents
4623          * so there are no structs to copy
4624          */
4625         if (fm->fm_extent_count != 0) {
4626             target_size_out += fm->fm_mapped_extents * extent_size;
4627         }
4628         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4629         if (!argptr) {
4630             ret = -TARGET_EFAULT;
4631         } else {
4632             /* Convert the struct fiemap */
4633             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4634             if (fm->fm_extent_count != 0) {
4635                 p = argptr + target_size_in;
4636                 /* ...and then all the struct fiemap_extents */
4637                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4638                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4639                                   THUNK_TARGET);
4640                     p += extent_size;
4641                 }
4642             }
4643             unlock_user(argptr, arg, target_size_out);
4644         }
4645     }
4646     if (free_fm) {
4647         g_free(fm);
4648     }
4649     return ret;
4650 }
4651 #endif
4652 
4653 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4654                                 int fd, int cmd, abi_long arg)
4655 {
4656     const argtype *arg_type = ie->arg_type;
4657     int target_size;
4658     void *argptr;
4659     int ret;
4660     struct ifconf *host_ifconf;
4661     uint32_t outbufsz;
4662     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4663     int target_ifreq_size;
4664     int nb_ifreq;
4665     int free_buf = 0;
4666     int i;
4667     int target_ifc_len;
4668     abi_long target_ifc_buf;
4669     int host_ifc_len;
4670     char *host_ifc_buf;
4671 
4672     assert(arg_type[0] == TYPE_PTR);
4673     assert(ie->access == IOC_RW);
4674 
4675     arg_type++;
4676     target_size = thunk_type_size(arg_type, 0);
4677 
4678     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4679     if (!argptr)
4680         return -TARGET_EFAULT;
4681     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4682     unlock_user(argptr, arg, 0);
4683 
4684     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4685     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4686     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4687 
4688     if (target_ifc_buf != 0) {
4689         target_ifc_len = host_ifconf->ifc_len;
4690         nb_ifreq = target_ifc_len / target_ifreq_size;
4691         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4692 
4693         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4694         if (outbufsz > MAX_STRUCT_SIZE) {
4695             /*
4696              * We can't fit all the ifreq entries into the fixed size buffer.
4697              * Allocate one that is large enough and use it instead.
4698              */
4699             host_ifconf = g_try_malloc(outbufsz);
4700             if (!host_ifconf) {
4701                 return -TARGET_ENOMEM;
4702             }
4703             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4704             free_buf = 1;
4705         }
4706         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4707 
4708         host_ifconf->ifc_len = host_ifc_len;
4709     } else {
4710         host_ifc_buf = NULL;
4711     }
4712     host_ifconf->ifc_buf = host_ifc_buf;
4713 
4714     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4715     if (!is_error(ret)) {
4716         /* convert host ifc_len to target ifc_len */
4717 
4718         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4719         target_ifc_len = nb_ifreq * target_ifreq_size;
4720         host_ifconf->ifc_len = target_ifc_len;
4721 
4722         /* restore target ifc_buf */
4723 
4724         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4725 
4726         /* copy struct ifconf to target user */
4727 
4728         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4729         if (!argptr)
4730             return -TARGET_EFAULT;
4731         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4732         unlock_user(argptr, arg, target_size);
4733 
4734         if (target_ifc_buf != 0) {
4735             /* copy ifreq[] to target user */
4736             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4737             for (i = 0; i < nb_ifreq ; i++) {
4738                 thunk_convert(argptr + i * target_ifreq_size,
4739                               host_ifc_buf + i * sizeof(struct ifreq),
4740                               ifreq_arg_type, THUNK_TARGET);
4741             }
4742             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4743         }
4744     }
4745 
4746     if (free_buf) {
4747         g_free(host_ifconf);
4748     }
4749 
4750     return ret;
4751 }
4752 
4753 #if defined(CONFIG_USBFS)
4754 #if HOST_LONG_BITS > 64
4755 #error USBDEVFS thunks do not support >64 bit hosts yet.
4756 #endif
4757 struct live_urb {
4758     uint64_t target_urb_adr;
4759     uint64_t target_buf_adr;
4760     char *target_buf_ptr;
4761     struct usbdevfs_urb host_urb;
4762 };
4763 
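/*
 * The URB hash table is keyed by the guest URB address: each key is a
 * pointer to a live_urb whose first member is target_urb_adr, and
 * g_int64_hash()/g_int64_equal() only look at those first 64 bits, so a
 * lookup can simply pass a pointer to the guest address.
 */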
4764 static GHashTable *usbdevfs_urb_hashtable(void)
4765 {
4766     static GHashTable *urb_hashtable;
4767 
4768     if (!urb_hashtable) {
4769         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4770     }
4771     return urb_hashtable;
4772 }
4773 
4774 static void urb_hashtable_insert(struct live_urb *urb)
4775 {
4776     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4777     g_hash_table_insert(urb_hashtable, urb, urb);
4778 }
4779 
4780 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4781 {
4782     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4783     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4784 }
4785 
4786 static void urb_hashtable_remove(struct live_urb *urb)
4787 {
4788     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4789     g_hash_table_remove(urb_hashtable, urb);
4790 }
4791 
4792 static abi_long
4793 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4794                           int fd, int cmd, abi_long arg)
4795 {
4796     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4797     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4798     struct live_urb *lurb;
4799     void *argptr;
4800     uint64_t hurb;
4801     int target_size;
4802     uintptr_t target_urb_adr;
4803     abi_long ret;
4804 
4805     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4806 
4807     memset(buf_temp, 0, sizeof(uint64_t));
4808     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4809     if (is_error(ret)) {
4810         return ret;
4811     }
4812 
4813     memcpy(&hurb, buf_temp, sizeof(uint64_t));
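    /*
     * The kernel hands back the address of our embedded host_urb; step
     * back by its offset to recover the enclosing live_urb and the
     * guest-side metadata stored with it.
     */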
4814     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4815     if (!lurb->target_urb_adr) {
4816         return -TARGET_EFAULT;
4817     }
4818     urb_hashtable_remove(lurb);
4819     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4820         lurb->host_urb.buffer_length);
4821     lurb->target_buf_ptr = NULL;
4822 
4823     /* restore the guest buffer pointer */
4824     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4825 
4826     /* update the guest urb struct */
4827     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4828     if (!argptr) {
4829         g_free(lurb);
4830         return -TARGET_EFAULT;
4831     }
4832     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4833     unlock_user(argptr, lurb->target_urb_adr, target_size);
4834 
4835     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4836     /* write back the urb handle */
4837     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4838     if (!argptr) {
4839         g_free(lurb);
4840         return -TARGET_EFAULT;
4841     }
4842 
4843     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4844     target_urb_adr = lurb->target_urb_adr;
4845     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4846     unlock_user(argptr, arg, target_size);
4847 
4848     g_free(lurb);
4849     return ret;
4850 }
4851 
4852 static abi_long
4853 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4854                              uint8_t *buf_temp __attribute__((unused)),
4855                              int fd, int cmd, abi_long arg)
4856 {
4857     struct live_urb *lurb;
4858 
4859     /* map target address back to host URB with metadata. */
4860     lurb = urb_hashtable_lookup(arg);
4861     if (!lurb) {
4862         return -TARGET_EFAULT;
4863     }
4864     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4865 }
4866 
4867 static abi_long
4868 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4869                             int fd, int cmd, abi_long arg)
4870 {
4871     const argtype *arg_type = ie->arg_type;
4872     int target_size;
4873     abi_long ret;
4874     void *argptr;
4875     int rw_dir;
4876     struct live_urb *lurb;
4877 
4878     /*
4879      * each submitted URB needs to map to a unique ID for the
4880      * kernel, and that unique ID needs to be a pointer to
4881      * host memory.  hence, we need to malloc for each URB.
4882      * isochronous transfers have a variable length struct.
4883      */
4884     arg_type++;
4885     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4886 
4887     /* construct host copy of urb and metadata */
4888     lurb = g_try_malloc0(sizeof(struct live_urb));
4889     if (!lurb) {
4890         return -TARGET_ENOMEM;
4891     }
4892 
4893     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4894     if (!argptr) {
4895         g_free(lurb);
4896         return -TARGET_EFAULT;
4897     }
4898     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4899     unlock_user(argptr, arg, 0);
4900 
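    /* remember the guest urb address and data buffer address so the
       reap and discard handlers can find this urb again and write the
       results back to guest memory */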
4901     lurb->target_urb_adr = arg;
4902     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4903 
4904     /* buffer space used depends on endpoint type so lock the entire buffer */
4905     /* control type urbs should check the buffer contents for true direction */
4906     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4907     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4908         lurb->host_urb.buffer_length, 1);
4909     if (lurb->target_buf_ptr == NULL) {
4910         g_free(lurb);
4911         return -TARGET_EFAULT;
4912     }
4913 
4914     /* update buffer pointer in host copy */
4915     lurb->host_urb.buffer = lurb->target_buf_ptr;
4916 
4917     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4918     if (is_error(ret)) {
4919         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4920         g_free(lurb);
4921     } else {
4922         urb_hashtable_insert(lurb);
4923     }
4924 
4925     return ret;
4926 }
4927 #endif /* CONFIG_USBFS */
4928 
4929 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4930                             int cmd, abi_long arg)
4931 {
4932     void *argptr;
4933     struct dm_ioctl *host_dm;
4934     abi_long guest_data;
4935     uint32_t guest_data_size;
4936     int target_size;
4937     const argtype *arg_type = ie->arg_type;
4938     abi_long ret;
4939     void *big_buf = NULL;
4940     char *host_data;
4941 
4942     arg_type++;
4943     target_size = thunk_type_size(arg_type, 0);
4944     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4945     if (!argptr) {
4946         ret = -TARGET_EFAULT;
4947         goto out;
4948     }
4949     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4950     unlock_user(argptr, arg, 0);
4951 
4952     /* buf_temp is too small, so fetch things into a bigger buffer */
4953     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4954     memcpy(big_buf, buf_temp, target_size);
4955     buf_temp = big_buf;
4956     host_dm = big_buf;
4957 
4958     guest_data = arg + host_dm->data_start;
4959     if ((guest_data - arg) < 0) {
4960         ret = -TARGET_EINVAL;
4961         goto out;
4962     }
4963     guest_data_size = host_dm->data_size - host_dm->data_start;
4964     host_data = (char*)host_dm + host_dm->data_start;
4965 
4966     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4967     if (!argptr) {
4968         ret = -TARGET_EFAULT;
4969         goto out;
4970     }
4971 
4972     switch (ie->host_cmd) {
4973     case DM_REMOVE_ALL:
4974     case DM_LIST_DEVICES:
4975     case DM_DEV_CREATE:
4976     case DM_DEV_REMOVE:
4977     case DM_DEV_SUSPEND:
4978     case DM_DEV_STATUS:
4979     case DM_DEV_WAIT:
4980     case DM_TABLE_STATUS:
4981     case DM_TABLE_CLEAR:
4982     case DM_TABLE_DEPS:
4983     case DM_LIST_VERSIONS:
4984         /* no input data */
4985         break;
4986     case DM_DEV_RENAME:
4987     case DM_DEV_SET_GEOMETRY:
4988         /* data contains only strings */
4989         memcpy(host_data, argptr, guest_data_size);
4990         break;
4991     case DM_TARGET_MSG:
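        /* struct dm_target_msg starts with a 64-bit sector number
           followed by the message string; only the sector needs
           byte swapping. */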
4992         memcpy(host_data, argptr, guest_data_size);
4993         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4994         break;
4995     case DM_TABLE_LOAD:
4996     {
4997         void *gspec = argptr;
4998         void *cur_data = host_data;
4999         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5000         int spec_size = thunk_type_size(arg_type, 0);
5001         int i;
5002 
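        /*
         * Each dm_target_spec is followed by its NUL-terminated
         * parameter string.  Convert the fixed part of every spec,
         * copy the string after it and rewrite 'next' so the host
         * entries end up packed back to back.
         */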
5003         for (i = 0; i < host_dm->target_count; i++) {
5004             struct dm_target_spec *spec = cur_data;
5005             uint32_t next;
5006             int slen;
5007 
5008             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5009             slen = strlen((char*)gspec + spec_size) + 1;
5010             next = spec->next;
5011             spec->next = sizeof(*spec) + slen;
5012             strcpy((char*)&spec[1], gspec + spec_size);
5013             gspec += next;
5014             cur_data += spec->next;
5015         }
5016         break;
5017     }
5018     default:
5019         ret = -TARGET_EINVAL;
5020         unlock_user(argptr, guest_data, 0);
5021         goto out;
5022     }
5023     unlock_user(argptr, guest_data, 0);
5024 
5025     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5026     if (!is_error(ret)) {
5027         guest_data = arg + host_dm->data_start;
5028         guest_data_size = host_dm->data_size - host_dm->data_start;
5029         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5030         switch (ie->host_cmd) {
5031         case DM_REMOVE_ALL:
5032         case DM_DEV_CREATE:
5033         case DM_DEV_REMOVE:
5034         case DM_DEV_RENAME:
5035         case DM_DEV_SUSPEND:
5036         case DM_DEV_STATUS:
5037         case DM_TABLE_LOAD:
5038         case DM_TABLE_CLEAR:
5039         case DM_TARGET_MSG:
5040         case DM_DEV_SET_GEOMETRY:
5041             /* no return data */
5042             break;
5043         case DM_LIST_DEVICES:
5044         {
5045             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5046             uint32_t remaining_data = guest_data_size;
5047             void *cur_data = argptr;
5048             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5049             int nl_size = 12; /* can't use thunk_size due to alignment */
5050 
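            /*
             * Walk the chained dm_name_list entries, converting each one
             * and recomputing 'next' for the target layout; if the guest
             * buffer runs out, set DM_BUFFER_FULL_FLAG and stop.
             */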
5051             while (1) {
5052                 uint32_t next = nl->next;
5053                 if (next) {
5054                     nl->next = nl_size + (strlen(nl->name) + 1);
5055                 }
5056                 if (remaining_data < nl->next) {
5057                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5058                     break;
5059                 }
5060                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5061                 strcpy(cur_data + nl_size, nl->name);
5062                 cur_data += nl->next;
5063                 remaining_data -= nl->next;
5064                 if (!next) {
5065                     break;
5066                 }
5067                 nl = (void*)nl + next;
5068             }
5069             break;
5070         }
5071         case DM_DEV_WAIT:
5072         case DM_TABLE_STATUS:
5073         {
5074             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5075             void *cur_data = argptr;
5076             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5077             int spec_size = thunk_type_size(arg_type, 0);
5078             int i;
5079 
5080             for (i = 0; i < host_dm->target_count; i++) {
5081                 uint32_t next = spec->next;
5082                 int slen = strlen((char*)&spec[1]) + 1;
5083                 spec->next = (cur_data - argptr) + spec_size + slen;
5084                 if (guest_data_size < spec->next) {
5085                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5086                     break;
5087                 }
5088                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5089                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5090                 cur_data = argptr + spec->next;
5091                 spec = (void*)host_dm + host_dm->data_start + next;
5092             }
5093             break;
5094         }
5095         case DM_TABLE_DEPS:
5096         {
5097             void *hdata = (void*)host_dm + host_dm->data_start;
5098             int count = *(uint32_t*)hdata;
5099             uint64_t *hdev = hdata + 8;
5100             uint64_t *gdev = argptr + 8;
5101             int i;
5102 
5103             *(uint32_t*)argptr = tswap32(count);
5104             for (i = 0; i < count; i++) {
5105                 *gdev = tswap64(*hdev);
5106                 gdev++;
5107                 hdev++;
5108             }
5109             break;
5110         }
5111         case DM_LIST_VERSIONS:
5112         {
5113             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5114             uint32_t remaining_data = guest_data_size;
5115             void *cur_data = argptr;
5116             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5117             int vers_size = thunk_type_size(arg_type, 0);
5118 
5119             while (1) {
5120                 uint32_t next = vers->next;
5121                 if (next) {
5122                     vers->next = vers_size + (strlen(vers->name) + 1);
5123                 }
5124                 if (remaining_data < vers->next) {
5125                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5126                     break;
5127                 }
5128                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5129                 strcpy(cur_data + vers_size, vers->name);
5130                 cur_data += vers->next;
5131                 remaining_data -= vers->next;
5132                 if (!next) {
5133                     break;
5134                 }
5135                 vers = (void*)vers + next;
5136             }
5137             break;
5138         }
5139         default:
5140             unlock_user(argptr, guest_data, 0);
5141             ret = -TARGET_EINVAL;
5142             goto out;
5143         }
5144         unlock_user(argptr, guest_data, guest_data_size);
5145 
5146         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5147         if (!argptr) {
5148             ret = -TARGET_EFAULT;
5149             goto out;
5150         }
5151         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5152         unlock_user(argptr, arg, target_size);
5153     }
5154 out:
5155     g_free(big_buf);
5156     return ret;
5157 }
5158 
5159 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5160                                int cmd, abi_long arg)
5161 {
5162     void *argptr;
5163     int target_size;
5164     const argtype *arg_type = ie->arg_type;
5165     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5166     abi_long ret;
5167 
5168     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5169     struct blkpg_partition host_part;
5170 
5171     /* Read and convert blkpg */
5172     arg_type++;
5173     target_size = thunk_type_size(arg_type, 0);
5174     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5175     if (!argptr) {
5176         ret = -TARGET_EFAULT;
5177         goto out;
5178     }
5179     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5180     unlock_user(argptr, arg, 0);
5181 
5182     switch (host_blkpg->op) {
5183     case BLKPG_ADD_PARTITION:
5184     case BLKPG_DEL_PARTITION:
5185         /* payload is struct blkpg_partition */
5186         break;
5187     default:
5188         /* Unknown opcode */
5189         ret = -TARGET_EINVAL;
5190         goto out;
5191     }
5192 
5193     /* Read and convert blkpg->data */
5194     arg = (abi_long)(uintptr_t)host_blkpg->data;
5195     target_size = thunk_type_size(part_arg_type, 0);
5196     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5197     if (!argptr) {
5198         ret = -TARGET_EFAULT;
5199         goto out;
5200     }
5201     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5202     unlock_user(argptr, arg, 0);
5203 
5204     /* Swizzle the data pointer to our local copy and call! */
5205     host_blkpg->data = &host_part;
5206     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5207 
5208 out:
5209     return ret;
5210 }
5211 
5212 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5213                                 int fd, int cmd, abi_long arg)
5214 {
5215     const argtype *arg_type = ie->arg_type;
5216     const StructEntry *se;
5217     const argtype *field_types;
5218     const int *dst_offsets, *src_offsets;
5219     int target_size;
5220     void *argptr;
5221     abi_ulong *target_rt_dev_ptr = NULL;
5222     unsigned long *host_rt_dev_ptr = NULL;
5223     abi_long ret;
5224     int i;
5225 
5226     assert(ie->access == IOC_W);
5227     assert(*arg_type == TYPE_PTR);
5228     arg_type++;
5229     assert(*arg_type == TYPE_STRUCT);
5230     target_size = thunk_type_size(arg_type, 0);
5231     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5232     if (!argptr) {
5233         return -TARGET_EFAULT;
5234     }
5235     arg_type++;
5236     assert(*arg_type == (int)STRUCT_rtentry);
5237     se = struct_entries + *arg_type++;
5238     assert(se->convert[0] == NULL);
5239     /* convert struct here to be able to catch rt_dev string */
5240     field_types = se->field_types;
5241     dst_offsets = se->field_offsets[THUNK_HOST];
5242     src_offsets = se->field_offsets[THUNK_TARGET];
5243     for (i = 0; i < se->nb_fields; i++) {
5244         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5245             assert(*field_types == TYPE_PTRVOID);
5246             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5247             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5248             if (*target_rt_dev_ptr != 0) {
5249                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5250                                                   tswapal(*target_rt_dev_ptr));
5251                 if (!*host_rt_dev_ptr) {
5252                     unlock_user(argptr, arg, 0);
5253                     return -TARGET_EFAULT;
5254                 }
5255             } else {
5256                 *host_rt_dev_ptr = 0;
5257             }
5258             field_types++;
5259             continue;
5260         }
5261         field_types = thunk_convert(buf_temp + dst_offsets[i],
5262                                     argptr + src_offsets[i],
5263                                     field_types, THUNK_HOST);
5264     }
5265     unlock_user(argptr, arg, 0);
5266 
5267     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5268 
5269     assert(host_rt_dev_ptr != NULL);
5270     assert(target_rt_dev_ptr != NULL);
5271     if (*host_rt_dev_ptr != 0) {
5272         unlock_user((void *)*host_rt_dev_ptr,
5273                     *target_rt_dev_ptr, 0);
5274     }
5275     return ret;
5276 }
5277 
5278 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5279                                      int fd, int cmd, abi_long arg)
5280 {
5281     int sig = target_to_host_signal(arg);
5282     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5283 }
5284 
5285 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5286                                     int fd, int cmd, abi_long arg)
5287 {
5288     struct timeval tv;
5289     abi_long ret;
5290 
5291     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5292     if (is_error(ret)) {
5293         return ret;
5294     }
5295 
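    /* The _OLD command uses the traditional (possibly 32-bit time_t)
       struct timeval layout; the newer command expects 64-bit times. */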
5296     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5297         if (copy_to_user_timeval(arg, &tv)) {
5298             return -TARGET_EFAULT;
5299         }
5300     } else {
5301         if (copy_to_user_timeval64(arg, &tv)) {
5302             return -TARGET_EFAULT;
5303         }
5304     }
5305 
5306     return ret;
5307 }
5308 
5309 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5310                                       int fd, int cmd, abi_long arg)
5311 {
5312     struct timespec ts;
5313     abi_long ret;
5314 
5315     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5316     if (is_error(ret)) {
5317         return ret;
5318     }
5319 
5320     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5321         if (host_to_target_timespec(arg, &ts)) {
5322             return -TARGET_EFAULT;
5323         }
5324     } else {
5325         if (host_to_target_timespec64(arg, &ts)) {
5326             return -TARGET_EFAULT;
5327         }
5328     }
5329 
5330     return ret;
5331 }
5332 
5333 #ifdef TIOCGPTPEER
5334 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5335                                      int fd, int cmd, abi_long arg)
5336 {
5337     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5338     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5339 }
5340 #endif
5341 
5342 #ifdef HAVE_DRM_H
5343 
5344 static void unlock_drm_version(struct drm_version *host_ver,
5345                                struct target_drm_version *target_ver,
5346                                bool copy)
5347 {
5348     unlock_user(host_ver->name, target_ver->name,
5349                                 copy ? host_ver->name_len : 0);
5350     unlock_user(host_ver->date, target_ver->date,
5351                                 copy ? host_ver->date_len : 0);
5352     unlock_user(host_ver->desc, target_ver->desc,
5353                                 copy ? host_ver->desc_len : 0);
5354 }
5355 
5356 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5357                                           struct target_drm_version *target_ver)
5358 {
5359     memset(host_ver, 0, sizeof(*host_ver));
5360 
5361     __get_user(host_ver->name_len, &target_ver->name_len);
5362     if (host_ver->name_len) {
5363         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5364                                    target_ver->name_len, 0);
5365         if (!host_ver->name) {
5366             return -EFAULT;
5367         }
5368     }
5369 
5370     __get_user(host_ver->date_len, &target_ver->date_len);
5371     if (host_ver->date_len) {
5372         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5373                                    target_ver->date_len, 0);
5374         if (!host_ver->date) {
5375             goto err;
5376         }
5377     }
5378 
5379     __get_user(host_ver->desc_len, &target_ver->desc_len);
5380     if (host_ver->desc_len) {
5381         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5382                                    target_ver->desc_len, 0);
5383         if (!host_ver->desc) {
5384             goto err;
5385         }
5386     }
5387 
5388     return 0;
5389 err:
5390     unlock_drm_version(host_ver, target_ver, false);
5391     return -EFAULT;
5392 }
5393 
5394 static inline void host_to_target_drmversion(
5395                                           struct target_drm_version *target_ver,
5396                                           struct drm_version *host_ver)
5397 {
5398     __put_user(host_ver->version_major, &target_ver->version_major);
5399     __put_user(host_ver->version_minor, &target_ver->version_minor);
5400     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5401     __put_user(host_ver->name_len, &target_ver->name_len);
5402     __put_user(host_ver->date_len, &target_ver->date_len);
5403     __put_user(host_ver->desc_len, &target_ver->desc_len);
5404     unlock_drm_version(host_ver, target_ver, true);
5405 }
5406 
5407 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5408                              int fd, int cmd, abi_long arg)
5409 {
5410     struct drm_version *ver;
5411     struct target_drm_version *target_ver;
5412     abi_long ret;
5413 
5414     switch (ie->host_cmd) {
5415     case DRM_IOCTL_VERSION:
5416         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5417             return -TARGET_EFAULT;
5418         }
5419         ver = (struct drm_version *)buf_temp;
5420         ret = target_to_host_drmversion(ver, target_ver);
5421         if (!is_error(ret)) {
5422             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5423             if (is_error(ret)) {
5424                 unlock_drm_version(ver, target_ver, false);
5425             } else {
5426                 host_to_target_drmversion(target_ver, ver);
5427             }
5428         }
5429         unlock_user_struct(target_ver, arg, 0);
5430         return ret;
5431     }
5432     return -TARGET_ENOSYS;
5433 }
5434 
5435 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5436                                            struct drm_i915_getparam *gparam,
5437                                            int fd, abi_long arg)
5438 {
5439     abi_long ret;
5440     int value;
5441     struct target_drm_i915_getparam *target_gparam;
5442 
5443     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5444         return -TARGET_EFAULT;
5445     }
5446 
5447     __get_user(gparam->param, &target_gparam->param);
5448     gparam->value = &value;
5449     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5450     put_user_s32(value, target_gparam->value);
5451 
5452     unlock_user_struct(target_gparam, arg, 0);
5453     return ret;
5454 }
5455 
5456 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                                   int fd, int cmd, abi_long arg)
5458 {
5459     switch (ie->host_cmd) {
5460     case DRM_IOCTL_I915_GETPARAM:
5461         return do_ioctl_drm_i915_getparam(ie,
5462                                           (struct drm_i915_getparam *)buf_temp,
5463                                           fd, arg);
5464     default:
5465         return -TARGET_ENOSYS;
5466     }
5467 }
5468 
5469 #endif
5470 
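/*
 * ioctl_entries[] is built by expanding ioctls.h with the macros below:
 * IOCTL() maps a target command to a host command handled by the generic
 * thunking in do_ioctl(), IOCTL_SPECIAL() supplies a custom do_ioctl_*()
 * handler, and IOCTL_IGNORE() registers a name with no host counterpart
 * so it fails with ENOSYS.
 */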
5471 IOCTLEntry ioctl_entries[] = {
5472 #define IOCTL(cmd, access, ...) \
5473     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5474 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5475     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5476 #define IOCTL_IGNORE(cmd) \
5477     { TARGET_ ## cmd, 0, #cmd },
5478 #include "ioctls.h"
5479     { 0, 0, },
5480 };
5481 
5482 /* ??? Implement proper locking for ioctls.  */
5483 /* do_ioctl() must return target values and target errnos. */
5484 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5485 {
5486     const IOCTLEntry *ie;
5487     const argtype *arg_type;
5488     abi_long ret;
5489     uint8_t buf_temp[MAX_STRUCT_SIZE];
5490     int target_size;
5491     void *argptr;
5492 
5493     ie = ioctl_entries;
5494     for(;;) {
5495         if (ie->target_cmd == 0) {
5496             qemu_log_mask(
5497                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5498             return -TARGET_ENOSYS;
5499         }
5500         if (ie->target_cmd == cmd)
5501             break;
5502         ie++;
5503     }
5504     arg_type = ie->arg_type;
5505     if (ie->do_ioctl) {
5506         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5507     } else if (!ie->host_cmd) {
5508         /* Some architectures define BSD ioctls in their headers
5509            that are not implemented in Linux.  */
5510         return -TARGET_ENOSYS;
5511     }
5512 
5513     switch(arg_type[0]) {
5514     case TYPE_NULL:
5515         /* no argument */
5516         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5517         break;
5518     case TYPE_PTRVOID:
5519     case TYPE_INT:
5520     case TYPE_LONG:
5521     case TYPE_ULONG:
5522         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5523         break;
5524     case TYPE_PTR:
5525         arg_type++;
5526         target_size = thunk_type_size(arg_type, 0);
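        /* Generic pointer-argument handling: IOC_R converts the result
         * back to the guest after the ioctl, IOC_W converts the guest
         * argument before the call, and IOC_RW does both.
         */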
5527         switch(ie->access) {
5528         case IOC_R:
5529             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5530             if (!is_error(ret)) {
5531                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5532                 if (!argptr)
5533                     return -TARGET_EFAULT;
5534                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5535                 unlock_user(argptr, arg, target_size);
5536             }
5537             break;
5538         case IOC_W:
5539             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5540             if (!argptr)
5541                 return -TARGET_EFAULT;
5542             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5543             unlock_user(argptr, arg, 0);
5544             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5545             break;
5546         default:
5547         case IOC_RW:
5548             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5549             if (!argptr)
5550                 return -TARGET_EFAULT;
5551             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5552             unlock_user(argptr, arg, 0);
5553             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5554             if (!is_error(ret)) {
5555                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5556                 if (!argptr)
5557                     return -TARGET_EFAULT;
5558                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5559                 unlock_user(argptr, arg, target_size);
5560             }
5561             break;
5562         }
5563         break;
5564     default:
5565         qemu_log_mask(LOG_UNIMP,
5566                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5567                       (long)cmd, arg_type[0]);
5568         ret = -TARGET_ENOSYS;
5569         break;
5570     }
5571     return ret;
5572 }
5573 
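/* Terminal flag translation tables; each entry gives a target mask and
   bit pattern and the corresponding host mask and bit pattern. */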
5574 static const bitmask_transtbl iflag_tbl[] = {
5575         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5576         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5577         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5578         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5579         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5580         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5581         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5582         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5583         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5584         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5585         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5586         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5587         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5588         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5589         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5590         { 0, 0, 0, 0 }
5591 };
5592 
5593 static const bitmask_transtbl oflag_tbl[] = {
5594 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5595 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5596 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5597 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5598 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5599 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5600 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5601 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5602 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5603 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5604 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5605 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5606 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5607 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5608 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5609 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5610 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5611 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5612 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5613 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5614 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5615 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5616 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5617 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5618 	{ 0, 0, 0, 0 }
5619 };
5620 
5621 static const bitmask_transtbl cflag_tbl[] = {
5622 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5623 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5624 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5625 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5626 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5627 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5628 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5629 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5630 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5631 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5632 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5633 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5634 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5635 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5636 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5637 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5638 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5639 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5640 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5641 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5642 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5643 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5644 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5645 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5646 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5647 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5648 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5649 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5650 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5651 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5652 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5653 	{ 0, 0, 0, 0 }
5654 };
5655 
5656 static const bitmask_transtbl lflag_tbl[] = {
5657   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5658   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5659   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5660   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5661   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5662   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5663   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5664   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5665   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5666   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5667   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5668   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5669   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5670   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5671   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5672   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5673   { 0, 0, 0, 0 }
5674 };
5675 
5676 static void target_to_host_termios (void *dst, const void *src)
5677 {
5678     struct host_termios *host = dst;
5679     const struct target_termios *target = src;
5680 
5681     host->c_iflag =
5682         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5683     host->c_oflag =
5684         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5685     host->c_cflag =
5686         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5687     host->c_lflag =
5688         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5689     host->c_line = target->c_line;
5690 
5691     memset(host->c_cc, 0, sizeof(host->c_cc));
5692     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5693     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5694     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5695     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5696     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5697     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5698     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5699     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5700     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5701     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5702     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5703     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5704     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5705     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5706     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5707     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5708     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5709 }
5710 
5711 static void host_to_target_termios (void *dst, const void *src)
5712 {
5713     struct target_termios *target = dst;
5714     const struct host_termios *host = src;
5715 
5716     target->c_iflag =
5717         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5718     target->c_oflag =
5719         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5720     target->c_cflag =
5721         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5722     target->c_lflag =
5723         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5724     target->c_line = host->c_line;
5725 
5726     memset(target->c_cc, 0, sizeof(target->c_cc));
5727     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5728     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5729     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5730     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5731     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5732     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5733     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5734     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5735     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5736     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5737     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5738     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5739     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5740     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5741     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5742     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5743     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5744 }
5745 
5746 static const StructEntry struct_termios_def = {
5747     .convert = { host_to_target_termios, target_to_host_termios },
5748     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5749     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5750     .print = print_termios,
5751 };
5752 
5753 static bitmask_transtbl mmap_flags_tbl[] = {
5754     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5755     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5756     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5757     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5758       MAP_ANONYMOUS, MAP_ANONYMOUS },
5759     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5760       MAP_GROWSDOWN, MAP_GROWSDOWN },
5761     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5762       MAP_DENYWRITE, MAP_DENYWRITE },
5763     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5764       MAP_EXECUTABLE, MAP_EXECUTABLE },
5765     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5766     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5767       MAP_NORESERVE, MAP_NORESERVE },
5768     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5769     /* MAP_STACK has been ignored by the kernel for quite some time.
5770        Recognize it for the target insofar as we do not want to pass
5771        it through to the host.  */
5772     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5773     { 0, 0, 0, 0 }
5774 };
5775 
5776 /*
5777  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5778  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5779  */
5780 #if defined(TARGET_I386)
5781 
5782 /* NOTE: there is really one LDT for all the threads */
5783 static uint8_t *ldt_table;
5784 
5785 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5786 {
5787     int size;
5788     void *p;
5789 
5790     if (!ldt_table)
5791         return 0;
5792     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5793     if (size > bytecount)
5794         size = bytecount;
5795     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5796     if (!p)
5797         return -TARGET_EFAULT;
5798     /* ??? Should this be byteswapped?  */
5799     memcpy(p, ldt_table, size);
5800     unlock_user(p, ptr, size);
5801     return size;
5802 }
5803 
5804 /* XXX: add locking support */
5805 static abi_long write_ldt(CPUX86State *env,
5806                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5807 {
5808     struct target_modify_ldt_ldt_s ldt_info;
5809     struct target_modify_ldt_ldt_s *target_ldt_info;
5810     int seg_32bit, contents, read_exec_only, limit_in_pages;
5811     int seg_not_present, useable, lm;
5812     uint32_t *lp, entry_1, entry_2;
5813 
5814     if (bytecount != sizeof(ldt_info))
5815         return -TARGET_EINVAL;
5816     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5817         return -TARGET_EFAULT;
5818     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5819     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5820     ldt_info.limit = tswap32(target_ldt_info->limit);
5821     ldt_info.flags = tswap32(target_ldt_info->flags);
5822     unlock_user_struct(target_ldt_info, ptr, 0);
5823 
5824     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5825         return -TARGET_EINVAL;
5826     seg_32bit = ldt_info.flags & 1;
5827     contents = (ldt_info.flags >> 1) & 3;
5828     read_exec_only = (ldt_info.flags >> 3) & 1;
5829     limit_in_pages = (ldt_info.flags >> 4) & 1;
5830     seg_not_present = (ldt_info.flags >> 5) & 1;
5831     useable = (ldt_info.flags >> 6) & 1;
5832 #ifdef TARGET_ABI32
5833     lm = 0;
5834 #else
5835     lm = (ldt_info.flags >> 7) & 1;
5836 #endif
5837     if (contents == 3) {
5838         if (oldmode)
5839             return -TARGET_EINVAL;
5840         if (seg_not_present == 0)
5841             return -TARGET_EINVAL;
5842     }
5843     /* allocate the LDT */
5844     if (!ldt_table) {
5845         env->ldt.base = target_mmap(0,
5846                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5847                                     PROT_READ|PROT_WRITE,
5848                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5849         if (env->ldt.base == -1)
5850             return -TARGET_ENOMEM;
5851         memset(g2h(env->ldt.base), 0,
5852                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5853         env->ldt.limit = 0xffff;
5854         ldt_table = g2h(env->ldt.base);
5855     }
5856 
5857     /* NOTE: same code as Linux kernel */
5858     /* Allow LDTs to be cleared by the user. */
5859     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5860         if (oldmode ||
5861             (contents == 0             &&
5862              read_exec_only == 1       &&
5863              seg_32bit == 0            &&
5864              limit_in_pages == 0       &&
5865              seg_not_present == 1      &&
5866              useable == 0 )) {
5867             entry_1 = 0;
5868             entry_2 = 0;
5869             goto install;
5870         }
5871     }
5872 
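    /* Pack the fields into the two 32-bit words of an x86 segment
     * descriptor, mirroring the kernel's LDT_entry_a()/LDT_entry_b()
     * encoding: base and limit are split across both words, the type
     * and flag bits live in the high word.
     */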
5873     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5874         (ldt_info.limit & 0x0ffff);
5875     entry_2 = (ldt_info.base_addr & 0xff000000) |
5876         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5877         (ldt_info.limit & 0xf0000) |
5878         ((read_exec_only ^ 1) << 9) |
5879         (contents << 10) |
5880         ((seg_not_present ^ 1) << 15) |
5881         (seg_32bit << 22) |
5882         (limit_in_pages << 23) |
5883         (lm << 21) |
5884         0x7000;
5885     if (!oldmode)
5886         entry_2 |= (useable << 20);
5887 
5888     /* Install the new entry ...  */
5889 install:
5890     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5891     lp[0] = tswap32(entry_1);
5892     lp[1] = tswap32(entry_2);
5893     return 0;
5894 }
5895 
5896 /* specific and weird i386 syscalls */
5897 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5898                               unsigned long bytecount)
5899 {
5900     abi_long ret;
5901 
5902     switch (func) {
5903     case 0:
5904         ret = read_ldt(ptr, bytecount);
5905         break;
5906     case 1:
5907         ret = write_ldt(env, ptr, bytecount, 1);
5908         break;
5909     case 0x11:
5910         ret = write_ldt(env, ptr, bytecount, 0);
5911         break;
5912     default:
5913         ret = -TARGET_ENOSYS;
5914         break;
5915     }
5916     return ret;
5917 }
5918 
5919 #if defined(TARGET_ABI32)
5920 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5921 {
5922     uint64_t *gdt_table = g2h(env->gdt.base);
5923     struct target_modify_ldt_ldt_s ldt_info;
5924     struct target_modify_ldt_ldt_s *target_ldt_info;
5925     int seg_32bit, contents, read_exec_only, limit_in_pages;
5926     int seg_not_present, useable, lm;
5927     uint32_t *lp, entry_1, entry_2;
5928     int i;
5929 
5930     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5931     if (!target_ldt_info)
5932         return -TARGET_EFAULT;
5933     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5934     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5935     ldt_info.limit = tswap32(target_ldt_info->limit);
5936     ldt_info.flags = tswap32(target_ldt_info->flags);
5937     if (ldt_info.entry_number == -1) {
5938         for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
5939             if (gdt_table[i] == 0) {
5940                 ldt_info.entry_number = i;
5941                 target_ldt_info->entry_number = tswap32(i);
5942                 break;
5943             }
5944         }
5945     }
5946     unlock_user_struct(target_ldt_info, ptr, 1);
5947 
5948     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5949         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5950         return -TARGET_EINVAL;
5951     seg_32bit = ldt_info.flags & 1;
5952     contents = (ldt_info.flags >> 1) & 3;
5953     read_exec_only = (ldt_info.flags >> 3) & 1;
5954     limit_in_pages = (ldt_info.flags >> 4) & 1;
5955     seg_not_present = (ldt_info.flags >> 5) & 1;
5956     useable = (ldt_info.flags >> 6) & 1;
5957 #ifdef TARGET_ABI32
5958     lm = 0;
5959 #else
5960     lm = (ldt_info.flags >> 7) & 1;
5961 #endif
5962 
5963     if (contents == 3) {
5964         if (seg_not_present == 0)
5965             return -TARGET_EINVAL;
5966     }
5967 
5968     /* NOTE: same code as Linux kernel */
5969     /* Allow LDTs to be cleared by the user. */
5970     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5971         if ((contents == 0             &&
5972              read_exec_only == 1       &&
5973              seg_32bit == 0            &&
5974              limit_in_pages == 0       &&
5975              seg_not_present == 1      &&
5976              useable == 0 )) {
5977             entry_1 = 0;
5978             entry_2 = 0;
5979             goto install;
5980         }
5981     }
5982 
5983     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5984         (ldt_info.limit & 0x0ffff);
5985     entry_2 = (ldt_info.base_addr & 0xff000000) |
5986         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5987         (ldt_info.limit & 0xf0000) |
5988         ((read_exec_only ^ 1) << 9) |
5989         (contents << 10) |
5990         ((seg_not_present ^ 1) << 15) |
5991         (seg_32bit << 22) |
5992         (limit_in_pages << 23) |
5993         (useable << 20) |
5994         (lm << 21) |
5995         0x7000;
5996 
5997     /* Install the new entry ...  */
5998 install:
5999     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6000     lp[0] = tswap32(entry_1);
6001     lp[1] = tswap32(entry_2);
6002     return 0;
6003 }
6004 
6005 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6006 {
6007     struct target_modify_ldt_ldt_s *target_ldt_info;
6008     uint64_t *gdt_table = g2h(env->gdt.base);
6009     uint32_t base_addr, limit, flags;
6010     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6011     int seg_not_present, useable, lm;
6012     uint32_t *lp, entry_1, entry_2;
6013 
6014     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6015     if (!target_ldt_info)
6016         return -TARGET_EFAULT;
6017     idx = tswap32(target_ldt_info->entry_number);
6018     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6019         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6020         unlock_user_struct(target_ldt_info, ptr, 1);
6021         return -TARGET_EINVAL;
6022     }
6023     lp = (uint32_t *)(gdt_table + idx);
6024     entry_1 = tswap32(lp[0]);
6025     entry_2 = tswap32(lp[1]);
6026 
6027     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6028     contents = (entry_2 >> 10) & 3;
6029     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6030     seg_32bit = (entry_2 >> 22) & 1;
6031     limit_in_pages = (entry_2 >> 23) & 1;
6032     useable = (entry_2 >> 20) & 1;
6033 #ifdef TARGET_ABI32
6034     lm = 0;
6035 #else
6036     lm = (entry_2 >> 21) & 1;
6037 #endif
6038     flags = (seg_32bit << 0) | (contents << 1) |
6039         (read_exec_only << 3) | (limit_in_pages << 4) |
6040         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6041     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6042     base_addr = (entry_1 >> 16) |
6043         (entry_2 & 0xff000000) |
6044         ((entry_2 & 0xff) << 16);
6045     target_ldt_info->base_addr = tswapal(base_addr);
6046     target_ldt_info->limit = tswap32(limit);
6047     target_ldt_info->flags = tswap32(flags);
6048     unlock_user_struct(target_ldt_info, ptr, 1);
6049     return 0;
6050 }
6051 
6052 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6053 {
6054     return -TARGET_ENOSYS;
6055 }
6056 #else
6057 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6058 {
6059     abi_long ret = 0;
6060     abi_ulong val;
6061     int idx;
6062 
6063     switch(code) {
6064     case TARGET_ARCH_SET_GS:
6065     case TARGET_ARCH_SET_FS:
6066         if (code == TARGET_ARCH_SET_GS)
6067             idx = R_GS;
6068         else
6069             idx = R_FS;
6070         cpu_x86_load_seg(env, idx, 0);
6071         env->segs[idx].base = addr;
6072         break;
6073     case TARGET_ARCH_GET_GS:
6074     case TARGET_ARCH_GET_FS:
6075         if (code == TARGET_ARCH_GET_GS)
6076             idx = R_GS;
6077         else
6078             idx = R_FS;
6079         val = env->segs[idx].base;
6080         if (put_user(val, addr, abi_ulong))
6081             ret = -TARGET_EFAULT;
6082         break;
6083     default:
6084         ret = -TARGET_EINVAL;
6085         break;
6086     }
6087     return ret;
6088 }
6089 #endif /* defined(TARGET_ABI32) */
6090 
6091 #endif /* defined(TARGET_I386) */
6092 
6093 #define NEW_STACK_SIZE 0x40000
6094 
6095 
6096 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6097 typedef struct {
6098     CPUArchState *env;
6099     pthread_mutex_t mutex;
6100     pthread_cond_t cond;
6101     pthread_t thread;
6102     uint32_t tid;
6103     abi_ulong child_tidptr;
6104     abi_ulong parent_tidptr;
6105     sigset_t sigmask;
6106 } new_thread_info;
6107 
6108 static void *clone_func(void *arg)
6109 {
6110     new_thread_info *info = arg;
6111     CPUArchState *env;
6112     CPUState *cpu;
6113     TaskState *ts;
6114 
6115     rcu_register_thread();
6116     tcg_register_thread();
6117     env = info->env;
6118     cpu = env_cpu(env);
6119     thread_cpu = cpu;
6120     ts = (TaskState *)cpu->opaque;
6121     info->tid = sys_gettid();
6122     task_settid(ts);
6123     if (info->child_tidptr)
6124         put_user_u32(info->tid, info->child_tidptr);
6125     if (info->parent_tidptr)
6126         put_user_u32(info->tid, info->parent_tidptr);
6127     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6128     /* Enable signals.  */
6129     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6130     /* Signal to the parent that we're ready.  */
6131     pthread_mutex_lock(&info->mutex);
6132     pthread_cond_broadcast(&info->cond);
6133     pthread_mutex_unlock(&info->mutex);
6134     /* Wait until the parent has finished initializing the tls state.  */
6135     pthread_mutex_lock(&clone_lock);
6136     pthread_mutex_unlock(&clone_lock);
6137     cpu_loop(env);
6138     /* never exits */
6139     return NULL;
6140 }
6141 
6142 /* do_fork() must return host values and target errnos (unlike most
6143    do_*() functions). */
6144 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6145                    abi_ulong parent_tidptr, target_ulong newtls,
6146                    abi_ulong child_tidptr)
6147 {
6148     CPUState *cpu = env_cpu(env);
6149     int ret;
6150     TaskState *ts;
6151     CPUState *new_cpu;
6152     CPUArchState *new_env;
6153     sigset_t sigmask;
6154 
6155     flags &= ~CLONE_IGNORED_FLAGS;
6156 
6157     /* Emulate vfork() with fork() */
6158     if (flags & CLONE_VFORK)
6159         flags &= ~(CLONE_VFORK | CLONE_VM);
6160 
6161     if (flags & CLONE_VM) {
6162         TaskState *parent_ts = (TaskState *)cpu->opaque;
6163         new_thread_info info;
6164         pthread_attr_t attr;
6165 
6166         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6167             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6168             return -TARGET_EINVAL;
6169         }
6170 
6171         ts = g_new0(TaskState, 1);
6172         init_task_state(ts);
6173 
6174         /* Grab a mutex so that thread setup appears atomic.  */
6175         pthread_mutex_lock(&clone_lock);
6176 
6177         /* we create a new CPU instance. */
6178         new_env = cpu_copy(env);
6179         /* Init regs that differ from the parent.  */
6180         cpu_clone_regs_child(new_env, newsp, flags);
6181         cpu_clone_regs_parent(env, flags);
6182         new_cpu = env_cpu(new_env);
6183         new_cpu->opaque = ts;
6184         ts->bprm = parent_ts->bprm;
6185         ts->info = parent_ts->info;
6186         ts->signal_mask = parent_ts->signal_mask;
6187 
6188         if (flags & CLONE_CHILD_CLEARTID) {
6189             ts->child_tidptr = child_tidptr;
6190         }
6191 
6192         if (flags & CLONE_SETTLS) {
6193             cpu_set_tls (new_env, newtls);
6194         }
6195 
6196         memset(&info, 0, sizeof(info));
6197         pthread_mutex_init(&info.mutex, NULL);
6198         pthread_mutex_lock(&info.mutex);
6199         pthread_cond_init(&info.cond, NULL);
6200         info.env = new_env;
6201         if (flags & CLONE_CHILD_SETTID) {
6202             info.child_tidptr = child_tidptr;
6203         }
6204         if (flags & CLONE_PARENT_SETTID) {
6205             info.parent_tidptr = parent_tidptr;
6206         }
6207 
6208         ret = pthread_attr_init(&attr);
6209         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6210         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6211         /* It is not safe to deliver signals until the child has finished
6212            initializing, so temporarily block all signals.  */
6213         sigfillset(&sigmask);
6214         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6215         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6216 
6217         /* If this is our first additional thread, we need to ensure we
6218          * generate code for parallel execution and flush old translations.
6219          */
6220         if (!parallel_cpus) {
6221             parallel_cpus = true;
6222             tb_flush(cpu);
6223         }
6224 
6225         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6226         /* TODO: Free new CPU state if thread creation failed.  */
6227 
6228         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6229         pthread_attr_destroy(&attr);
6230         if (ret == 0) {
6231             /* Wait for the child to initialize.  */
6232             pthread_cond_wait(&info.cond, &info.mutex);
6233             ret = info.tid;
6234         } else {
6235             ret = -1;
6236         }
6237         pthread_mutex_unlock(&info.mutex);
6238         pthread_cond_destroy(&info.cond);
6239         pthread_mutex_destroy(&info.mutex);
6240         pthread_mutex_unlock(&clone_lock);
6241     } else {
6242         /* Without CLONE_VM, we treat it as a fork. */
6243         if (flags & CLONE_INVALID_FORK_FLAGS) {
6244             return -TARGET_EINVAL;
6245         }
6246 
6247         /* We can't support custom termination signals */
6248         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6249             return -TARGET_EINVAL;
6250         }
6251 
6252         if (block_signals()) {
6253             return -TARGET_ERESTARTSYS;
6254         }
6255 
6256         fork_start();
6257         ret = fork();
6258         if (ret == 0) {
6259             /* Child Process.  */
6260             cpu_clone_regs_child(env, newsp, flags);
6261             fork_end(1);
6262             /* There is a race condition here.  The parent process could
6263                theoretically read the TID in the child process before the child
6264                tid is set.  This would require using either ptrace
6265                (not implemented) or having *_tidptr point at a shared memory
6266                mapping.  We can't repeat the spinlock hack used above because
6267                the child process gets its own copy of the lock.  */
6268             if (flags & CLONE_CHILD_SETTID)
6269                 put_user_u32(sys_gettid(), child_tidptr);
6270             if (flags & CLONE_PARENT_SETTID)
6271                 put_user_u32(sys_gettid(), parent_tidptr);
6272             ts = (TaskState *)cpu->opaque;
6273             if (flags & CLONE_SETTLS)
6274                 cpu_set_tls (env, newtls);
6275             if (flags & CLONE_CHILD_CLEARTID)
6276                 ts->child_tidptr = child_tidptr;
6277         } else {
6278             cpu_clone_regs_parent(env, flags);
6279             fork_end(0);
6280         }
6281     }
6282     return ret;
6283 }
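
/*
 * For orientation, the two paths in do_fork() above map to typical guest
 * clone() calls roughly as follows (the flag combinations below are the
 * usual ones issued by glibc, shown only as an illustration):
 *
 *  - pthread_create() style: CLONE_VM | CLONE_FS | CLONE_FILES |
 *    CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *    CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID -- CLONE_VM is set, so a
 *    host pthread is created and runs cpu_loop() on a copied CPU state.
 *
 *  - fork() style: just SIGCHLD (possibly with CLONE_CHILD_SETTID /
 *    CLONE_CHILD_CLEARTID) -- no CLONE_VM, so the host fork() path is taken.
 *
 *  - vfork() style: CLONE_VFORK | CLONE_VM | SIGCHLD -- downgraded above to
 *    a plain fork(), since the host fork() path cannot share memory.
 */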
6284 
6285 /* Warning: does not handle Linux-specific flags... */
6286 static int target_to_host_fcntl_cmd(int cmd)
6287 {
6288     int ret;
6289 
6290     switch(cmd) {
6291     case TARGET_F_DUPFD:
6292     case TARGET_F_GETFD:
6293     case TARGET_F_SETFD:
6294     case TARGET_F_GETFL:
6295     case TARGET_F_SETFL:
6296     case TARGET_F_OFD_GETLK:
6297     case TARGET_F_OFD_SETLK:
6298     case TARGET_F_OFD_SETLKW:
6299         ret = cmd;
6300         break;
6301     case TARGET_F_GETLK:
6302         ret = F_GETLK64;
6303         break;
6304     case TARGET_F_SETLK:
6305         ret = F_SETLK64;
6306         break;
6307     case TARGET_F_SETLKW:
6308         ret = F_SETLKW64;
6309         break;
6310     case TARGET_F_GETOWN:
6311         ret = F_GETOWN;
6312         break;
6313     case TARGET_F_SETOWN:
6314         ret = F_SETOWN;
6315         break;
6316     case TARGET_F_GETSIG:
6317         ret = F_GETSIG;
6318         break;
6319     case TARGET_F_SETSIG:
6320         ret = F_SETSIG;
6321         break;
6322 #if TARGET_ABI_BITS == 32
6323     case TARGET_F_GETLK64:
6324         ret = F_GETLK64;
6325         break;
6326     case TARGET_F_SETLK64:
6327         ret = F_SETLK64;
6328         break;
6329     case TARGET_F_SETLKW64:
6330         ret = F_SETLKW64;
6331         break;
6332 #endif
6333     case TARGET_F_SETLEASE:
6334         ret = F_SETLEASE;
6335         break;
6336     case TARGET_F_GETLEASE:
6337         ret = F_GETLEASE;
6338         break;
6339 #ifdef F_DUPFD_CLOEXEC
6340     case TARGET_F_DUPFD_CLOEXEC:
6341         ret = F_DUPFD_CLOEXEC;
6342         break;
6343 #endif
6344     case TARGET_F_NOTIFY:
6345         ret = F_NOTIFY;
6346         break;
6347 #ifdef F_GETOWN_EX
6348     case TARGET_F_GETOWN_EX:
6349         ret = F_GETOWN_EX;
6350         break;
6351 #endif
6352 #ifdef F_SETOWN_EX
6353     case TARGET_F_SETOWN_EX:
6354         ret = F_SETOWN_EX;
6355         break;
6356 #endif
6357 #ifdef F_SETPIPE_SZ
6358     case TARGET_F_SETPIPE_SZ:
6359         ret = F_SETPIPE_SZ;
6360         break;
6361     case TARGET_F_GETPIPE_SZ:
6362         ret = F_GETPIPE_SZ;
6363         break;
6364 #endif
6365     default:
6366         ret = -TARGET_EINVAL;
6367         break;
6368     }
6369 
6370 #if defined(__powerpc64__)
6371     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6372      * which the kernel does not support. The glibc fcntl wrapper adjusts
6373      * them to 5, 6 and 7 before making the syscall(). Since we make the
6374      * syscall directly, adjust to what the kernel supports.
6375      */
6376     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6377         ret -= F_GETLK64 - 5;
6378     }
6379 #endif
6380 
6381     return ret;
6382 }
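
/*
 * A worked example of the PPC64 adjustment above, assuming the glibc values
 * F_GETLK64 = 12, F_SETLK64 = 13 and F_SETLKW64 = 14:
 *
 *     TARGET_F_SETLK  ->  F_SETLK64 (13)  ->  13 - (12 - 5)  =  6
 *
 * which is the F_SETLK value the PPC64 kernel actually accepts.
 */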
6383 
6384 #define FLOCK_TRANSTBL \
6385     switch (type) { \
6386     TRANSTBL_CONVERT(F_RDLCK); \
6387     TRANSTBL_CONVERT(F_WRLCK); \
6388     TRANSTBL_CONVERT(F_UNLCK); \
6389     TRANSTBL_CONVERT(F_EXLCK); \
6390     TRANSTBL_CONVERT(F_SHLCK); \
6391     }
6392 
6393 static int target_to_host_flock(int type)
6394 {
6395 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6396     FLOCK_TRANSTBL
6397 #undef  TRANSTBL_CONVERT
6398     return -TARGET_EINVAL;
6399 }
6400 
6401 static int host_to_target_flock(int type)
6402 {
6403 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6404     FLOCK_TRANSTBL
6405 #undef  TRANSTBL_CONVERT
6406     /* If we don't know how to convert the value coming from the host,
6407      * we copy it to the target field as-is.
6408      */
6409     return type;
6410 }
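
/*
 * Illustrative sketch (compiled out): what the TRANSTBL_CONVERT X-macro
 * expands to inside target_to_host_flock().  Only two entries are spelled
 * out here; the real expansion covers all five lock types.
 */
#if 0
static int target_to_host_flock_expanded(int type)
{
    switch (type) {
    case TARGET_F_RDLCK: return F_RDLCK;
    case TARGET_F_WRLCK: return F_WRLCK;
    /* ...likewise for F_UNLCK, F_EXLCK and F_SHLCK */
    }
    return -TARGET_EINVAL;
}
#endif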
6411 
6412 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6413                                             abi_ulong target_flock_addr)
6414 {
6415     struct target_flock *target_fl;
6416     int l_type;
6417 
6418     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6419         return -TARGET_EFAULT;
6420     }
6421 
6422     __get_user(l_type, &target_fl->l_type);
6423     l_type = target_to_host_flock(l_type);
6424     if (l_type < 0) {
6424         unlock_user_struct(target_fl, target_flock_addr, 0);
6425         return l_type;
6426     }
6427     fl->l_type = l_type;
6428     __get_user(fl->l_whence, &target_fl->l_whence);
6429     __get_user(fl->l_start, &target_fl->l_start);
6430     __get_user(fl->l_len, &target_fl->l_len);
6431     __get_user(fl->l_pid, &target_fl->l_pid);
6432     unlock_user_struct(target_fl, target_flock_addr, 0);
6433     return 0;
6434 }
6435 
6436 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6437                                           const struct flock64 *fl)
6438 {
6439     struct target_flock *target_fl;
6440     short l_type;
6441 
6442     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6443         return -TARGET_EFAULT;
6444     }
6445 
6446     l_type = host_to_target_flock(fl->l_type);
6447     __put_user(l_type, &target_fl->l_type);
6448     __put_user(fl->l_whence, &target_fl->l_whence);
6449     __put_user(fl->l_start, &target_fl->l_start);
6450     __put_user(fl->l_len, &target_fl->l_len);
6451     __put_user(fl->l_pid, &target_fl->l_pid);
6452     unlock_user_struct(target_fl, target_flock_addr, 1);
6453     return 0;
6454 }
6455 
6456 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6457 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6458 
6459 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6460 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6461                                                    abi_ulong target_flock_addr)
6462 {
6463     struct target_oabi_flock64 *target_fl;
6464     int l_type;
6465 
6466     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6467         return -TARGET_EFAULT;
6468     }
6469 
6470     __get_user(l_type, &target_fl->l_type);
6471     l_type = target_to_host_flock(l_type);
6472     if (l_type < 0) {
6472         unlock_user_struct(target_fl, target_flock_addr, 0);
6473         return l_type;
6474     }
6475     fl->l_type = l_type;
6476     __get_user(fl->l_whence, &target_fl->l_whence);
6477     __get_user(fl->l_start, &target_fl->l_start);
6478     __get_user(fl->l_len, &target_fl->l_len);
6479     __get_user(fl->l_pid, &target_fl->l_pid);
6480     unlock_user_struct(target_fl, target_flock_addr, 0);
6481     return 0;
6482 }
6483 
6484 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6485                                                  const struct flock64 *fl)
6486 {
6487     struct target_oabi_flock64 *target_fl;
6488     short l_type;
6489 
6490     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6491         return -TARGET_EFAULT;
6492     }
6493 
6494     l_type = host_to_target_flock(fl->l_type);
6495     __put_user(l_type, &target_fl->l_type);
6496     __put_user(fl->l_whence, &target_fl->l_whence);
6497     __put_user(fl->l_start, &target_fl->l_start);
6498     __put_user(fl->l_len, &target_fl->l_len);
6499     __put_user(fl->l_pid, &target_fl->l_pid);
6500     unlock_user_struct(target_fl, target_flock_addr, 1);
6501     return 0;
6502 }
6503 #endif
6504 
6505 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6506                                               abi_ulong target_flock_addr)
6507 {
6508     struct target_flock64 *target_fl;
6509     int l_type;
6510 
6511     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6512         return -TARGET_EFAULT;
6513     }
6514 
6515     __get_user(l_type, &target_fl->l_type);
6516     l_type = target_to_host_flock(l_type);
6517     if (l_type < 0) {
6517         unlock_user_struct(target_fl, target_flock_addr, 0);
6518         return l_type;
6519     }
6520     fl->l_type = l_type;
6521     __get_user(fl->l_whence, &target_fl->l_whence);
6522     __get_user(fl->l_start, &target_fl->l_start);
6523     __get_user(fl->l_len, &target_fl->l_len);
6524     __get_user(fl->l_pid, &target_fl->l_pid);
6525     unlock_user_struct(target_fl, target_flock_addr, 0);
6526     return 0;
6527 }
6528 
6529 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6530                                             const struct flock64 *fl)
6531 {
6532     struct target_flock64 *target_fl;
6533     short l_type;
6534 
6535     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6536         return -TARGET_EFAULT;
6537     }
6538 
6539     l_type = host_to_target_flock(fl->l_type);
6540     __put_user(l_type, &target_fl->l_type);
6541     __put_user(fl->l_whence, &target_fl->l_whence);
6542     __put_user(fl->l_start, &target_fl->l_start);
6543     __put_user(fl->l_len, &target_fl->l_len);
6544     __put_user(fl->l_pid, &target_fl->l_pid);
6545     unlock_user_struct(target_fl, target_flock_addr, 1);
6546     return 0;
6547 }
6548 
6549 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6550 {
6551     struct flock64 fl64;
6552 #ifdef F_GETOWN_EX
6553     struct f_owner_ex fox;
6554     struct target_f_owner_ex *target_fox;
6555 #endif
6556     abi_long ret;
6557     int host_cmd = target_to_host_fcntl_cmd(cmd);
6558 
6559     if (host_cmd == -TARGET_EINVAL)
6560         return host_cmd;
6561 
6562     switch(cmd) {
6563     case TARGET_F_GETLK:
6564         ret = copy_from_user_flock(&fl64, arg);
6565         if (ret) {
6566             return ret;
6567         }
6568         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6569         if (ret == 0) {
6570             ret = copy_to_user_flock(arg, &fl64);
6571         }
6572         break;
6573 
6574     case TARGET_F_SETLK:
6575     case TARGET_F_SETLKW:
6576         ret = copy_from_user_flock(&fl64, arg);
6577         if (ret) {
6578             return ret;
6579         }
6580         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6581         break;
6582 
6583     case TARGET_F_GETLK64:
6584     case TARGET_F_OFD_GETLK:
6585         ret = copy_from_user_flock64(&fl64, arg);
6586         if (ret) {
6587             return ret;
6588         }
6589         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6590         if (ret == 0) {
6591             ret = copy_to_user_flock64(arg, &fl64);
6592         }
6593         break;
6594     case TARGET_F_SETLK64:
6595     case TARGET_F_SETLKW64:
6596     case TARGET_F_OFD_SETLK:
6597     case TARGET_F_OFD_SETLKW:
6598         ret = copy_from_user_flock64(&fl64, arg);
6599         if (ret) {
6600             return ret;
6601         }
6602         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6603         break;
6604 
6605     case TARGET_F_GETFL:
6606         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6607         if (ret >= 0) {
6608             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6609         }
6610         break;
6611 
6612     case TARGET_F_SETFL:
6613         ret = get_errno(safe_fcntl(fd, host_cmd,
6614                                    target_to_host_bitmask(arg,
6615                                                           fcntl_flags_tbl)));
6616         break;
6617 
6618 #ifdef F_GETOWN_EX
6619     case TARGET_F_GETOWN_EX:
6620         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6621         if (ret >= 0) {
6622             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6623                 return -TARGET_EFAULT;
6624             target_fox->type = tswap32(fox.type);
6625             target_fox->pid = tswap32(fox.pid);
6626             unlock_user_struct(target_fox, arg, 1);
6627         }
6628         break;
6629 #endif
6630 
6631 #ifdef F_SETOWN_EX
6632     case TARGET_F_SETOWN_EX:
6633         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6634             return -TARGET_EFAULT;
6635         fox.type = tswap32(target_fox->type);
6636         fox.pid = tswap32(target_fox->pid);
6637         unlock_user_struct(target_fox, arg, 0);
6638         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6639         break;
6640 #endif
6641 
6642     case TARGET_F_SETOWN:
6643     case TARGET_F_GETOWN:
6644     case TARGET_F_SETSIG:
6645     case TARGET_F_GETSIG:
6646     case TARGET_F_SETLEASE:
6647     case TARGET_F_GETLEASE:
6648     case TARGET_F_SETPIPE_SZ:
6649     case TARGET_F_GETPIPE_SZ:
6650         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6651         break;
6652 
6653     default:
6654         ret = get_errno(safe_fcntl(fd, cmd, arg));
6655         break;
6656     }
6657     return ret;
6658 }
6659 
6660 #ifdef USE_UID16
6661 
6662 static inline int high2lowuid(int uid)
6663 {
6664     if (uid > 65535)
6665         return 65534;
6666     else
6667         return uid;
6668 }
6669 
6670 static inline int high2lowgid(int gid)
6671 {
6672     if (gid > 65535)
6673         return 65534;
6674     else
6675         return gid;
6676 }
6677 
6678 static inline int low2highuid(int uid)
6679 {
6680     if ((int16_t)uid == -1)
6681         return -1;
6682     else
6683         return uid;
6684 }
6685 
6686 static inline int low2highgid(int gid)
6687 {
6688     if ((int16_t)gid == -1)
6689         return -1;
6690     else
6691         return gid;
6692 }
6693 static inline int tswapid(int id)
6694 {
6695     return tswap16(id);
6696 }
6697 
6698 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6699 
6700 #else /* !USE_UID16 */
6701 static inline int high2lowuid(int uid)
6702 {
6703     return uid;
6704 }
6705 static inline int high2lowgid(int gid)
6706 {
6707     return gid;
6708 }
6709 static inline int low2highuid(int uid)
6710 {
6711     return uid;
6712 }
6713 static inline int low2highgid(int gid)
6714 {
6715     return gid;
6716 }
6717 static inline int tswapid(int id)
6718 {
6719     return tswap32(id);
6720 }
6721 
6722 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6723 
6724 #endif /* USE_UID16 */
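
/*
 * A couple of worked cases for the 16-bit ID helpers above (values are
 * hypothetical):
 *
 *     high2lowuid(100000) == 65534   anything above 65535 is reported as the
 *                                    traditional "overflow" uid
 *     low2highuid(0xffff) == -1      the 16-bit -1 "unchanged" sentinel is
 *                                    sign-extended so it stays -1 rather
 *                                    than becoming 65535
 */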
6725 
6726 /* We must do direct syscalls for setting UID/GID, because we want to
6727  * implement the Linux system call semantics of "change only for this thread",
6728  * not the libc/POSIX semantics of "change for all threads in process".
6729  * (See http://ewontfix.com/17/ for more details.)
6730  * We use the 32-bit version of the syscalls if present; if it is not
6731  * then either the host architecture supports 32-bit UIDs natively with
6732  * the standard syscall, or the 16-bit UID is the best we can do.
6733  */
6734 #ifdef __NR_setuid32
6735 #define __NR_sys_setuid __NR_setuid32
6736 #else
6737 #define __NR_sys_setuid __NR_setuid
6738 #endif
6739 #ifdef __NR_setgid32
6740 #define __NR_sys_setgid __NR_setgid32
6741 #else
6742 #define __NR_sys_setgid __NR_setgid
6743 #endif
6744 #ifdef __NR_setresuid32
6745 #define __NR_sys_setresuid __NR_setresuid32
6746 #else
6747 #define __NR_sys_setresuid __NR_setresuid
6748 #endif
6749 #ifdef __NR_setresgid32
6750 #define __NR_sys_setresgid __NR_setresgid32
6751 #else
6752 #define __NR_sys_setresgid __NR_setresgid
6753 #endif
6754 
6755 _syscall1(int, sys_setuid, uid_t, uid)
6756 _syscall1(int, sys_setgid, gid_t, gid)
6757 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6758 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
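
/*
 * Illustrative sketch (compiled out) of why the raw syscall wrappers above
 * are used instead of the libc functions; the snippet is hypothetical and
 * not part of QEMU.
 */
#if 0
/* glibc's setuid() broadcasts the credential change to every thread in the
 * process to provide POSIX semantics, whereas a guest expects the raw Linux
 * semantics of "change only the calling thread": */
setuid(uid);                   /* would affect every emulator thread       */
sys_setuid(low2highuid(uid));  /* affects only the calling (guest) thread  */
#endif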
6759 
6760 void syscall_init(void)
6761 {
6762     IOCTLEntry *ie;
6763     const argtype *arg_type;
6764     int size;
6765     int i;
6766 
6767     thunk_init(STRUCT_MAX);
6768 
6769 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6770 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6771 #include "syscall_types.h"
6772 #undef STRUCT
6773 #undef STRUCT_SPECIAL
6774 
6775     /* Build target_to_host_errno_table[] from
6776      * host_to_target_errno_table[]. */
6777     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6778         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6779     }
6780 
6781     /* We patch the ioctl size if necessary. We rely on the fact that
6782        no ioctl has all the bits at '1' in the size field. */
6783     ie = ioctl_entries;
6784     while (ie->target_cmd != 0) {
6785         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6786             TARGET_IOC_SIZEMASK) {
6787             arg_type = ie->arg_type;
6788             if (arg_type[0] != TYPE_PTR) {
6789                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6790                         ie->target_cmd);
6791                 exit(1);
6792             }
6793             arg_type++;
6794             size = thunk_type_size(arg_type, 0);
6795             ie->target_cmd = (ie->target_cmd &
6796                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6797                 (size << TARGET_IOC_SIZESHIFT);
6798         }
6799 
6800         /* automatic consistency check if same arch */
6801 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6802     (defined(__x86_64__) && defined(TARGET_X86_64))
6803         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6804             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6805                     ie->name, ie->target_cmd, ie->host_cmd);
6806         }
6807 #endif
6808         ie++;
6809     }
6810 }
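
/*
 * A worked example of the size-patching loop above.  Suppose a hypothetical
 * ioctl whose table entry was generated with a wildcard size, i.e. every bit
 * of the size field set.  At init time the real size of the pointed-to
 * structure is computed with thunk_type_size() and spliced back in:
 *
 *     target_cmd = (target_cmd &
 *                   ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
 *                  (size << TARGET_IOC_SIZESHIFT);
 *
 * so the table ends up holding the correctly encoded command number for
 * this target.
 */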
6811 
6812 #ifdef TARGET_NR_truncate64
6813 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6814                                          abi_long arg2,
6815                                          abi_long arg3,
6816                                          abi_long arg4)
6817 {
6818     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6819         arg2 = arg3;
6820         arg3 = arg4;
6821     }
6822     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6823 }
6824 #endif
6825 
6826 #ifdef TARGET_NR_ftruncate64
6827 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6828                                           abi_long arg2,
6829                                           abi_long arg3,
6830                                           abi_long arg4)
6831 {
6832     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6833         arg2 = arg3;
6834         arg3 = arg4;
6835     }
6836     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6837 }
6838 #endif
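
/*
 * An illustrative example of the regpairs_aligned() shuffle in the two
 * wrappers above.  On ABIs that pass 64-bit syscall arguments in aligned
 * register pairs (32-bit ARM EABI, for instance), a guest
 * ftruncate64(fd, offset) arrives as
 *
 *     arg1 = fd, arg2 = <padding>, arg3/arg4 = offset halves
 *
 * so the wrapper drops the padding slot before recombining the halves with
 * target_offset64(), which knows the target's word order.
 */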
6839 
6840 #if defined(TARGET_NR_timer_settime) || \
6841     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6842 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6843                                                  abi_ulong target_addr)
6844 {
6845     if (target_to_host_timespec(&host_its->it_interval, target_addr +
6846                                 offsetof(struct target_itimerspec,
6847                                          it_interval)) ||
6848         target_to_host_timespec(&host_its->it_value, target_addr +
6849                                 offsetof(struct target_itimerspec,
6850                                          it_value))) {
6851         return -TARGET_EFAULT;
6852     }
6853 
6854     return 0;
6855 }
6856 #endif
6857 
6858 #if defined(TARGET_NR_timer_settime64) || \
6859     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6860 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6861                                                    abi_ulong target_addr)
6862 {
6863     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6864                                   offsetof(struct target__kernel_itimerspec,
6865                                            it_interval)) ||
6866         target_to_host_timespec64(&host_its->it_value, target_addr +
6867                                   offsetof(struct target__kernel_itimerspec,
6868                                            it_value))) {
6869         return -TARGET_EFAULT;
6870     }
6871 
6872     return 0;
6873 }
6874 #endif
6875 
6876 #if ((defined(TARGET_NR_timerfd_gettime) || \
6877       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6878       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6879 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6880                                                  struct itimerspec *host_its)
6881 {
6882     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6883                                                        it_interval),
6884                                 &host_its->it_interval) ||
6885         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6886                                                        it_value),
6887                                 &host_its->it_value)) {
6888         return -TARGET_EFAULT;
6889     }
6890     return 0;
6891 }
6892 #endif
6893 
6894 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6895       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6896       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6897 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6898                                                    struct itimerspec *host_its)
6899 {
6900     if (host_to_target_timespec64(target_addr +
6901                                   offsetof(struct target__kernel_itimerspec,
6902                                            it_interval),
6903                                   &host_its->it_interval) ||
6904         host_to_target_timespec64(target_addr +
6905                                   offsetof(struct target__kernel_itimerspec,
6906                                            it_value),
6907                                   &host_its->it_value)) {
6908         return -TARGET_EFAULT;
6909     }
6910     return 0;
6911 }
6912 #endif
6913 
6914 #if defined(TARGET_NR_adjtimex) || \
6915     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6916 static inline abi_long target_to_host_timex(struct timex *host_tx,
6917                                             abi_long target_addr)
6918 {
6919     struct target_timex *target_tx;
6920 
6921     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6922         return -TARGET_EFAULT;
6923     }
6924 
6925     __get_user(host_tx->modes, &target_tx->modes);
6926     __get_user(host_tx->offset, &target_tx->offset);
6927     __get_user(host_tx->freq, &target_tx->freq);
6928     __get_user(host_tx->maxerror, &target_tx->maxerror);
6929     __get_user(host_tx->esterror, &target_tx->esterror);
6930     __get_user(host_tx->status, &target_tx->status);
6931     __get_user(host_tx->constant, &target_tx->constant);
6932     __get_user(host_tx->precision, &target_tx->precision);
6933     __get_user(host_tx->tolerance, &target_tx->tolerance);
6934     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6935     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6936     __get_user(host_tx->tick, &target_tx->tick);
6937     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6938     __get_user(host_tx->jitter, &target_tx->jitter);
6939     __get_user(host_tx->shift, &target_tx->shift);
6940     __get_user(host_tx->stabil, &target_tx->stabil);
6941     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6942     __get_user(host_tx->calcnt, &target_tx->calcnt);
6943     __get_user(host_tx->errcnt, &target_tx->errcnt);
6944     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6945     __get_user(host_tx->tai, &target_tx->tai);
6946 
6947     unlock_user_struct(target_tx, target_addr, 0);
6948     return 0;
6949 }
6950 
6951 static inline abi_long host_to_target_timex(abi_long target_addr,
6952                                             struct timex *host_tx)
6953 {
6954     struct target_timex *target_tx;
6955 
6956     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     __put_user(host_tx->modes, &target_tx->modes);
6961     __put_user(host_tx->offset, &target_tx->offset);
6962     __put_user(host_tx->freq, &target_tx->freq);
6963     __put_user(host_tx->maxerror, &target_tx->maxerror);
6964     __put_user(host_tx->esterror, &target_tx->esterror);
6965     __put_user(host_tx->status, &target_tx->status);
6966     __put_user(host_tx->constant, &target_tx->constant);
6967     __put_user(host_tx->precision, &target_tx->precision);
6968     __put_user(host_tx->tolerance, &target_tx->tolerance);
6969     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6970     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6971     __put_user(host_tx->tick, &target_tx->tick);
6972     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6973     __put_user(host_tx->jitter, &target_tx->jitter);
6974     __put_user(host_tx->shift, &target_tx->shift);
6975     __put_user(host_tx->stabil, &target_tx->stabil);
6976     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6977     __put_user(host_tx->calcnt, &target_tx->calcnt);
6978     __put_user(host_tx->errcnt, &target_tx->errcnt);
6979     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6980     __put_user(host_tx->tai, &target_tx->tai);
6981 
6982     unlock_user_struct(target_tx, target_addr, 1);
6983     return 0;
6984 }
6985 #endif
6986 
6987 
6988 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
6989 static inline abi_long target_to_host_timex64(struct timex *host_tx,
6990                                               abi_long target_addr)
6991 {
6992     struct target__kernel_timex *target_tx;
6993 
6994     if (copy_from_user_timeval64(&host_tx->time, target_addr +
6995                                  offsetof(struct target__kernel_timex,
6996                                           time))) {
6997         return -TARGET_EFAULT;
6998     }
6999 
7000     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     __get_user(host_tx->modes, &target_tx->modes);
7005     __get_user(host_tx->offset, &target_tx->offset);
7006     __get_user(host_tx->freq, &target_tx->freq);
7007     __get_user(host_tx->maxerror, &target_tx->maxerror);
7008     __get_user(host_tx->esterror, &target_tx->esterror);
7009     __get_user(host_tx->status, &target_tx->status);
7010     __get_user(host_tx->constant, &target_tx->constant);
7011     __get_user(host_tx->precision, &target_tx->precision);
7012     __get_user(host_tx->tolerance, &target_tx->tolerance);
7013     __get_user(host_tx->tick, &target_tx->tick);
7014     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7015     __get_user(host_tx->jitter, &target_tx->jitter);
7016     __get_user(host_tx->shift, &target_tx->shift);
7017     __get_user(host_tx->stabil, &target_tx->stabil);
7018     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7019     __get_user(host_tx->calcnt, &target_tx->calcnt);
7020     __get_user(host_tx->errcnt, &target_tx->errcnt);
7021     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7022     __get_user(host_tx->tai, &target_tx->tai);
7023 
7024     unlock_user_struct(target_tx, target_addr, 0);
7025     return 0;
7026 }
7027 
7028 static inline abi_long host_to_target_timex64(abi_long target_addr,
7029                                               struct timex *host_tx)
7030 {
7031     struct target__kernel_timex *target_tx;
7032 
7033     if (copy_to_user_timeval64(target_addr +
7034                               offsetof(struct target__kernel_timex, time),
7035                               &host_tx->time)) {
7036         return -TARGET_EFAULT;
7037     }
7038 
7039     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7040         return -TARGET_EFAULT;
7041     }
7042 
7043     __put_user(host_tx->modes, &target_tx->modes);
7044     __put_user(host_tx->offset, &target_tx->offset);
7045     __put_user(host_tx->freq, &target_tx->freq);
7046     __put_user(host_tx->maxerror, &target_tx->maxerror);
7047     __put_user(host_tx->esterror, &target_tx->esterror);
7048     __put_user(host_tx->status, &target_tx->status);
7049     __put_user(host_tx->constant, &target_tx->constant);
7050     __put_user(host_tx->precision, &target_tx->precision);
7051     __put_user(host_tx->tolerance, &target_tx->tolerance);
7052     __put_user(host_tx->tick, &target_tx->tick);
7053     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7054     __put_user(host_tx->jitter, &target_tx->jitter);
7055     __put_user(host_tx->shift, &target_tx->shift);
7056     __put_user(host_tx->stabil, &target_tx->stabil);
7057     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7058     __put_user(host_tx->calcnt, &target_tx->calcnt);
7059     __put_user(host_tx->errcnt, &target_tx->errcnt);
7060     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7061     __put_user(host_tx->tai, &target_tx->tai);
7062 
7063     unlock_user_struct(target_tx, target_addr, 1);
7064     return 0;
7065 }
7066 #endif
7067 
7068 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7069                                                abi_ulong target_addr)
7070 {
7071     struct target_sigevent *target_sevp;
7072 
7073     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7074         return -TARGET_EFAULT;
7075     }
7076 
7077     /* This union is awkward on 64-bit systems because it has a 32-bit
7078      * integer and a pointer in it; we follow the conversion approach
7079      * used for handling sigval types in signal.c so the guest should get
7080      * the correct value back even if we did a 64-bit byteswap and it's
7081      * using the 32-bit integer.
7082      */
7083     host_sevp->sigev_value.sival_ptr =
7084         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7085     host_sevp->sigev_signo =
7086         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7087     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7088     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7089 
7090     unlock_user_struct(target_sevp, target_addr, 1);
7091     return 0;
7092 }
7093 
7094 #if defined(TARGET_NR_mlockall)
7095 static inline int target_to_host_mlockall_arg(int arg)
7096 {
7097     int result = 0;
7098 
7099     if (arg & TARGET_MCL_CURRENT) {
7100         result |= MCL_CURRENT;
7101     }
7102     if (arg & TARGET_MCL_FUTURE) {
7103         result |= MCL_FUTURE;
7104     }
7105 #ifdef MCL_ONFAULT
7106     if (arg & TARGET_MCL_ONFAULT) {
7107         result |= MCL_ONFAULT;
7108     }
7109 #endif
7110 
7111     return result;
7112 }
7113 #endif
7114 
7115 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7116      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7117      defined(TARGET_NR_newfstatat))
7118 static inline abi_long host_to_target_stat64(void *cpu_env,
7119                                              abi_ulong target_addr,
7120                                              struct stat *host_st)
7121 {
7122 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7123     if (((CPUARMState *)cpu_env)->eabi) {
7124         struct target_eabi_stat64 *target_st;
7125 
7126         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7127             return -TARGET_EFAULT;
7128         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7129         __put_user(host_st->st_dev, &target_st->st_dev);
7130         __put_user(host_st->st_ino, &target_st->st_ino);
7131 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7132         __put_user(host_st->st_ino, &target_st->__st_ino);
7133 #endif
7134         __put_user(host_st->st_mode, &target_st->st_mode);
7135         __put_user(host_st->st_nlink, &target_st->st_nlink);
7136         __put_user(host_st->st_uid, &target_st->st_uid);
7137         __put_user(host_st->st_gid, &target_st->st_gid);
7138         __put_user(host_st->st_rdev, &target_st->st_rdev);
7139         __put_user(host_st->st_size, &target_st->st_size);
7140         __put_user(host_st->st_blksize, &target_st->st_blksize);
7141         __put_user(host_st->st_blocks, &target_st->st_blocks);
7142         __put_user(host_st->st_atime, &target_st->target_st_atime);
7143         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7144         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7145 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7146         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7147         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7148         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7149 #endif
7150         unlock_user_struct(target_st, target_addr, 1);
7151     } else
7152 #endif
7153     {
7154 #if defined(TARGET_HAS_STRUCT_STAT64)
7155         struct target_stat64 *target_st;
7156 #else
7157         struct target_stat *target_st;
7158 #endif
7159 
7160         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7161             return -TARGET_EFAULT;
7162         memset(target_st, 0, sizeof(*target_st));
7163         __put_user(host_st->st_dev, &target_st->st_dev);
7164         __put_user(host_st->st_ino, &target_st->st_ino);
7165 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7166         __put_user(host_st->st_ino, &target_st->__st_ino);
7167 #endif
7168         __put_user(host_st->st_mode, &target_st->st_mode);
7169         __put_user(host_st->st_nlink, &target_st->st_nlink);
7170         __put_user(host_st->st_uid, &target_st->st_uid);
7171         __put_user(host_st->st_gid, &target_st->st_gid);
7172         __put_user(host_st->st_rdev, &target_st->st_rdev);
7173         /* XXX: better use of kernel struct */
7174         __put_user(host_st->st_size, &target_st->st_size);
7175         __put_user(host_st->st_blksize, &target_st->st_blksize);
7176         __put_user(host_st->st_blocks, &target_st->st_blocks);
7177         __put_user(host_st->st_atime, &target_st->target_st_atime);
7178         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7179         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7180 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7181         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7182         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7183         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7184 #endif
7185         unlock_user_struct(target_st, target_addr, 1);
7186     }
7187 
7188     return 0;
7189 }
7190 #endif
7191 
7192 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7193 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7194                                             abi_ulong target_addr)
7195 {
7196     struct target_statx *target_stx;
7197 
7198     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7199         return -TARGET_EFAULT;
7200     }
7201     memset(target_stx, 0, sizeof(*target_stx));
7202 
7203     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7204     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7205     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7206     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7207     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7208     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7209     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7210     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7211     __put_user(host_stx->stx_size, &target_stx->stx_size);
7212     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7213     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7214     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7215     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7216     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7217     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7218     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7219     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7220     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7221     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7222     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7223     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7224     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7225     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7226 
7227     unlock_user_struct(target_stx, target_addr, 1);
7228 
7229     return 0;
7230 }
7231 #endif
7232 
7233 static int do_sys_futex(int *uaddr, int op, int val,
7234                          const struct timespec *timeout, int *uaddr2,
7235                          int val3)
7236 {
7237 #if HOST_LONG_BITS == 64
7238 #if defined(__NR_futex)
7239     /* The host time_t is always 64-bit here; there is no _time64 variant. */
7240     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7241 
7242 #endif
7243 #else /* HOST_LONG_BITS == 64 */
7244 #if defined(__NR_futex_time64)
7245     if (sizeof(timeout->tv_sec) == 8) {
7246         /* _time64 function on 32bit arch */
7247         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7248     }
7249 #endif
7250 #if defined(__NR_futex)
7251     /* old function on 32bit arch */
7252     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7253 #endif
7254 #endif /* HOST_LONG_BITS == 64 */
7255     g_assert_not_reached();
7256 }
7257 
7258 static int do_safe_futex(int *uaddr, int op, int val,
7259                          const struct timespec *timeout, int *uaddr2,
7260                          int val3)
7261 {
7262 #if HOST_LONG_BITS == 64
7263 #if defined(__NR_futex)
7264     /* The host time_t is always 64-bit here; there is no _time64 variant. */
7265     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7266 #endif
7267 #else /* HOST_LONG_BITS == 64 */
7268 #if defined(__NR_futex_time64)
7269     if (sizeof(timeout->tv_sec) == 8) {
7270         /* _time64 function on 32bit arch */
7271         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7272                                            val3));
7273     }
7274 #endif
7275 #if defined(__NR_futex)
7276     /* old function on 32bit arch */
7277     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7278 #endif
7279 #endif /* HOST_LONG_BITS == 64 */
7280     return -TARGET_ENOSYS;
7281 }
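
/*
 * The syscall selection in the two helpers above, restated as a decision
 * table:
 *
 *     64-bit host                  -> __NR_futex (time_t is already 64-bit)
 *     32-bit host, 64-bit tv_sec   -> __NR_futex_time64, when the host
 *                                     kernel headers define it
 *     32-bit host, 32-bit tv_sec   -> __NR_futex
 *
 * If no suitable syscall number exists, do_sys_futex() aborts and
 * do_safe_futex() returns -TARGET_ENOSYS.
 */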
7282 
7283 /* ??? Using host futex calls even when target atomic operations
7284    are not really atomic probably breaks things.  However, implementing
7285    futexes locally would make futexes shared between multiple processes
7286    tricky.  In that case they would probably be useless anyway, because
7287    guest atomic operations would not work either.  */
7288 #if defined(TARGET_NR_futex)
7289 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7290                     target_ulong uaddr2, int val3)
7291 {
7292     struct timespec ts, *pts;
7293     int base_op;
7294 
7295     /* ??? We assume FUTEX_* constants are the same on both host
7296        and target.  */
7297 #ifdef FUTEX_CMD_MASK
7298     base_op = op & FUTEX_CMD_MASK;
7299 #else
7300     base_op = op;
7301 #endif
7302     switch (base_op) {
7303     case FUTEX_WAIT:
7304     case FUTEX_WAIT_BITSET:
7305         if (timeout) {
7306             pts = &ts;
7307             target_to_host_timespec(pts, timeout);
7308         } else {
7309             pts = NULL;
7310         }
7311         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7312     case FUTEX_WAKE:
7313         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7314     case FUTEX_FD:
7315         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7316     case FUTEX_REQUEUE:
7317     case FUTEX_CMP_REQUEUE:
7318     case FUTEX_WAKE_OP:
7319         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7320            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7321            But the prototype takes a `struct timespec *'; insert casts
7322            to satisfy the compiler.  We do not need to tswap TIMEOUT
7323            since it's not compared to guest memory.  */
7324         pts = (struct timespec *)(uintptr_t) timeout;
7325         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7326                              (base_op == FUTEX_CMP_REQUEUE
7327                                       ? tswap32(val3)
7328                                       : val3));
7329     default:
7330         return -TARGET_ENOSYS;
7331     }
7332 }
7333 #endif
7334 
7335 #if defined(TARGET_NR_futex_time64)
7336 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7337                            target_ulong uaddr2, int val3)
7338 {
7339     struct timespec ts, *pts;
7340     int base_op;
7341 
7342     /* ??? We assume FUTEX_* constants are the same on both host
7343        and target.  */
7344 #ifdef FUTEX_CMD_MASK
7345     base_op = op & FUTEX_CMD_MASK;
7346 #else
7347     base_op = op;
7348 #endif
7349     switch (base_op) {
7350     case FUTEX_WAIT:
7351     case FUTEX_WAIT_BITSET:
7352         if (timeout) {
7353             pts = &ts;
7354             target_to_host_timespec64(pts, timeout);
7355         } else {
7356             pts = NULL;
7357         }
7358         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7359     case FUTEX_WAKE:
7360         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7361     case FUTEX_FD:
7362         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7363     case FUTEX_REQUEUE:
7364     case FUTEX_CMP_REQUEUE:
7365     case FUTEX_WAKE_OP:
7366         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7367            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7368            But the prototype takes a `struct timespec *'; insert casts
7369            to satisfy the compiler.  We do not need to tswap TIMEOUT
7370            since it's not compared to guest memory.  */
7371         pts = (struct timespec *)(uintptr_t) timeout;
7372         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7373                              (base_op == FUTEX_CMP_REQUEUE
7374                                       ? tswap32(val3)
7375                                       : val3));
7376     default:
7377         return -TARGET_ENOSYS;
7378     }
7379 }
7380 #endif
7381 
7382 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7383 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7384                                      abi_long handle, abi_long mount_id,
7385                                      abi_long flags)
7386 {
7387     struct file_handle *target_fh;
7388     struct file_handle *fh;
7389     int mid = 0;
7390     abi_long ret;
7391     char *name;
7392     unsigned int size, total_size;
7393 
7394     if (get_user_s32(size, handle)) {
7395         return -TARGET_EFAULT;
7396     }
7397 
7398     name = lock_user_string(pathname);
7399     if (!name) {
7400         return -TARGET_EFAULT;
7401     }
7402 
7403     total_size = sizeof(struct file_handle) + size;
7404     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7405     if (!target_fh) {
7406         unlock_user(name, pathname, 0);
7407         return -TARGET_EFAULT;
7408     }
7409 
7410     fh = g_malloc0(total_size);
7411     fh->handle_bytes = size;
7412 
7413     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7414     unlock_user(name, pathname, 0);
7415 
7416     /* man name_to_handle_at(2):
7417      * Other than the use of the handle_bytes field, the caller should treat
7418      * the file_handle structure as an opaque data type
7419      */
7420 
7421     memcpy(target_fh, fh, total_size);
7422     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7423     target_fh->handle_type = tswap32(fh->handle_type);
7424     g_free(fh);
7425     unlock_user(target_fh, handle, total_size);
7426 
7427     if (put_user_s32(mid, mount_id)) {
7428         return -TARGET_EFAULT;
7429     }
7430 
7431     return ret;
7432 
7433 }
7434 #endif
7435 
7436 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7437 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7438                                      abi_long flags)
7439 {
7440     struct file_handle *target_fh;
7441     struct file_handle *fh;
7442     unsigned int size, total_size;
7443     abi_long ret;
7444 
7445     if (get_user_s32(size, handle)) {
7446         return -TARGET_EFAULT;
7447     }
7448 
7449     total_size = sizeof(struct file_handle) + size;
7450     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7451     if (!target_fh) {
7452         return -TARGET_EFAULT;
7453     }
7454 
7455     fh = g_memdup(target_fh, total_size);
7456     fh->handle_bytes = size;
7457     fh->handle_type = tswap32(target_fh->handle_type);
7458 
7459     ret = get_errno(open_by_handle_at(mount_fd, fh,
7460                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7461 
7462     g_free(fh);
7463 
7464     unlock_user(target_fh, handle, total_size);
7465 
7466     return ret;
7467 }
7468 #endif
7469 
7470 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7471 
7472 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7473 {
7474     int host_flags;
7475     target_sigset_t *target_mask;
7476     sigset_t host_mask;
7477     abi_long ret;
7478 
7479     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7480         return -TARGET_EINVAL;
7481     }
7482     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7483         return -TARGET_EFAULT;
7484     }
7485 
7486     target_to_host_sigset(&host_mask, target_mask);
7487 
7488     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7489 
7490     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7491     if (ret >= 0) {
7492         fd_trans_register(ret, &target_signalfd_trans);
7493     }
7494 
7495     unlock_user_struct(target_mask, mask, 0);
7496 
7497     return ret;
7498 }
7499 #endif
7500 
7501 /* Map host to target signal numbers for the wait family of syscalls.
7502    Assume all other status bits are the same.  */
7503 int host_to_target_waitstatus(int status)
7504 {
7505     if (WIFSIGNALED(status)) {
7506         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7507     }
7508     if (WIFSTOPPED(status)) {
7509         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7510                | (status & 0xff);
7511     }
7512     return status;
7513 }
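
/*
 * A worked example of the conversion above.  Linux encodes wait statuses
 * with the signal number in the low 7 bits for a termination (plus the
 * core-dump bit), and in bits 8-15 with a low byte of 0x7f for a stop.  So
 * for a child stopped by host SIGTSTP, only the signal number in bits 8-15
 * is remapped with host_to_target_signal(); the 0x7f stop marker in the low
 * byte passes through unchanged.
 */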
7514 
7515 static int open_self_cmdline(void *cpu_env, int fd)
7516 {
7517     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7518     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7519     int i;
7520 
7521     for (i = 0; i < bprm->argc; i++) {
7522         size_t len = strlen(bprm->argv[i]) + 1;
7523 
7524         if (write(fd, bprm->argv[i], len) != len) {
7525             return -1;
7526         }
7527     }
7528 
7529     return 0;
7530 }
7531 
7532 static int open_self_maps(void *cpu_env, int fd)
7533 {
7534     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7535     TaskState *ts = cpu->opaque;
7536     GSList *map_info = read_self_maps();
7537     GSList *s;
7538     int count;
7539 
7540     for (s = map_info; s; s = g_slist_next(s)) {
7541         MapInfo *e = (MapInfo *) s->data;
7542 
7543         if (h2g_valid(e->start)) {
7544             unsigned long min = e->start;
7545             unsigned long max = e->end;
7546             int flags = page_get_flags(h2g(min));
7547             const char *path;
7548 
7549             max = h2g_valid(max - 1) ?
7550                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7551 
7552             if (page_check_range(h2g(min), max - min, flags) == -1) {
7553                 continue;
7554             }
7555 
7556             if (h2g(min) == ts->info->stack_limit) {
7557                 path = "[stack]";
7558             } else {
7559                 path = e->path;
7560             }
7561 
7562             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7563                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7564                             h2g(min), h2g(max - 1) + 1,
7565                             e->is_read ? 'r' : '-',
7566                             e->is_write ? 'w' : '-',
7567                             e->is_exec ? 'x' : '-',
7568                             e->is_priv ? 'p' : '-',
7569                             (uint64_t) e->offset, e->dev, e->inode);
7570             if (path) {
7571                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7572             } else {
7573                 dprintf(fd, "\n");
7574             }
7575         }
7576     }
7577 
7578     free_self_maps(map_info);
7579 
7580 #ifdef TARGET_VSYSCALL_PAGE
7581     /*
7582      * We only support execution from the vsyscall page.
7583      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7584      */
7585     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7586                     " --xp 00000000 00:00 0",
7587                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7588     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7589 #endif
7590 
7591     return 0;
7592 }
7593 
7594 static int open_self_stat(void *cpu_env, int fd)
7595 {
7596     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7597     TaskState *ts = cpu->opaque;
7598     g_autoptr(GString) buf = g_string_new(NULL);
7599     int i;
7600 
7601     for (i = 0; i < 44; i++) {
7602         if (i == 0) {
7603             /* pid */
7604             g_string_printf(buf, FMT_pid " ", getpid());
7605         } else if (i == 1) {
7606             /* app name */
7607             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7608             bin = bin ? bin + 1 : ts->bprm->argv[0];
7609             g_string_printf(buf, "(%.15s) ", bin);
7610         } else if (i == 27) {
7611             /* stack bottom */
7612             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7613         } else {
7614             /* For all remaining fields, just report 0. */
7615             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7616         }
7617 
7618         if (write(fd, buf->str, buf->len) != buf->len) {
7619             return -1;
7620         }
7621     }
7622 
7623     return 0;
7624 }
7625 
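/*
 * Emulate /proc/self/auxv: copy the auxiliary vector saved on the guest
 * stack at load time (ts->info->saved_auxv) into the temporary file.
 */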
7626 static int open_self_auxv(void *cpu_env, int fd)
7627 {
7628     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7629     TaskState *ts = cpu->opaque;
7630     abi_ulong auxv = ts->info->saved_auxv;
7631     abi_ulong len = ts->info->auxv_len;
7632     char *ptr;
7633 
7634     /*
7635      * The auxiliary vector is stored on the target process stack.
7636      * Read in the whole auxv vector and copy it to the file.
7637      */
7638     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7639     if (ptr != NULL) {
7640         while (len > 0) {
7641             ssize_t r;
7642             r = write(fd, ptr, len);
7643             if (r <= 0) {
7644                 break;
7645             }
7646             len -= r;
7647             ptr += r;
7648         }
7649         lseek(fd, 0, SEEK_SET);
7650         unlock_user(ptr, auxv, len);
7651     }
7652 
7653     return 0;
7654 }
7655 
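/*
 * Return non-zero if filename names the given entry of this process'
 * /proc directory, i.e. "/proc/self/<entry>" or "/proc/<own pid>/<entry>".
 */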
7656 static int is_proc_myself(const char *filename, const char *entry)
7657 {
7658     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7659         filename += strlen("/proc/");
7660         if (!strncmp(filename, "self/", strlen("self/"))) {
7661             filename += strlen("self/");
7662         } else if (*filename >= '1' && *filename <= '9') {
7663             char myself[80];
7664             snprintf(myself, sizeof(myself), "%d/", getpid());
7665             if (!strncmp(filename, myself, strlen(myself))) {
7666                 filename += strlen(myself);
7667             } else {
7668                 return 0;
7669             }
7670         } else {
7671             return 0;
7672         }
7673         if (!strcmp(filename, entry)) {
7674             return 1;
7675         }
7676     }
7677     return 0;
7678 }
7679 
7680 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7681     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7682 static int is_proc(const char *filename, const char *entry)
7683 {
7684     return strcmp(filename, entry) == 0;
7685 }
7686 #endif
7687 
7688 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
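/*
 * When host and target endianness differ, /proc/net/route cannot be passed
 * through unchanged: re-emit each line with the destination, gateway and
 * mask words byte-swapped into target order via tswap32().
 */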
7689 static int open_net_route(void *cpu_env, int fd)
7690 {
7691     FILE *fp;
7692     char *line = NULL;
7693     size_t len = 0;
7694     ssize_t read;
7695 
7696     fp = fopen("/proc/net/route", "r");
7697     if (fp == NULL) {
7698         return -1;
7699     }
7700 
7701     /* read header */
7702 
7703     read = getline(&line, &len, fp);
7704     dprintf(fd, "%s", line);
7705 
7706     /* read routes */
7707 
7708     while ((read = getline(&line, &len, fp)) != -1) {
7709         char iface[16];
7710         uint32_t dest, gw, mask;
7711         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7712         int fields;
7713 
7714         fields = sscanf(line,
7715                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7716                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7717                         &mask, &mtu, &window, &irtt);
7718         if (fields != 11) {
7719             continue;
7720         }
7721         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7722                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7723                 metric, tswap32(mask), mtu, window, irtt);
7724     }
7725 
7726     free(line);
7727     fclose(fp);
7728 
7729     return 0;
7730 }
7731 #endif
7732 
7733 #if defined(TARGET_SPARC)
7734 static int open_cpuinfo(void *cpu_env, int fd)
7735 {
7736     dprintf(fd, "type\t\t: sun4u\n");
7737     return 0;
7738 }
7739 #endif
7740 
7741 #if defined(TARGET_HPPA)
7742 static int open_cpuinfo(void *cpu_env, int fd)
7743 {
7744     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7745     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7746     dprintf(fd, "capabilities\t: os32\n");
7747     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7748     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7749     return 0;
7750 }
7751 #endif
7752 
7753 #if defined(TARGET_M68K)
7754 static int open_hardware(void *cpu_env, int fd)
7755 {
7756     dprintf(fd, "Model:\t\tqemu-m68k\n");
7757     return 0;
7758 }
7759 #endif
7760 
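/*
 * openat() emulation: paths matching one of the "fake" entries below are
 * synthesized by writing generated contents into an unlinked temporary
 * file and returning that descriptor; everything else is passed through
 * to the host with safe_openat().
 */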
7761 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7762 {
7763     struct fake_open {
7764         const char *filename;
7765         int (*fill)(void *cpu_env, int fd);
7766         int (*cmp)(const char *s1, const char *s2);
7767     };
7768     const struct fake_open *fake_open;
7769     static const struct fake_open fakes[] = {
7770         { "maps", open_self_maps, is_proc_myself },
7771         { "stat", open_self_stat, is_proc_myself },
7772         { "auxv", open_self_auxv, is_proc_myself },
7773         { "cmdline", open_self_cmdline, is_proc_myself },
7774 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7775         { "/proc/net/route", open_net_route, is_proc },
7776 #endif
7777 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7778         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7779 #endif
7780 #if defined(TARGET_M68K)
7781         { "/proc/hardware", open_hardware, is_proc },
7782 #endif
7783         { NULL, NULL, NULL }
7784     };
7785 
7786     if (is_proc_myself(pathname, "exe")) {
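    /*
     * /proc/self/exe must name the guest binary, not QEMU itself: reuse
     * the descriptor recorded at load time in AT_EXECFD if there is one,
     * otherwise reopen the guest executable by path.
     */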
7787         int execfd = qemu_getauxval(AT_EXECFD);
7788         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7789     }
7790 
7791     for (fake_open = fakes; fake_open->filename; fake_open++) {
7792         if (fake_open->cmp(pathname, fake_open->filename)) {
7793             break;
7794         }
7795     }
7796 
7797     if (fake_open->filename) {
7798         const char *tmpdir;
7799         char filename[PATH_MAX];
7800         int fd, r;
7801 
7802         /* create temporary file to map stat to */
7803         /* create a temporary file to hold the synthesized contents */
7804         if (!tmpdir)
7805             tmpdir = "/tmp";
7806         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7807         fd = mkstemp(filename);
7808         if (fd < 0) {
7809             return fd;
7810         }
7811         unlink(filename);
7812 
7813         if ((r = fake_open->fill(cpu_env, fd))) {
7814             int e = errno;
7815             close(fd);
7816             errno = e;
7817             return r;
7818         }
7819         lseek(fd, 0, SEEK_SET);
7820 
7821         return fd;
7822     }
7823 
7824     return safe_openat(dirfd, path(pathname), flags, mode);
7825 }
7826 
7827 #define TIMER_MAGIC 0x0caf0000
7828 #define TIMER_MAGIC_MASK 0xffff0000
7829 
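/*
 * Timer IDs handed out to the guest are formed as (TIMER_MAGIC | index)
 * into g_posix_timers, so get_timer_id() below can reject stale or
 * corrupted IDs by checking the magic before recovering the index.
 */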
7830 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7831 static target_timer_t get_timer_id(abi_long arg)
7832 {
7833     target_timer_t timerid = arg;
7834 
7835     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7836         return -TARGET_EINVAL;
7837     }
7838 
7839     timerid &= 0xffff;
7840 
7841     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7842         return -TARGET_EINVAL;
7843     }
7844 
7845     return timerid;
7846 }
7847 
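/*
 * Convert a CPU affinity bitmap from the guest's abi_ulong layout to the
 * host's unsigned long layout, copying it bit by bit so that differences
 * in word size and byte order are handled correctly.
 */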
7848 static int target_to_host_cpu_mask(unsigned long *host_mask,
7849                                    size_t host_size,
7850                                    abi_ulong target_addr,
7851                                    size_t target_size)
7852 {
7853     unsigned target_bits = sizeof(abi_ulong) * 8;
7854     unsigned host_bits = sizeof(*host_mask) * 8;
7855     abi_ulong *target_mask;
7856     unsigned i, j;
7857 
7858     assert(host_size >= target_size);
7859 
7860     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7861     if (!target_mask) {
7862         return -TARGET_EFAULT;
7863     }
7864     memset(host_mask, 0, host_size);
7865 
7866     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7867         unsigned bit = i * target_bits;
7868         abi_ulong val;
7869 
7870         __get_user(val, &target_mask[i]);
7871         for (j = 0; j < target_bits; j++, bit++) {
7872             if (val & (1UL << j)) {
7873                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7874             }
7875         }
7876     }
7877 
7878     unlock_user(target_mask, target_addr, 0);
7879     return 0;
7880 }
7881 
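/*
 * The inverse of target_to_host_cpu_mask(): copy a host affinity bitmap
 * back into the guest's abi_ulong representation, again bit by bit.
 */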
7882 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7883                                    size_t host_size,
7884                                    abi_ulong target_addr,
7885                                    size_t target_size)
7886 {
7887     unsigned target_bits = sizeof(abi_ulong) * 8;
7888     unsigned host_bits = sizeof(*host_mask) * 8;
7889     abi_ulong *target_mask;
7890     unsigned i, j;
7891 
7892     assert(host_size >= target_size);
7893 
7894     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7895     if (!target_mask) {
7896         return -TARGET_EFAULT;
7897     }
7898 
7899     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7900         unsigned bit = i * target_bits;
7901         abi_ulong val = 0;
7902 
7903         for (j = 0; j < target_bits; j++, bit++) {
7904             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7905                 val |= 1UL << j;
7906             }
7907         }
7908         __put_user(val, &target_mask[i]);
7909     }
7910 
7911     unlock_user(target_mask, target_addr, target_size);
7912     return 0;
7913 }
7914 
7915 /* This is an internal helper for do_syscall so that it is easier
7916  * to have a single return point, allowing actions such as logging
7917  * of syscall results to be performed there.
7918  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7919  */
7920 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7921                             abi_long arg2, abi_long arg3, abi_long arg4,
7922                             abi_long arg5, abi_long arg6, abi_long arg7,
7923                             abi_long arg8)
7924 {
7925     CPUState *cpu = env_cpu(cpu_env);
7926     abi_long ret;
7927 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7928     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7929     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7930     || defined(TARGET_NR_statx)
7931     struct stat st;
7932 #endif
7933 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7934     || defined(TARGET_NR_fstatfs)
7935     struct statfs stfs;
7936 #endif
7937     void *p;
7938 
7939     switch(num) {
7940     case TARGET_NR_exit:
7941         /* In old applications this may be used to implement _exit(2).
7942         /* In old applications this may be used to implement _exit(2).
7943            However in threaded applications it is used for thread termination,
7944            and _exit_group is used for application termination.
7945            Do thread termination if we have more than one thread.  */
7946         if (block_signals()) {
7947             return -TARGET_ERESTARTSYS;
7948         }
7949 
7950         pthread_mutex_lock(&clone_lock);
7951 
7952         if (CPU_NEXT(first_cpu)) {
7953             TaskState *ts = cpu->opaque;
7954 
7955             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7956             object_unref(OBJECT(cpu));
7957             /*
7958              * At this point the CPU should be unrealized and removed
7959              * from cpu lists. We can clean-up the rest of the thread
7960              * data without the lock held.
7961              */
7962 
7963             pthread_mutex_unlock(&clone_lock);
7964 
7965             if (ts->child_tidptr) {
7966                 put_user_u32(0, ts->child_tidptr);
7967                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7968                           NULL, NULL, 0);
7969             }
7970             thread_cpu = NULL;
7971             g_free(ts);
7972             rcu_unregister_thread();
7973             pthread_exit(NULL);
7974         }
7975 
7976         pthread_mutex_unlock(&clone_lock);
7977         preexit_cleanup(cpu_env, arg1);
7978         _exit(arg1);
7979         return 0; /* avoid warning */
7980     case TARGET_NR_read:
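        /*
         * read(fd, NULL, 0) is a valid call that must not fault: locking a
         * NULL guest buffer would fail, so pass this degenerate case
         * straight through and return whatever the host says (0 or an
         * error such as -EBADF).
         */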
7981         if (arg2 == 0 && arg3 == 0) {
7982             return get_errno(safe_read(arg1, 0, 0));
7983         } else {
7984             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7985                 return -TARGET_EFAULT;
7986             ret = get_errno(safe_read(arg1, p, arg3));
7987             if (ret >= 0 &&
7988                 fd_trans_host_to_target_data(arg1)) {
7989                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7990             }
7991             unlock_user(p, arg2, ret);
7992         }
7993         return ret;
7994     case TARGET_NR_write:
7995         if (arg2 == 0 && arg3 == 0) {
7996             return get_errno(safe_write(arg1, 0, 0));
7997         }
7998         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7999             return -TARGET_EFAULT;
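        /*
         * If this descriptor has a registered data translator (set up for
         * e.g. netlink sockets), run it on a private copy of the buffer so
         * the guest's data is converted before it reaches the host.
         */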
8000         if (fd_trans_target_to_host_data(arg1)) {
8001             void *copy = g_malloc(arg3);
8002             memcpy(copy, p, arg3);
8003             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8004             if (ret >= 0) {
8005                 ret = get_errno(safe_write(arg1, copy, ret));
8006             }
8007             g_free(copy);
8008         } else {
8009             ret = get_errno(safe_write(arg1, p, arg3));
8010         }
8011         unlock_user(p, arg2, 0);
8012         return ret;
8013 
8014 #ifdef TARGET_NR_open
8015     case TARGET_NR_open:
8016         if (!(p = lock_user_string(arg1)))
8017             return -TARGET_EFAULT;
8018         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8019                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8020                                   arg3));
8021         fd_trans_unregister(ret);
8022         unlock_user(p, arg1, 0);
8023         return ret;
8024 #endif
8025     case TARGET_NR_openat:
8026         if (!(p = lock_user_string(arg2)))
8027             return -TARGET_EFAULT;
8028         ret = get_errno(do_openat(cpu_env, arg1, p,
8029                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8030                                   arg4));
8031         fd_trans_unregister(ret);
8032         unlock_user(p, arg2, 0);
8033         return ret;
8034 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8035     case TARGET_NR_name_to_handle_at:
8036         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8037         return ret;
8038 #endif
8039 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8040     case TARGET_NR_open_by_handle_at:
8041         ret = do_open_by_handle_at(arg1, arg2, arg3);
8042         fd_trans_unregister(ret);
8043         return ret;
8044 #endif
8045     case TARGET_NR_close:
8046         fd_trans_unregister(arg1);
8047         return get_errno(close(arg1));
8048 
8049     case TARGET_NR_brk:
8050         return do_brk(arg1);
8051 #ifdef TARGET_NR_fork
8052     case TARGET_NR_fork:
8053         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8054 #endif
8055 #ifdef TARGET_NR_waitpid
8056     case TARGET_NR_waitpid:
8057         {
8058             int status;
8059             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8060             if (!is_error(ret) && arg2 && ret
8061                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8062                 return -TARGET_EFAULT;
8063         }
8064         return ret;
8065 #endif
8066 #ifdef TARGET_NR_waitid
8067     case TARGET_NR_waitid:
8068         {
8069             siginfo_t info;
8070             info.si_pid = 0;
8071             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8072             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8073                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8074                     return -TARGET_EFAULT;
8075                 host_to_target_siginfo(p, &info);
8076                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8077             }
8078         }
8079         return ret;
8080 #endif
8081 #ifdef TARGET_NR_creat /* not on alpha */
8082     case TARGET_NR_creat:
8083         if (!(p = lock_user_string(arg1)))
8084             return -TARGET_EFAULT;
8085         ret = get_errno(creat(p, arg2));
8086         fd_trans_unregister(ret);
8087         unlock_user(p, arg1, 0);
8088         return ret;
8089 #endif
8090 #ifdef TARGET_NR_link
8091     case TARGET_NR_link:
8092         {
8093             void * p2;
8094             p = lock_user_string(arg1);
8095             p2 = lock_user_string(arg2);
8096             if (!p || !p2)
8097                 ret = -TARGET_EFAULT;
8098             else
8099                 ret = get_errno(link(p, p2));
8100             unlock_user(p2, arg2, 0);
8101             unlock_user(p, arg1, 0);
8102         }
8103         return ret;
8104 #endif
8105 #if defined(TARGET_NR_linkat)
8106     case TARGET_NR_linkat:
8107         {
8108             void * p2 = NULL;
8109             if (!arg2 || !arg4)
8110                 return -TARGET_EFAULT;
8111             p  = lock_user_string(arg2);
8112             p2 = lock_user_string(arg4);
8113             if (!p || !p2)
8114                 ret = -TARGET_EFAULT;
8115             else
8116                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8117             unlock_user(p, arg2, 0);
8118             unlock_user(p2, arg4, 0);
8119         }
8120         return ret;
8121 #endif
8122 #ifdef TARGET_NR_unlink
8123     case TARGET_NR_unlink:
8124         if (!(p = lock_user_string(arg1)))
8125             return -TARGET_EFAULT;
8126         ret = get_errno(unlink(p));
8127         unlock_user(p, arg1, 0);
8128         return ret;
8129 #endif
8130 #if defined(TARGET_NR_unlinkat)
8131     case TARGET_NR_unlinkat:
8132         if (!(p = lock_user_string(arg2)))
8133             return -TARGET_EFAULT;
8134         ret = get_errno(unlinkat(arg1, p, arg3));
8135         unlock_user(p, arg2, 0);
8136         return ret;
8137 #endif
8138     case TARGET_NR_execve:
8139         {
8140             char **argp, **envp;
8141             int argc, envc;
8142             abi_ulong gp;
8143             abi_ulong guest_argp;
8144             abi_ulong guest_envp;
8145             abi_ulong addr;
8146             char **q;
8147             int total_size = 0;
8148 
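            /*
             * First walk the guest argv and envp arrays to count their
             * entries, then build NULL-terminated host copies by locking
             * each guest string in place.
             */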
8149             argc = 0;
8150             guest_argp = arg2;
8151             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8152                 if (get_user_ual(addr, gp))
8153                     return -TARGET_EFAULT;
8154                 if (!addr)
8155                     break;
8156                 argc++;
8157             }
8158             envc = 0;
8159             guest_envp = arg3;
8160             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8161                 if (get_user_ual(addr, gp))
8162                     return -TARGET_EFAULT;
8163                 if (!addr)
8164                     break;
8165                 envc++;
8166             }
8167 
8168             argp = g_new0(char *, argc + 1);
8169             envp = g_new0(char *, envc + 1);
8170 
8171             for (gp = guest_argp, q = argp; gp;
8172                   gp += sizeof(abi_ulong), q++) {
8173                 if (get_user_ual(addr, gp))
8174                     goto execve_efault;
8175                 if (!addr)
8176                     break;
8177                 if (!(*q = lock_user_string(addr)))
8178                     goto execve_efault;
8179                 total_size += strlen(*q) + 1;
8180             }
8181             *q = NULL;
8182 
8183             for (gp = guest_envp, q = envp; gp;
8184                   gp += sizeof(abi_ulong), q++) {
8185                 if (get_user_ual(addr, gp))
8186                     goto execve_efault;
8187                 if (!addr)
8188                     break;
8189                 if (!(*q = lock_user_string(addr)))
8190                     goto execve_efault;
8191                 total_size += strlen(*q) + 1;
8192             }
8193             *q = NULL;
8194 
8195             if (!(p = lock_user_string(arg1)))
8196                 goto execve_efault;
8197             /* Although execve() is not an interruptible syscall it is
8198              * a special case where we must use the safe_syscall wrapper:
8199              * if we allow a signal to happen before we make the host
8200              * syscall then we will 'lose' it, because at the point of
8201              * execve the process leaves QEMU's control. So we use the
8202              * safe syscall wrapper to ensure that we either take the
8203              * signal as a guest signal, or else it does not happen
8204              * before the execve completes and makes it the other
8205              * program's problem.
8206              */
8207             ret = get_errno(safe_execve(p, argp, envp));
8208             unlock_user(p, arg1, 0);
8209 
8210             goto execve_end;
8211 
8212         execve_efault:
8213             ret = -TARGET_EFAULT;
8214 
8215         execve_end:
8216             for (gp = guest_argp, q = argp; *q;
8217                   gp += sizeof(abi_ulong), q++) {
8218                 if (get_user_ual(addr, gp)
8219                     || !addr)
8220                     break;
8221                 unlock_user(*q, addr, 0);
8222             }
8223             for (gp = guest_envp, q = envp; *q;
8224                   gp += sizeof(abi_ulong), q++) {
8225                 if (get_user_ual(addr, gp)
8226                     || !addr)
8227                     break;
8228                 unlock_user(*q, addr, 0);
8229             }
8230 
8231             g_free(argp);
8232             g_free(envp);
8233         }
8234         return ret;
8235     case TARGET_NR_chdir:
8236         if (!(p = lock_user_string(arg1)))
8237             return -TARGET_EFAULT;
8238         ret = get_errno(chdir(p));
8239         unlock_user(p, arg1, 0);
8240         return ret;
8241 #ifdef TARGET_NR_time
8242     case TARGET_NR_time:
8243         {
8244             time_t host_time;
8245             ret = get_errno(time(&host_time));
8246             if (!is_error(ret)
8247                 && arg1
8248                 && put_user_sal(host_time, arg1))
8249                 return -TARGET_EFAULT;
8250         }
8251         return ret;
8252 #endif
8253 #ifdef TARGET_NR_mknod
8254     case TARGET_NR_mknod:
8255         if (!(p = lock_user_string(arg1)))
8256             return -TARGET_EFAULT;
8257         ret = get_errno(mknod(p, arg2, arg3));
8258         unlock_user(p, arg1, 0);
8259         return ret;
8260 #endif
8261 #if defined(TARGET_NR_mknodat)
8262     case TARGET_NR_mknodat:
8263         if (!(p = lock_user_string(arg2)))
8264             return -TARGET_EFAULT;
8265         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8266         unlock_user(p, arg2, 0);
8267         return ret;
8268 #endif
8269 #ifdef TARGET_NR_chmod
8270     case TARGET_NR_chmod:
8271         if (!(p = lock_user_string(arg1)))
8272             return -TARGET_EFAULT;
8273         ret = get_errno(chmod(p, arg2));
8274         unlock_user(p, arg1, 0);
8275         return ret;
8276 #endif
8277 #ifdef TARGET_NR_lseek
8278     case TARGET_NR_lseek:
8279         return get_errno(lseek(arg1, arg2, arg3));
8280 #endif
8281 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8282     /* Alpha specific */
8283     case TARGET_NR_getxpid:
8284         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8285         return get_errno(getpid());
8286 #endif
8287 #ifdef TARGET_NR_getpid
8288     case TARGET_NR_getpid:
8289         return get_errno(getpid());
8290 #endif
8291     case TARGET_NR_mount:
8292         {
8293             /* need to look at the data field */
8294             void *p2, *p3;
8295 
8296             if (arg1) {
8297                 p = lock_user_string(arg1);
8298                 if (!p) {
8299                     return -TARGET_EFAULT;
8300                 }
8301             } else {
8302                 p = NULL;
8303             }
8304 
8305             p2 = lock_user_string(arg2);
8306             if (!p2) {
8307                 if (arg1) {
8308                     unlock_user(p, arg1, 0);
8309                 }
8310                 return -TARGET_EFAULT;
8311             }
8312 
8313             if (arg3) {
8314                 p3 = lock_user_string(arg3);
8315                 if (!p3) {
8316                     if (arg1) {
8317                         unlock_user(p, arg1, 0);
8318                     }
8319                     unlock_user(p2, arg2, 0);
8320                     return -TARGET_EFAULT;
8321                 }
8322             } else {
8323                 p3 = NULL;
8324             }
8325 
8326             /* FIXME - arg5 should be locked, but it isn't clear how to
8327              * do that since it's not guaranteed to be a NULL-terminated
8328              * string.
8329              */
8330             if (!arg5) {
8331                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8332             } else {
8333                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8334             }
8335             ret = get_errno(ret);
8336 
8337             if (arg1) {
8338                 unlock_user(p, arg1, 0);
8339             }
8340             unlock_user(p2, arg2, 0);
8341             if (arg3) {
8342                 unlock_user(p3, arg3, 0);
8343             }
8344         }
8345         return ret;
8346 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8347 #if defined(TARGET_NR_umount)
8348     case TARGET_NR_umount:
8349 #endif
8350 #if defined(TARGET_NR_oldumount)
8351     case TARGET_NR_oldumount:
8352 #endif
8353         if (!(p = lock_user_string(arg1)))
8354             return -TARGET_EFAULT;
8355         ret = get_errno(umount(p));
8356         unlock_user(p, arg1, 0);
8357         return ret;
8358 #endif
8359 #ifdef TARGET_NR_stime /* not on alpha */
8360     case TARGET_NR_stime:
8361         {
8362             struct timespec ts;
8363             ts.tv_nsec = 0;
8364             if (get_user_sal(ts.tv_sec, arg1)) {
8365                 return -TARGET_EFAULT;
8366             }
8367             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8368         }
8369 #endif
8370 #ifdef TARGET_NR_alarm /* not on alpha */
8371     case TARGET_NR_alarm:
8372         return alarm(arg1);
8373 #endif
8374 #ifdef TARGET_NR_pause /* not on alpha */
8375     case TARGET_NR_pause:
8376         if (!block_signals()) {
8377             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8378         }
8379         return -TARGET_EINTR;
8380 #endif
8381 #ifdef TARGET_NR_utime
8382     case TARGET_NR_utime:
8383         {
8384             struct utimbuf tbuf, *host_tbuf;
8385             struct target_utimbuf *target_tbuf;
8386             if (arg2) {
8387                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8388                     return -TARGET_EFAULT;
8389                 tbuf.actime = tswapal(target_tbuf->actime);
8390                 tbuf.modtime = tswapal(target_tbuf->modtime);
8391                 unlock_user_struct(target_tbuf, arg2, 0);
8392                 host_tbuf = &tbuf;
8393             } else {
8394                 host_tbuf = NULL;
8395             }
8396             if (!(p = lock_user_string(arg1)))
8397                 return -TARGET_EFAULT;
8398             ret = get_errno(utime(p, host_tbuf));
8399             unlock_user(p, arg1, 0);
8400         }
8401         return ret;
8402 #endif
8403 #ifdef TARGET_NR_utimes
8404     case TARGET_NR_utimes:
8405         {
8406             struct timeval *tvp, tv[2];
8407             if (arg2) {
8408                 if (copy_from_user_timeval(&tv[0], arg2)
8409                     || copy_from_user_timeval(&tv[1],
8410                                               arg2 + sizeof(struct target_timeval)))
8411                     return -TARGET_EFAULT;
8412                 tvp = tv;
8413             } else {
8414                 tvp = NULL;
8415             }
8416             if (!(p = lock_user_string(arg1)))
8417                 return -TARGET_EFAULT;
8418             ret = get_errno(utimes(p, tvp));
8419             unlock_user(p, arg1, 0);
8420         }
8421         return ret;
8422 #endif
8423 #if defined(TARGET_NR_futimesat)
8424     case TARGET_NR_futimesat:
8425         {
8426             struct timeval *tvp, tv[2];
8427             if (arg3) {
8428                 if (copy_from_user_timeval(&tv[0], arg3)
8429                     || copy_from_user_timeval(&tv[1],
8430                                               arg3 + sizeof(struct target_timeval)))
8431                     return -TARGET_EFAULT;
8432                 tvp = tv;
8433             } else {
8434                 tvp = NULL;
8435             }
8436             if (!(p = lock_user_string(arg2))) {
8437                 return -TARGET_EFAULT;
8438             }
8439             ret = get_errno(futimesat(arg1, path(p), tvp));
8440             unlock_user(p, arg2, 0);
8441         }
8442         return ret;
8443 #endif
8444 #ifdef TARGET_NR_access
8445     case TARGET_NR_access:
8446         if (!(p = lock_user_string(arg1))) {
8447             return -TARGET_EFAULT;
8448         }
8449         ret = get_errno(access(path(p), arg2));
8450         unlock_user(p, arg1, 0);
8451         return ret;
8452 #endif
8453 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8454     case TARGET_NR_faccessat:
8455         if (!(p = lock_user_string(arg2))) {
8456             return -TARGET_EFAULT;
8457         }
8458         ret = get_errno(faccessat(arg1, p, arg3, 0));
8459         unlock_user(p, arg2, 0);
8460         return ret;
8461 #endif
8462 #ifdef TARGET_NR_nice /* not on alpha */
8463     case TARGET_NR_nice:
8464         return get_errno(nice(arg1));
8465 #endif
8466     case TARGET_NR_sync:
8467         sync();
8468         return 0;
8469 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8470     case TARGET_NR_syncfs:
8471         return get_errno(syncfs(arg1));
8472 #endif
8473     case TARGET_NR_kill:
8474         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8475 #ifdef TARGET_NR_rename
8476     case TARGET_NR_rename:
8477         {
8478             void *p2;
8479             p = lock_user_string(arg1);
8480             p2 = lock_user_string(arg2);
8481             if (!p || !p2)
8482                 ret = -TARGET_EFAULT;
8483             else
8484                 ret = get_errno(rename(p, p2));
8485             unlock_user(p2, arg2, 0);
8486             unlock_user(p, arg1, 0);
8487         }
8488         return ret;
8489 #endif
8490 #if defined(TARGET_NR_renameat)
8491     case TARGET_NR_renameat:
8492         {
8493             void *p2;
8494             p  = lock_user_string(arg2);
8495             p2 = lock_user_string(arg4);
8496             if (!p || !p2)
8497                 ret = -TARGET_EFAULT;
8498             else
8499                 ret = get_errno(renameat(arg1, p, arg3, p2));
8500             unlock_user(p2, arg4, 0);
8501             unlock_user(p, arg2, 0);
8502         }
8503         return ret;
8504 #endif
8505 #if defined(TARGET_NR_renameat2)
8506     case TARGET_NR_renameat2:
8507         {
8508             void *p2;
8509             p  = lock_user_string(arg2);
8510             p2 = lock_user_string(arg4);
8511             if (!p || !p2) {
8512                 ret = -TARGET_EFAULT;
8513             } else {
8514                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8515             }
8516             unlock_user(p2, arg4, 0);
8517             unlock_user(p, arg2, 0);
8518         }
8519         return ret;
8520 #endif
8521 #ifdef TARGET_NR_mkdir
8522     case TARGET_NR_mkdir:
8523         if (!(p = lock_user_string(arg1)))
8524             return -TARGET_EFAULT;
8525         ret = get_errno(mkdir(p, arg2));
8526         unlock_user(p, arg1, 0);
8527         return ret;
8528 #endif
8529 #if defined(TARGET_NR_mkdirat)
8530     case TARGET_NR_mkdirat:
8531         if (!(p = lock_user_string(arg2)))
8532             return -TARGET_EFAULT;
8533         ret = get_errno(mkdirat(arg1, p, arg3));
8534         unlock_user(p, arg2, 0);
8535         return ret;
8536 #endif
8537 #ifdef TARGET_NR_rmdir
8538     case TARGET_NR_rmdir:
8539         if (!(p = lock_user_string(arg1)))
8540             return -TARGET_EFAULT;
8541         ret = get_errno(rmdir(p));
8542         unlock_user(p, arg1, 0);
8543         return ret;
8544 #endif
8545     case TARGET_NR_dup:
8546         ret = get_errno(dup(arg1));
8547         if (ret >= 0) {
8548             fd_trans_dup(arg1, ret);
8549         }
8550         return ret;
8551 #ifdef TARGET_NR_pipe
8552     case TARGET_NR_pipe:
8553         return do_pipe(cpu_env, arg1, 0, 0);
8554 #endif
8555 #ifdef TARGET_NR_pipe2
8556     case TARGET_NR_pipe2:
8557         return do_pipe(cpu_env, arg1,
8558                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8559 #endif
8560     case TARGET_NR_times:
8561         {
8562             struct target_tms *tmsp;
8563             struct tms tms;
8564             ret = get_errno(times(&tms));
8565             if (arg1) {
8566                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8567                 if (!tmsp)
8568                     return -TARGET_EFAULT;
8569                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8570                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8571                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8572                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8573             }
8574             if (!is_error(ret))
8575                 ret = host_to_target_clock_t(ret);
8576         }
8577         return ret;
8578     case TARGET_NR_acct:
8579         if (arg1 == 0) {
8580             ret = get_errno(acct(NULL));
8581         } else {
8582             if (!(p = lock_user_string(arg1))) {
8583                 return -TARGET_EFAULT;
8584             }
8585             ret = get_errno(acct(path(p)));
8586             unlock_user(p, arg1, 0);
8587         }
8588         return ret;
8589 #ifdef TARGET_NR_umount2
8590     case TARGET_NR_umount2:
8591         if (!(p = lock_user_string(arg1)))
8592             return -TARGET_EFAULT;
8593         ret = get_errno(umount2(p, arg2));
8594         unlock_user(p, arg1, 0);
8595         return ret;
8596 #endif
8597     case TARGET_NR_ioctl:
8598         return do_ioctl(arg1, arg2, arg3);
8599 #ifdef TARGET_NR_fcntl
8600     case TARGET_NR_fcntl:
8601         return do_fcntl(arg1, arg2, arg3);
8602 #endif
8603     case TARGET_NR_setpgid:
8604         return get_errno(setpgid(arg1, arg2));
8605     case TARGET_NR_umask:
8606         return get_errno(umask(arg1));
8607     case TARGET_NR_chroot:
8608         if (!(p = lock_user_string(arg1)))
8609             return -TARGET_EFAULT;
8610         ret = get_errno(chroot(p));
8611         unlock_user(p, arg1, 0);
8612         return ret;
8613 #ifdef TARGET_NR_dup2
8614     case TARGET_NR_dup2:
8615         ret = get_errno(dup2(arg1, arg2));
8616         if (ret >= 0) {
8617             fd_trans_dup(arg1, arg2);
8618         }
8619         return ret;
8620 #endif
8621 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8622     case TARGET_NR_dup3:
8623     {
8624         int host_flags;
8625 
8626         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8627             return -TARGET_EINVAL;
8628         }
8629         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8630         ret = get_errno(dup3(arg1, arg2, host_flags));
8631         if (ret >= 0) {
8632             fd_trans_dup(arg1, arg2);
8633         }
8634         return ret;
8635     }
8636 #endif
8637 #ifdef TARGET_NR_getppid /* not on alpha */
8638     case TARGET_NR_getppid:
8639         return get_errno(getppid());
8640 #endif
8641 #ifdef TARGET_NR_getpgrp
8642     case TARGET_NR_getpgrp:
8643         return get_errno(getpgrp());
8644 #endif
8645     case TARGET_NR_setsid:
8646         return get_errno(setsid());
8647 #ifdef TARGET_NR_sigaction
8648     case TARGET_NR_sigaction:
8649         {
8650 #if defined(TARGET_ALPHA)
8651             struct target_sigaction act, oact, *pact = 0;
8652             struct target_old_sigaction *old_act;
8653             if (arg2) {
8654                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8655                     return -TARGET_EFAULT;
8656                 act._sa_handler = old_act->_sa_handler;
8657                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8658                 act.sa_flags = old_act->sa_flags;
8659                 act.sa_restorer = 0;
8660                 unlock_user_struct(old_act, arg2, 0);
8661                 pact = &act;
8662             }
8663             ret = get_errno(do_sigaction(arg1, pact, &oact));
8664             if (!is_error(ret) && arg3) {
8665                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8666                     return -TARGET_EFAULT;
8667                 old_act->_sa_handler = oact._sa_handler;
8668                 old_act->sa_mask = oact.sa_mask.sig[0];
8669                 old_act->sa_flags = oact.sa_flags;
8670                 unlock_user_struct(old_act, arg3, 1);
8671             }
8672 #elif defined(TARGET_MIPS)
8673 	    struct target_sigaction act, oact, *pact, *old_act;
8674 
8675 	    if (arg2) {
8676                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8677                     return -TARGET_EFAULT;
8678 		act._sa_handler = old_act->_sa_handler;
8679 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8680 		act.sa_flags = old_act->sa_flags;
8681 		unlock_user_struct(old_act, arg2, 0);
8682 		pact = &act;
8683 	    } else {
8684 		pact = NULL;
8685 	    }
8686 
8687 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8688 
8689 	    if (!is_error(ret) && arg3) {
8690                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8691                     return -TARGET_EFAULT;
8692 		old_act->_sa_handler = oact._sa_handler;
8693 		old_act->sa_flags = oact.sa_flags;
8694 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8695 		old_act->sa_mask.sig[1] = 0;
8696 		old_act->sa_mask.sig[2] = 0;
8697 		old_act->sa_mask.sig[3] = 0;
8698 		unlock_user_struct(old_act, arg3, 1);
8699 	    }
8700 #else
8701             struct target_old_sigaction *old_act;
8702             struct target_sigaction act, oact, *pact;
8703             if (arg2) {
8704                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8705                     return -TARGET_EFAULT;
8706                 act._sa_handler = old_act->_sa_handler;
8707                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8708                 act.sa_flags = old_act->sa_flags;
8709                 act.sa_restorer = old_act->sa_restorer;
8710 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8711                 act.ka_restorer = 0;
8712 #endif
8713                 unlock_user_struct(old_act, arg2, 0);
8714                 pact = &act;
8715             } else {
8716                 pact = NULL;
8717             }
8718             ret = get_errno(do_sigaction(arg1, pact, &oact));
8719             if (!is_error(ret) && arg3) {
8720                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8721                     return -TARGET_EFAULT;
8722                 old_act->_sa_handler = oact._sa_handler;
8723                 old_act->sa_mask = oact.sa_mask.sig[0];
8724                 old_act->sa_flags = oact.sa_flags;
8725                 old_act->sa_restorer = oact.sa_restorer;
8726                 unlock_user_struct(old_act, arg3, 1);
8727             }
8728 #endif
8729         }
8730         return ret;
8731 #endif
8732     case TARGET_NR_rt_sigaction:
8733         {
8734 #if defined(TARGET_ALPHA)
8735             /* For Alpha and SPARC this is a 5 argument syscall, with
8736              * a 'restorer' parameter which must be copied into the
8737              * sa_restorer field of the sigaction struct.
8738              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8739              * and arg5 is the sigsetsize.
8740              * Alpha also has a separate rt_sigaction struct that it uses
8741              * here; SPARC uses the usual sigaction struct.
8742              */
8743             struct target_rt_sigaction *rt_act;
8744             struct target_sigaction act, oact, *pact = 0;
8745 
8746             if (arg4 != sizeof(target_sigset_t)) {
8747                 return -TARGET_EINVAL;
8748             }
8749             if (arg2) {
8750                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8751                     return -TARGET_EFAULT;
8752                 act._sa_handler = rt_act->_sa_handler;
8753                 act.sa_mask = rt_act->sa_mask;
8754                 act.sa_flags = rt_act->sa_flags;
8755                 act.sa_restorer = arg5;
8756                 unlock_user_struct(rt_act, arg2, 0);
8757                 pact = &act;
8758             }
8759             ret = get_errno(do_sigaction(arg1, pact, &oact));
8760             if (!is_error(ret) && arg3) {
8761                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8762                     return -TARGET_EFAULT;
8763                 rt_act->_sa_handler = oact._sa_handler;
8764                 rt_act->sa_mask = oact.sa_mask;
8765                 rt_act->sa_flags = oact.sa_flags;
8766                 unlock_user_struct(rt_act, arg3, 1);
8767             }
8768 #else
8769 #ifdef TARGET_SPARC
8770             target_ulong restorer = arg4;
8771             target_ulong sigsetsize = arg5;
8772 #else
8773             target_ulong sigsetsize = arg4;
8774 #endif
8775             struct target_sigaction *act;
8776             struct target_sigaction *oact;
8777 
8778             if (sigsetsize != sizeof(target_sigset_t)) {
8779                 return -TARGET_EINVAL;
8780             }
8781             if (arg2) {
8782                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8783                     return -TARGET_EFAULT;
8784                 }
8785 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8786                 act->ka_restorer = restorer;
8787 #endif
8788             } else {
8789                 act = NULL;
8790             }
8791             if (arg3) {
8792                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8793                     ret = -TARGET_EFAULT;
8794                     goto rt_sigaction_fail;
8795                 }
8796             } else
8797                 oact = NULL;
8798             ret = get_errno(do_sigaction(arg1, act, oact));
8799 	rt_sigaction_fail:
8800             if (act)
8801                 unlock_user_struct(act, arg2, 0);
8802             if (oact)
8803                 unlock_user_struct(oact, arg3, 1);
8804 #endif
8805         }
8806         return ret;
8807 #ifdef TARGET_NR_sgetmask /* not on alpha */
8808     case TARGET_NR_sgetmask:
8809         {
8810             sigset_t cur_set;
8811             abi_ulong target_set;
8812             ret = do_sigprocmask(0, NULL, &cur_set);
8813             if (!ret) {
8814                 host_to_target_old_sigset(&target_set, &cur_set);
8815                 ret = target_set;
8816             }
8817         }
8818         return ret;
8819 #endif
8820 #ifdef TARGET_NR_ssetmask /* not on alpha */
8821     case TARGET_NR_ssetmask:
8822         {
8823             sigset_t set, oset;
8824             abi_ulong target_set = arg1;
8825             target_to_host_old_sigset(&set, &target_set);
8826             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8827             if (!ret) {
8828                 host_to_target_old_sigset(&target_set, &oset);
8829                 ret = target_set;
8830             }
8831         }
8832         return ret;
8833 #endif
8834 #ifdef TARGET_NR_sigprocmask
8835     case TARGET_NR_sigprocmask:
8836         {
8837 #if defined(TARGET_ALPHA)
8838             sigset_t set, oldset;
8839             abi_ulong mask;
8840             int how;
8841 
8842             switch (arg1) {
8843             case TARGET_SIG_BLOCK:
8844                 how = SIG_BLOCK;
8845                 break;
8846             case TARGET_SIG_UNBLOCK:
8847                 how = SIG_UNBLOCK;
8848                 break;
8849             case TARGET_SIG_SETMASK:
8850                 how = SIG_SETMASK;
8851                 break;
8852             default:
8853                 return -TARGET_EINVAL;
8854             }
8855             mask = arg2;
8856             target_to_host_old_sigset(&set, &mask);
8857 
8858             ret = do_sigprocmask(how, &set, &oldset);
8859             if (!is_error(ret)) {
8860                 host_to_target_old_sigset(&mask, &oldset);
8861                 ret = mask;
8862                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8863             }
8864 #else
8865             sigset_t set, oldset, *set_ptr;
8866             int how;
8867 
8868             if (arg2) {
8869                 switch (arg1) {
8870                 case TARGET_SIG_BLOCK:
8871                     how = SIG_BLOCK;
8872                     break;
8873                 case TARGET_SIG_UNBLOCK:
8874                     how = SIG_UNBLOCK;
8875                     break;
8876                 case TARGET_SIG_SETMASK:
8877                     how = SIG_SETMASK;
8878                     break;
8879                 default:
8880                     return -TARGET_EINVAL;
8881                 }
8882                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8883                     return -TARGET_EFAULT;
8884                 target_to_host_old_sigset(&set, p);
8885                 unlock_user(p, arg2, 0);
8886                 set_ptr = &set;
8887             } else {
8888                 how = 0;
8889                 set_ptr = NULL;
8890             }
8891             ret = do_sigprocmask(how, set_ptr, &oldset);
8892             if (!is_error(ret) && arg3) {
8893                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8894                     return -TARGET_EFAULT;
8895                 host_to_target_old_sigset(p, &oldset);
8896                 unlock_user(p, arg3, sizeof(target_sigset_t));
8897             }
8898 #endif
8899         }
8900         return ret;
8901 #endif
8902     case TARGET_NR_rt_sigprocmask:
8903         {
8904             int how = arg1;
8905             sigset_t set, oldset, *set_ptr;
8906 
8907             if (arg4 != sizeof(target_sigset_t)) {
8908                 return -TARGET_EINVAL;
8909             }
8910 
8911             if (arg2) {
8912                 switch(how) {
8913                 case TARGET_SIG_BLOCK:
8914                     how = SIG_BLOCK;
8915                     break;
8916                 case TARGET_SIG_UNBLOCK:
8917                     how = SIG_UNBLOCK;
8918                     break;
8919                 case TARGET_SIG_SETMASK:
8920                     how = SIG_SETMASK;
8921                     break;
8922                 default:
8923                     return -TARGET_EINVAL;
8924                 }
8925                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8926                     return -TARGET_EFAULT;
8927                 target_to_host_sigset(&set, p);
8928                 unlock_user(p, arg2, 0);
8929                 set_ptr = &set;
8930             } else {
8931                 how = 0;
8932                 set_ptr = NULL;
8933             }
8934             ret = do_sigprocmask(how, set_ptr, &oldset);
8935             if (!is_error(ret) && arg3) {
8936                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8937                     return -TARGET_EFAULT;
8938                 host_to_target_sigset(p, &oldset);
8939                 unlock_user(p, arg3, sizeof(target_sigset_t));
8940             }
8941         }
8942         return ret;
8943 #ifdef TARGET_NR_sigpending
8944     case TARGET_NR_sigpending:
8945         {
8946             sigset_t set;
8947             ret = get_errno(sigpending(&set));
8948             if (!is_error(ret)) {
8949                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8950                     return -TARGET_EFAULT;
8951                 host_to_target_old_sigset(p, &set);
8952                 unlock_user(p, arg1, sizeof(target_sigset_t));
8953             }
8954         }
8955         return ret;
8956 #endif
8957     case TARGET_NR_rt_sigpending:
8958         {
8959             sigset_t set;
8960 
8961             /* Yes, this check is >, not != like most. We follow the kernel's
8962              * logic and it does it like this because it implements
8963              * NR_sigpending through the same code path, and in that case
8964              * the old_sigset_t is smaller in size.
8965              */
8966             if (arg2 > sizeof(target_sigset_t)) {
8967                 return -TARGET_EINVAL;
8968             }
8969 
8970             ret = get_errno(sigpending(&set));
8971             if (!is_error(ret)) {
8972                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8973                     return -TARGET_EFAULT;
8974                 host_to_target_sigset(p, &set);
8975                 unlock_user(p, arg1, sizeof(target_sigset_t));
8976             }
8977         }
8978         return ret;
8979 #ifdef TARGET_NR_sigsuspend
8980     case TARGET_NR_sigsuspend:
8981         {
8982             TaskState *ts = cpu->opaque;
8983 #if defined(TARGET_ALPHA)
8984             abi_ulong mask = arg1;
8985             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8986 #else
8987             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8988                 return -TARGET_EFAULT;
8989             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8990             unlock_user(p, arg1, 0);
8991 #endif
8992             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8993                                                SIGSET_T_SIZE));
8994             if (ret != -TARGET_ERESTARTSYS) {
8995                 ts->in_sigsuspend = 1;
8996             }
8997         }
8998         return ret;
8999 #endif
9000     case TARGET_NR_rt_sigsuspend:
9001         {
9002             TaskState *ts = cpu->opaque;
9003 
9004             if (arg2 != sizeof(target_sigset_t)) {
9005                 return -TARGET_EINVAL;
9006             }
9007             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9008                 return -TARGET_EFAULT;
9009             target_to_host_sigset(&ts->sigsuspend_mask, p);
9010             unlock_user(p, arg1, 0);
9011             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9012                                                SIGSET_T_SIZE));
9013             if (ret != -TARGET_ERESTARTSYS) {
9014                 ts->in_sigsuspend = 1;
9015             }
9016         }
9017         return ret;
9018 #ifdef TARGET_NR_rt_sigtimedwait
9019     case TARGET_NR_rt_sigtimedwait:
9020         {
9021             sigset_t set;
9022             struct timespec uts, *puts;
9023             siginfo_t uinfo;
9024 
9025             if (arg4 != sizeof(target_sigset_t)) {
9026                 return -TARGET_EINVAL;
9027             }
9028 
9029             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9030                 return -TARGET_EFAULT;
9031             target_to_host_sigset(&set, p);
9032             unlock_user(p, arg1, 0);
9033             if (arg3) {
9034                 puts = &uts;
9035                 if (target_to_host_timespec(puts, arg3)) {
9036                     return -TARGET_EFAULT;
9037                 }
9038             } else {
9039                 puts = NULL;
9040             }
9041             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9042                                                  SIGSET_T_SIZE));
9043             if (!is_error(ret)) {
9044                 if (arg2) {
9045                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9046                                   0);
9047                     if (!p) {
9048                         return -TARGET_EFAULT;
9049                     }
9050                     host_to_target_siginfo(p, &uinfo);
9051                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9052                 }
9053                 ret = host_to_target_signal(ret);
9054             }
9055         }
9056         return ret;
9057 #endif
9058 #ifdef TARGET_NR_rt_sigtimedwait_time64
9059     case TARGET_NR_rt_sigtimedwait_time64:
9060         {
9061             sigset_t set;
9062             struct timespec uts, *puts;
9063             siginfo_t uinfo;
9064 
9065             if (arg4 != sizeof(target_sigset_t)) {
9066                 return -TARGET_EINVAL;
9067             }
9068 
9069             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9070             if (!p) {
9071                 return -TARGET_EFAULT;
9072             }
9073             target_to_host_sigset(&set, p);
9074             unlock_user(p, arg1, 0);
9075             if (arg3) {
9076                 puts = &uts;
9077                 if (target_to_host_timespec64(puts, arg3)) {
9078                     return -TARGET_EFAULT;
9079                 }
9080             } else {
9081                 puts = NULL;
9082             }
9083             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9084                                                  SIGSET_T_SIZE));
9085             if (!is_error(ret)) {
9086                 if (arg2) {
9087                     p = lock_user(VERIFY_WRITE, arg2,
9088                                   sizeof(target_siginfo_t), 0);
9089                     if (!p) {
9090                         return -TARGET_EFAULT;
9091                     }
9092                     host_to_target_siginfo(p, &uinfo);
9093                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9094                 }
9095                 ret = host_to_target_signal(ret);
9096             }
9097         }
9098         return ret;
9099 #endif
9100     case TARGET_NR_rt_sigqueueinfo:
9101         {
9102             siginfo_t uinfo;
9103 
9104             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9105             if (!p) {
9106                 return -TARGET_EFAULT;
9107             }
9108             target_to_host_siginfo(&uinfo, p);
9109             unlock_user(p, arg3, 0);
9110             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9111         }
9112         return ret;
9113     case TARGET_NR_rt_tgsigqueueinfo:
9114         {
9115             siginfo_t uinfo;
9116 
9117             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9118             if (!p) {
9119                 return -TARGET_EFAULT;
9120             }
9121             target_to_host_siginfo(&uinfo, p);
9122             unlock_user(p, arg4, 0);
9123             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9124         }
9125         return ret;
9126 #ifdef TARGET_NR_sigreturn
9127     case TARGET_NR_sigreturn:
9128         if (block_signals()) {
9129             return -TARGET_ERESTARTSYS;
9130         }
9131         return do_sigreturn(cpu_env);
9132 #endif
9133     case TARGET_NR_rt_sigreturn:
9134         if (block_signals()) {
9135             return -TARGET_ERESTARTSYS;
9136         }
9137         return do_rt_sigreturn(cpu_env);
9138     case TARGET_NR_sethostname:
9139         if (!(p = lock_user_string(arg1)))
9140             return -TARGET_EFAULT;
9141         ret = get_errno(sethostname(p, arg2));
9142         unlock_user(p, arg1, 0);
9143         return ret;
9144 #ifdef TARGET_NR_setrlimit
9145     case TARGET_NR_setrlimit:
9146         {
9147             int resource = target_to_host_resource(arg1);
9148             struct target_rlimit *target_rlim;
9149             struct rlimit rlim;
9150             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9151                 return -TARGET_EFAULT;
9152             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9153             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9154             unlock_user_struct(target_rlim, arg2, 0);
9155             /*
9156              * If we just passed through resource limit settings for memory then
9157              * they would also apply to QEMU's own allocations, and QEMU will
9158              * crash or hang or die if its allocations fail. Ideally we would
9159              * track the guest allocations in QEMU and apply the limits ourselves.
9160              * For now, just tell the guest the call succeeded but don't actually
9161              * limit anything.
9162              */
9163             if (resource != RLIMIT_AS &&
9164                 resource != RLIMIT_DATA &&
9165                 resource != RLIMIT_STACK) {
9166                 return get_errno(setrlimit(resource, &rlim));
9167             } else {
9168                 return 0;
9169             }
9170         }
9171 #endif
9172 #ifdef TARGET_NR_getrlimit
9173     case TARGET_NR_getrlimit:
9174         {
9175             int resource = target_to_host_resource(arg1);
9176             struct target_rlimit *target_rlim;
9177             struct rlimit rlim;
9178 
9179             ret = get_errno(getrlimit(resource, &rlim));
9180             if (!is_error(ret)) {
9181                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9182                     return -TARGET_EFAULT;
9183                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9184                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9185                 unlock_user_struct(target_rlim, arg2, 1);
9186             }
9187         }
9188         return ret;
9189 #endif
9190     case TARGET_NR_getrusage:
9191         {
9192             struct rusage rusage;
9193             ret = get_errno(getrusage(arg1, &rusage));
9194             if (!is_error(ret)) {
9195                 ret = host_to_target_rusage(arg2, &rusage);
9196             }
9197         }
9198         return ret;
9199 #if defined(TARGET_NR_gettimeofday)
9200     case TARGET_NR_gettimeofday:
9201         {
9202             struct timeval tv;
9203             struct timezone tz;
9204 
9205             ret = get_errno(gettimeofday(&tv, &tz));
9206             if (!is_error(ret)) {
9207                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9208                     return -TARGET_EFAULT;
9209                 }
9210                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9211                     return -TARGET_EFAULT;
9212                 }
9213             }
9214         }
9215         return ret;
9216 #endif
9217 #if defined(TARGET_NR_settimeofday)
9218     case TARGET_NR_settimeofday:
9219         {
9220             struct timeval tv, *ptv = NULL;
9221             struct timezone tz, *ptz = NULL;
9222 
9223             if (arg1) {
9224                 if (copy_from_user_timeval(&tv, arg1)) {
9225                     return -TARGET_EFAULT;
9226                 }
9227                 ptv = &tv;
9228             }
9229 
9230             if (arg2) {
9231                 if (copy_from_user_timezone(&tz, arg2)) {
9232                     return -TARGET_EFAULT;
9233                 }
9234                 ptz = &tz;
9235             }
9236 
9237             return get_errno(settimeofday(ptv, ptz));
9238         }
9239 #endif
9240 #if defined(TARGET_NR_select)
9241     case TARGET_NR_select:
9242 #if defined(TARGET_WANT_NI_OLD_SELECT)
9243         /* some architectures used to have old_select here
9244          * but now return ENOSYS for it.
9245          */
9246         ret = -TARGET_ENOSYS;
9247 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9248         ret = do_old_select(arg1);
9249 #else
9250         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9251 #endif
9252         return ret;
9253 #endif
9254 #ifdef TARGET_NR_pselect6
9255     case TARGET_NR_pselect6:
9256         {
9257             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9258             fd_set rfds, wfds, efds;
9259             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9260             struct timespec ts, *ts_ptr;
9261 
9262             /*
9263              * The 6th arg is actually two args smashed together,
9264              * so we cannot use the C library.
9265              */
9266             sigset_t set;
9267             struct {
9268                 sigset_t *set;
9269                 size_t size;
9270             } sig, *sig_ptr;
9271 
9272             abi_ulong arg_sigset, arg_sigsize, *arg7;
9273             target_sigset_t *target_sigset;
9274 
9275             n = arg1;
9276             rfd_addr = arg2;
9277             wfd_addr = arg3;
9278             efd_addr = arg4;
9279             ts_addr = arg5;
9280 
9281             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9282             if (ret) {
9283                 return ret;
9284             }
9285             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9286             if (ret) {
9287                 return ret;
9288             }
9289             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9290             if (ret) {
9291                 return ret;
9292             }
9293 
9294             /*
9295              * This takes a timespec, and not a timeval, so we cannot
9296              * use the do_select() helper ...
9297              */
9298             if (ts_addr) {
9299                 if (target_to_host_timespec(&ts, ts_addr)) {
9300                     return -TARGET_EFAULT;
9301                 }
9302                 ts_ptr = &ts;
9303             } else {
9304                 ts_ptr = NULL;
9305             }
9306 
9307             /* Extract the two packed args for the sigset */
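                  /*
                   * arg6 points at two consecutive abi_ulongs in guest memory:
                   * the guest address of the sigset (which may be 0) and the
                   * size the guest claims for it; unpack them by hand below.
                   */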
9308             if (arg6) {
9309                 sig_ptr = &sig;
9310                 sig.size = SIGSET_T_SIZE;
9311 
9312                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9313                 if (!arg7) {
9314                     return -TARGET_EFAULT;
9315                 }
9316                 arg_sigset = tswapal(arg7[0]);
9317                 arg_sigsize = tswapal(arg7[1]);
9318                 unlock_user(arg7, arg6, 0);
9319 
9320                 if (arg_sigset) {
9321                     sig.set = &set;
9322                     if (arg_sigsize != sizeof(*target_sigset)) {
9323                         /* Like the kernel, we enforce correct size sigsets */
9324                         return -TARGET_EINVAL;
9325                     }
9326                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9327                                               sizeof(*target_sigset), 1);
9328                     if (!target_sigset) {
9329                         return -TARGET_EFAULT;
9330                     }
9331                     target_to_host_sigset(&set, target_sigset);
9332                     unlock_user(target_sigset, arg_sigset, 0);
9333                 } else {
9334                     sig.set = NULL;
9335                 }
9336             } else {
9337                 sig_ptr = NULL;
9338             }
9339 
9340             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9341                                           ts_ptr, sig_ptr));
9342 
9343             if (!is_error(ret)) {
9344                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9345                     return -TARGET_EFAULT;
9346                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9347                     return -TARGET_EFAULT;
9348                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9349                     return -TARGET_EFAULT;
9350 
9351                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9352                     return -TARGET_EFAULT;
9353             }
9354         }
9355         return ret;
9356 #endif
9357 #ifdef TARGET_NR_symlink
9358     case TARGET_NR_symlink:
9359         {
9360             void *p2;
9361             p = lock_user_string(arg1);
9362             p2 = lock_user_string(arg2);
9363             if (!p || !p2)
9364                 ret = -TARGET_EFAULT;
9365             else
9366                 ret = get_errno(symlink(p, p2));
9367             unlock_user(p2, arg2, 0);
9368             unlock_user(p, arg1, 0);
9369         }
9370         return ret;
9371 #endif
9372 #if defined(TARGET_NR_symlinkat)
9373     case TARGET_NR_symlinkat:
9374         {
9375             void *p2;
9376             p  = lock_user_string(arg1);
9377             p2 = lock_user_string(arg3);
9378             if (!p || !p2)
9379                 ret = -TARGET_EFAULT;
9380             else
9381                 ret = get_errno(symlinkat(p, arg2, p2));
9382             unlock_user(p2, arg3, 0);
9383             unlock_user(p, arg1, 0);
9384         }
9385         return ret;
9386 #endif
9387 #ifdef TARGET_NR_readlink
9388     case TARGET_NR_readlink:
9389         {
9390             void *p2;
9391             p = lock_user_string(arg1);
9392             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9393             if (!p || !p2) {
9394                 ret = -TARGET_EFAULT;
9395             } else if (!arg3) {
9396                 /* Short circuit this for the magic exe check. */
9397                 ret = -TARGET_EINVAL;
9398             } else if (is_proc_myself((const char *)p, "exe")) {
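                      /*
                       * The guest is reading its own /proc/.../exe link, so
                       * report the path of the emulated binary (exec_path)
                       * rather than letting the host resolve it to the QEMU
                       * executable itself.
                       */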
9399                 char real[PATH_MAX], *temp;
9400                 temp = realpath(exec_path, real);
9401                 /* Return value is # of bytes that we wrote to the buffer. */
9402                 if (temp == NULL) {
9403                     ret = get_errno(-1);
9404                 } else {
9405                     /* Don't worry about sign mismatch as earlier mapping
9406                      * logic would have thrown a bad address error. */
9407                     ret = MIN(strlen(real), arg3);
9408                     /* We cannot NUL terminate the string. */
9409                     memcpy(p2, real, ret);
9410                 }
9411             } else {
9412                 ret = get_errno(readlink(path(p), p2, arg3));
9413             }
9414             unlock_user(p2, arg2, ret);
9415             unlock_user(p, arg1, 0);
9416         }
9417         return ret;
9418 #endif
9419 #if defined(TARGET_NR_readlinkat)
9420     case TARGET_NR_readlinkat:
9421         {
9422             void *p2;
9423             p  = lock_user_string(arg2);
9424             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9425             if (!p || !p2) {
9426                 ret = -TARGET_EFAULT;
9427             } else if (is_proc_myself((const char *)p, "exe")) {
9428                 char real[PATH_MAX], *temp;
9429                 temp = realpath(exec_path, real);
9430                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9431                 snprintf((char *)p2, arg4, "%s", real);
9432             } else {
9433                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9434             }
9435             unlock_user(p2, arg3, ret);
9436             unlock_user(p, arg2, 0);
9437         }
9438         return ret;
9439 #endif
9440 #ifdef TARGET_NR_swapon
9441     case TARGET_NR_swapon:
9442         if (!(p = lock_user_string(arg1)))
9443             return -TARGET_EFAULT;
9444         ret = get_errno(swapon(p, arg2));
9445         unlock_user(p, arg1, 0);
9446         return ret;
9447 #endif
9448     case TARGET_NR_reboot:
9449         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9450            /* arg4 must be ignored in all other cases */
9451            p = lock_user_string(arg4);
9452            if (!p) {
9453                return -TARGET_EFAULT;
9454            }
9455            ret = get_errno(reboot(arg1, arg2, arg3, p));
9456            unlock_user(p, arg4, 0);
9457         } else {
9458            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9459         }
9460         return ret;
9461 #ifdef TARGET_NR_mmap
9462     case TARGET_NR_mmap:
9463 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9464     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9465     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9466     || defined(TARGET_S390X)
9467         {
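                  /*
                   * On these targets the old mmap calling convention passes a
                   * single guest pointer to an array of six arguments instead
                   * of passing them individually.
                   */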
9468             abi_ulong *v;
9469             abi_ulong v1, v2, v3, v4, v5, v6;
9470             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9471                 return -TARGET_EFAULT;
9472             v1 = tswapal(v[0]);
9473             v2 = tswapal(v[1]);
9474             v3 = tswapal(v[2]);
9475             v4 = tswapal(v[3]);
9476             v5 = tswapal(v[4]);
9477             v6 = tswapal(v[5]);
9478             unlock_user(v, arg1, 0);
9479             ret = get_errno(target_mmap(v1, v2, v3,
9480                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9481                                         v5, v6));
9482         }
9483 #else
9484         ret = get_errno(target_mmap(arg1, arg2, arg3,
9485                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9486                                     arg5,
9487                                     arg6));
9488 #endif
9489         return ret;
9490 #endif
9491 #ifdef TARGET_NR_mmap2
9492     case TARGET_NR_mmap2:
9493 #ifndef MMAP_SHIFT
9494 #define MMAP_SHIFT 12
9495 #endif
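              /*
               * mmap2 passes its file offset in units of 1 << MMAP_SHIFT bytes
               * (4096 unless the target overrides it), so convert it to a byte
               * offset before handing it to target_mmap().
               */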
9496         ret = target_mmap(arg1, arg2, arg3,
9497                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9498                           arg5, arg6 << MMAP_SHIFT);
9499         return get_errno(ret);
9500 #endif
9501     case TARGET_NR_munmap:
9502         return get_errno(target_munmap(arg1, arg2));
9503     case TARGET_NR_mprotect:
9504         {
9505             TaskState *ts = cpu->opaque;
9506             /* Special hack to detect libc making the stack executable.  */
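                  /*
                   * When that happens, drop PROT_GROWSDOWN and widen the range
                   * so it starts at the lowest guest stack address, giving the
                   * whole stack region the new protection.
                   */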
9507             if ((arg3 & PROT_GROWSDOWN)
9508                 && arg1 >= ts->info->stack_limit
9509                 && arg1 <= ts->info->start_stack) {
9510                 arg3 &= ~PROT_GROWSDOWN;
9511                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9512                 arg1 = ts->info->stack_limit;
9513             }
9514         }
9515         return get_errno(target_mprotect(arg1, arg2, arg3));
9516 #ifdef TARGET_NR_mremap
9517     case TARGET_NR_mremap:
9518         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9519 #endif
9520         /* ??? msync/mlock/munlock are broken for softmmu.  */
9521 #ifdef TARGET_NR_msync
9522     case TARGET_NR_msync:
9523         return get_errno(msync(g2h(arg1), arg2, arg3));
9524 #endif
9525 #ifdef TARGET_NR_mlock
9526     case TARGET_NR_mlock:
9527         return get_errno(mlock(g2h(arg1), arg2));
9528 #endif
9529 #ifdef TARGET_NR_munlock
9530     case TARGET_NR_munlock:
9531         return get_errno(munlock(g2h(arg1), arg2));
9532 #endif
9533 #ifdef TARGET_NR_mlockall
9534     case TARGET_NR_mlockall:
9535         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9536 #endif
9537 #ifdef TARGET_NR_munlockall
9538     case TARGET_NR_munlockall:
9539         return get_errno(munlockall());
9540 #endif
9541 #ifdef TARGET_NR_truncate
9542     case TARGET_NR_truncate:
9543         if (!(p = lock_user_string(arg1)))
9544             return -TARGET_EFAULT;
9545         ret = get_errno(truncate(p, arg2));
9546         unlock_user(p, arg1, 0);
9547         return ret;
9548 #endif
9549 #ifdef TARGET_NR_ftruncate
9550     case TARGET_NR_ftruncate:
9551         return get_errno(ftruncate(arg1, arg2));
9552 #endif
9553     case TARGET_NR_fchmod:
9554         return get_errno(fchmod(arg1, arg2));
9555 #if defined(TARGET_NR_fchmodat)
9556     case TARGET_NR_fchmodat:
9557         if (!(p = lock_user_string(arg2)))
9558             return -TARGET_EFAULT;
9559         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9560         unlock_user(p, arg2, 0);
9561         return ret;
9562 #endif
9563     case TARGET_NR_getpriority:
9564         /* Note that negative values are valid for getpriority, so we must
9565            differentiate based on errno settings.  */
9566         errno = 0;
9567         ret = getpriority(arg1, arg2);
9568         if (ret == -1 && errno != 0) {
9569             return -host_to_target_errno(errno);
9570         }
9571 #ifdef TARGET_ALPHA
9572         /* Return value is the unbiased priority.  Signal no error.  */
9573         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9574 #else
9575         /* Return value is a biased priority to avoid negative numbers.  */
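              /* e.g. a host nice value of -20 is reported as 40 and 19 as 1,
               * matching what the raw kernel syscall returns. */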
9576         ret = 20 - ret;
9577 #endif
9578         return ret;
9579     case TARGET_NR_setpriority:
9580         return get_errno(setpriority(arg1, arg2, arg3));
9581 #ifdef TARGET_NR_statfs
9582     case TARGET_NR_statfs:
9583         if (!(p = lock_user_string(arg1))) {
9584             return -TARGET_EFAULT;
9585         }
9586         ret = get_errno(statfs(path(p), &stfs));
9587         unlock_user(p, arg1, 0);
9588     convert_statfs:
9589         if (!is_error(ret)) {
9590             struct target_statfs *target_stfs;
9591 
9592             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9593                 return -TARGET_EFAULT;
9594             __put_user(stfs.f_type, &target_stfs->f_type);
9595             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9596             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9597             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9598             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9599             __put_user(stfs.f_files, &target_stfs->f_files);
9600             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9601             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9602             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9603             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9604             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9605 #ifdef _STATFS_F_FLAGS
9606             __put_user(stfs.f_flags, &target_stfs->f_flags);
9607 #else
9608             __put_user(0, &target_stfs->f_flags);
9609 #endif
9610             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9611             unlock_user_struct(target_stfs, arg2, 1);
9612         }
9613         return ret;
9614 #endif
9615 #ifdef TARGET_NR_fstatfs
9616     case TARGET_NR_fstatfs:
9617         ret = get_errno(fstatfs(arg1, &stfs));
9618         goto convert_statfs;
9619 #endif
9620 #ifdef TARGET_NR_statfs64
9621     case TARGET_NR_statfs64:
9622         if (!(p = lock_user_string(arg1))) {
9623             return -TARGET_EFAULT;
9624         }
9625         ret = get_errno(statfs(path(p), &stfs));
9626         unlock_user(p, arg1, 0);
9627     convert_statfs64:
9628         if (!is_error(ret)) {
9629             struct target_statfs64 *target_stfs;
9630 
9631             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9632                 return -TARGET_EFAULT;
9633             __put_user(stfs.f_type, &target_stfs->f_type);
9634             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9635             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9636             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9637             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9638             __put_user(stfs.f_files, &target_stfs->f_files);
9639             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9640             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9641             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9642             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9643             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9644             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9645             unlock_user_struct(target_stfs, arg3, 1);
9646         }
9647         return ret;
9648     case TARGET_NR_fstatfs64:
9649         ret = get_errno(fstatfs(arg1, &stfs));
9650         goto convert_statfs64;
9651 #endif
9652 #ifdef TARGET_NR_socketcall
9653     case TARGET_NR_socketcall:
9654         return do_socketcall(arg1, arg2);
9655 #endif
9656 #ifdef TARGET_NR_accept
9657     case TARGET_NR_accept:
9658         return do_accept4(arg1, arg2, arg3, 0);
9659 #endif
9660 #ifdef TARGET_NR_accept4
9661     case TARGET_NR_accept4:
9662         return do_accept4(arg1, arg2, arg3, arg4);
9663 #endif
9664 #ifdef TARGET_NR_bind
9665     case TARGET_NR_bind:
9666         return do_bind(arg1, arg2, arg3);
9667 #endif
9668 #ifdef TARGET_NR_connect
9669     case TARGET_NR_connect:
9670         return do_connect(arg1, arg2, arg3);
9671 #endif
9672 #ifdef TARGET_NR_getpeername
9673     case TARGET_NR_getpeername:
9674         return do_getpeername(arg1, arg2, arg3);
9675 #endif
9676 #ifdef TARGET_NR_getsockname
9677     case TARGET_NR_getsockname:
9678         return do_getsockname(arg1, arg2, arg3);
9679 #endif
9680 #ifdef TARGET_NR_getsockopt
9681     case TARGET_NR_getsockopt:
9682         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9683 #endif
9684 #ifdef TARGET_NR_listen
9685     case TARGET_NR_listen:
9686         return get_errno(listen(arg1, arg2));
9687 #endif
9688 #ifdef TARGET_NR_recv
9689     case TARGET_NR_recv:
9690         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9691 #endif
9692 #ifdef TARGET_NR_recvfrom
9693     case TARGET_NR_recvfrom:
9694         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9695 #endif
9696 #ifdef TARGET_NR_recvmsg
9697     case TARGET_NR_recvmsg:
9698         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9699 #endif
9700 #ifdef TARGET_NR_send
9701     case TARGET_NR_send:
9702         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9703 #endif
9704 #ifdef TARGET_NR_sendmsg
9705     case TARGET_NR_sendmsg:
9706         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9707 #endif
9708 #ifdef TARGET_NR_sendmmsg
9709     case TARGET_NR_sendmmsg:
9710         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9711 #endif
9712 #ifdef TARGET_NR_recvmmsg
9713     case TARGET_NR_recvmmsg:
9714         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9715 #endif
9716 #ifdef TARGET_NR_sendto
9717     case TARGET_NR_sendto:
9718         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9719 #endif
9720 #ifdef TARGET_NR_shutdown
9721     case TARGET_NR_shutdown:
9722         return get_errno(shutdown(arg1, arg2));
9723 #endif
9724 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9725     case TARGET_NR_getrandom:
9726         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9727         if (!p) {
9728             return -TARGET_EFAULT;
9729         }
9730         ret = get_errno(getrandom(p, arg2, arg3));
9731         unlock_user(p, arg1, ret);
9732         return ret;
9733 #endif
9734 #ifdef TARGET_NR_socket
9735     case TARGET_NR_socket:
9736         return do_socket(arg1, arg2, arg3);
9737 #endif
9738 #ifdef TARGET_NR_socketpair
9739     case TARGET_NR_socketpair:
9740         return do_socketpair(arg1, arg2, arg3, arg4);
9741 #endif
9742 #ifdef TARGET_NR_setsockopt
9743     case TARGET_NR_setsockopt:
9744         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9745 #endif
9746 #if defined(TARGET_NR_syslog)
9747     case TARGET_NR_syslog:
9748         {
9749             int len = arg3;
9750 
9751             switch (arg1) {
9752             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9753             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9754             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9755             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9756             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9757             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9758             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9759             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9760                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9761             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9762             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9763             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9764                 {
9765                     if (len < 0) {
9766                         return -TARGET_EINVAL;
9767                     }
9768                     if (len == 0) {
9769                         return 0;
9770                     }
9771                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9772                     if (!p) {
9773                         return -TARGET_EFAULT;
9774                     }
9775                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9776                     unlock_user(p, arg2, arg3);
9777                 }
9778                 return ret;
9779             default:
9780                 return -TARGET_EINVAL;
9781             }
9782         }
9783         break;
9784 #endif
9785     case TARGET_NR_setitimer:
9786         {
9787             struct itimerval value, ovalue, *pvalue;
9788 
9789             if (arg2) {
9790                 pvalue = &value;
9791                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9792                     || copy_from_user_timeval(&pvalue->it_value,
9793                                               arg2 + sizeof(struct target_timeval)))
9794                     return -TARGET_EFAULT;
9795             } else {
9796                 pvalue = NULL;
9797             }
9798             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9799             if (!is_error(ret) && arg3) {
9800                 if (copy_to_user_timeval(arg3,
9801                                          &ovalue.it_interval)
9802                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9803                                             &ovalue.it_value))
9804                     return -TARGET_EFAULT;
9805             }
9806         }
9807         return ret;
9808     case TARGET_NR_getitimer:
9809         {
9810             struct itimerval value;
9811 
9812             ret = get_errno(getitimer(arg1, &value));
9813             if (!is_error(ret) && arg2) {
9814                 if (copy_to_user_timeval(arg2,
9815                                          &value.it_interval)
9816                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9817                                             &value.it_value))
9818                     return -TARGET_EFAULT;
9819             }
9820         }
9821         return ret;
9822 #ifdef TARGET_NR_stat
9823     case TARGET_NR_stat:
9824         if (!(p = lock_user_string(arg1))) {
9825             return -TARGET_EFAULT;
9826         }
9827         ret = get_errno(stat(path(p), &st));
9828         unlock_user(p, arg1, 0);
9829         goto do_stat;
9830 #endif
9831 #ifdef TARGET_NR_lstat
9832     case TARGET_NR_lstat:
9833         if (!(p = lock_user_string(arg1))) {
9834             return -TARGET_EFAULT;
9835         }
9836         ret = get_errno(lstat(path(p), &st));
9837         unlock_user(p, arg1, 0);
9838         goto do_stat;
9839 #endif
9840 #ifdef TARGET_NR_fstat
9841     case TARGET_NR_fstat:
9842         {
9843             ret = get_errno(fstat(arg1, &st));
9844 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9845         do_stat:
9846 #endif
9847             if (!is_error(ret)) {
9848                 struct target_stat *target_st;
9849 
9850                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9851                     return -TARGET_EFAULT;
9852                 memset(target_st, 0, sizeof(*target_st));
9853                 __put_user(st.st_dev, &target_st->st_dev);
9854                 __put_user(st.st_ino, &target_st->st_ino);
9855                 __put_user(st.st_mode, &target_st->st_mode);
9856                 __put_user(st.st_uid, &target_st->st_uid);
9857                 __put_user(st.st_gid, &target_st->st_gid);
9858                 __put_user(st.st_nlink, &target_st->st_nlink);
9859                 __put_user(st.st_rdev, &target_st->st_rdev);
9860                 __put_user(st.st_size, &target_st->st_size);
9861                 __put_user(st.st_blksize, &target_st->st_blksize);
9862                 __put_user(st.st_blocks, &target_st->st_blocks);
9863                 __put_user(st.st_atime, &target_st->target_st_atime);
9864                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9865                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9866 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9867     defined(TARGET_STAT_HAVE_NSEC)
9868                 __put_user(st.st_atim.tv_nsec,
9869                            &target_st->target_st_atime_nsec);
9870                 __put_user(st.st_mtim.tv_nsec,
9871                            &target_st->target_st_mtime_nsec);
9872                 __put_user(st.st_ctim.tv_nsec,
9873                            &target_st->target_st_ctime_nsec);
9874 #endif
9875                 unlock_user_struct(target_st, arg2, 1);
9876             }
9877         }
9878         return ret;
9879 #endif
9880     case TARGET_NR_vhangup:
9881         return get_errno(vhangup());
9882 #ifdef TARGET_NR_syscall
9883     case TARGET_NR_syscall:
9884         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9885                           arg6, arg7, arg8, 0);
9886 #endif
9887 #if defined(TARGET_NR_wait4)
9888     case TARGET_NR_wait4:
9889         {
9890             int status;
9891             abi_long status_ptr = arg2;
9892             struct rusage rusage, *rusage_ptr;
9893             abi_ulong target_rusage = arg4;
9894             abi_long rusage_err;
9895             if (target_rusage)
9896                 rusage_ptr = &rusage;
9897             else
9898                 rusage_ptr = NULL;
9899             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9900             if (!is_error(ret)) {
9901                 if (status_ptr && ret) {
9902                     status = host_to_target_waitstatus(status);
9903                     if (put_user_s32(status, status_ptr))
9904                         return -TARGET_EFAULT;
9905                 }
9906                 if (target_rusage) {
9907                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9908                     if (rusage_err) {
9909                         ret = rusage_err;
9910                     }
9911                 }
9912             }
9913         }
9914         return ret;
9915 #endif
9916 #ifdef TARGET_NR_swapoff
9917     case TARGET_NR_swapoff:
9918         if (!(p = lock_user_string(arg1)))
9919             return -TARGET_EFAULT;
9920         ret = get_errno(swapoff(p));
9921         unlock_user(p, arg1, 0);
9922         return ret;
9923 #endif
9924     case TARGET_NR_sysinfo:
9925         {
9926             struct target_sysinfo *target_value;
9927             struct sysinfo value;
9928             ret = get_errno(sysinfo(&value));
9929             if (!is_error(ret) && arg1)
9930             {
9931                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9932                     return -TARGET_EFAULT;
9933                 __put_user(value.uptime, &target_value->uptime);
9934                 __put_user(value.loads[0], &target_value->loads[0]);
9935                 __put_user(value.loads[1], &target_value->loads[1]);
9936                 __put_user(value.loads[2], &target_value->loads[2]);
9937                 __put_user(value.totalram, &target_value->totalram);
9938                 __put_user(value.freeram, &target_value->freeram);
9939                 __put_user(value.sharedram, &target_value->sharedram);
9940                 __put_user(value.bufferram, &target_value->bufferram);
9941                 __put_user(value.totalswap, &target_value->totalswap);
9942                 __put_user(value.freeswap, &target_value->freeswap);
9943                 __put_user(value.procs, &target_value->procs);
9944                 __put_user(value.totalhigh, &target_value->totalhigh);
9945                 __put_user(value.freehigh, &target_value->freehigh);
9946                 __put_user(value.mem_unit, &target_value->mem_unit);
9947                 unlock_user_struct(target_value, arg1, 1);
9948             }
9949         }
9950         return ret;
9951 #ifdef TARGET_NR_ipc
9952     case TARGET_NR_ipc:
9953         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9954 #endif
9955 #ifdef TARGET_NR_semget
9956     case TARGET_NR_semget:
9957         return get_errno(semget(arg1, arg2, arg3));
9958 #endif
9959 #ifdef TARGET_NR_semop
9960     case TARGET_NR_semop:
9961         return do_semtimedop(arg1, arg2, arg3, 0, false);
9962 #endif
9963 #ifdef TARGET_NR_semtimedop
9964     case TARGET_NR_semtimedop:
9965         return do_semtimedop(arg1, arg2, arg3, arg4, false);
9966 #endif
9967 #ifdef TARGET_NR_semtimedop_time64
9968     case TARGET_NR_semtimedop_time64:
9969         return do_semtimedop(arg1, arg2, arg3, arg4, true);
9970 #endif
9971 #ifdef TARGET_NR_semctl
9972     case TARGET_NR_semctl:
9973         return do_semctl(arg1, arg2, arg3, arg4);
9974 #endif
9975 #ifdef TARGET_NR_msgctl
9976     case TARGET_NR_msgctl:
9977         return do_msgctl(arg1, arg2, arg3);
9978 #endif
9979 #ifdef TARGET_NR_msgget
9980     case TARGET_NR_msgget:
9981         return get_errno(msgget(arg1, arg2));
9982 #endif
9983 #ifdef TARGET_NR_msgrcv
9984     case TARGET_NR_msgrcv:
9985         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9986 #endif
9987 #ifdef TARGET_NR_msgsnd
9988     case TARGET_NR_msgsnd:
9989         return do_msgsnd(arg1, arg2, arg3, arg4);
9990 #endif
9991 #ifdef TARGET_NR_shmget
9992     case TARGET_NR_shmget:
9993         return get_errno(shmget(arg1, arg2, arg3));
9994 #endif
9995 #ifdef TARGET_NR_shmctl
9996     case TARGET_NR_shmctl:
9997         return do_shmctl(arg1, arg2, arg3);
9998 #endif
9999 #ifdef TARGET_NR_shmat
10000     case TARGET_NR_shmat:
10001         return do_shmat(cpu_env, arg1, arg2, arg3);
10002 #endif
10003 #ifdef TARGET_NR_shmdt
10004     case TARGET_NR_shmdt:
10005         return do_shmdt(arg1);
10006 #endif
10007     case TARGET_NR_fsync:
10008         return get_errno(fsync(arg1));
10009     case TARGET_NR_clone:
10010         /* Linux manages to have three different orderings for its
10011          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10012          * match the kernel's CONFIG_CLONE_* settings.
10013          * Microblaze is further special in that it uses a sixth
10014          * implicit argument to clone for the TLS pointer.
10015          */
10016 #if defined(TARGET_MICROBLAZE)
10017         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10018 #elif defined(TARGET_CLONE_BACKWARDS)
10019         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10020 #elif defined(TARGET_CLONE_BACKWARDS2)
10021         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10022 #else
10023         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10024 #endif
10025         return ret;
10026 #ifdef __NR_exit_group
10027         /* new thread calls */
10028     case TARGET_NR_exit_group:
10029         preexit_cleanup(cpu_env, arg1);
10030         return get_errno(exit_group(arg1));
10031 #endif
10032     case TARGET_NR_setdomainname:
10033         if (!(p = lock_user_string(arg1)))
10034             return -TARGET_EFAULT;
10035         ret = get_errno(setdomainname(p, arg2));
10036         unlock_user(p, arg1, 0);
10037         return ret;
10038     case TARGET_NR_uname:
10039         /* no need to transcode because we use the linux syscall */
10040         {
10041             struct new_utsname * buf;
10042 
10043             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10044                 return -TARGET_EFAULT;
10045             ret = get_errno(sys_uname(buf));
10046             if (!is_error(ret)) {
10047                 /* Overwrite the native machine name with whatever is being
10048                    emulated. */
10049                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10050                           sizeof(buf->machine));
10051                 /* Allow the user to override the reported release.  */
10052                 if (qemu_uname_release && *qemu_uname_release) {
10053                     g_strlcpy(buf->release, qemu_uname_release,
10054                               sizeof(buf->release));
10055                 }
10056             }
10057             unlock_user_struct(buf, arg1, 1);
10058         }
10059         return ret;
10060 #ifdef TARGET_I386
10061     case TARGET_NR_modify_ldt:
10062         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10063 #if !defined(TARGET_X86_64)
10064     case TARGET_NR_vm86:
10065         return do_vm86(cpu_env, arg1, arg2);
10066 #endif
10067 #endif
10068 #if defined(TARGET_NR_adjtimex)
10069     case TARGET_NR_adjtimex:
10070         {
10071             struct timex host_buf;
10072 
10073             if (target_to_host_timex(&host_buf, arg1) != 0) {
10074                 return -TARGET_EFAULT;
10075             }
10076             ret = get_errno(adjtimex(&host_buf));
10077             if (!is_error(ret)) {
10078                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10079                     return -TARGET_EFAULT;
10080                 }
10081             }
10082         }
10083         return ret;
10084 #endif
10085 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10086     case TARGET_NR_clock_adjtime:
10087         {
10088             struct timex htx, *phtx = &htx;
10089 
10090             if (target_to_host_timex(phtx, arg2) != 0) {
10091                 return -TARGET_EFAULT;
10092             }
10093             ret = get_errno(clock_adjtime(arg1, phtx));
10094             if (!is_error(ret) && phtx) {
10095                 if (host_to_target_timex(arg2, phtx) != 0) {
10096                     return -TARGET_EFAULT;
10097                 }
10098             }
10099         }
10100         return ret;
10101 #endif
10102 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10103     case TARGET_NR_clock_adjtime64:
10104         {
10105             struct timex htx;
10106 
10107             if (target_to_host_timex64(&htx, arg2) != 0) {
10108                 return -TARGET_EFAULT;
10109             }
10110             ret = get_errno(clock_adjtime(arg1, &htx));
10111             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10112                     return -TARGET_EFAULT;
10113             }
10114         }
10115         return ret;
10116 #endif
10117     case TARGET_NR_getpgid:
10118         return get_errno(getpgid(arg1));
10119     case TARGET_NR_fchdir:
10120         return get_errno(fchdir(arg1));
10121     case TARGET_NR_personality:
10122         return get_errno(personality(arg1));
10123 #ifdef TARGET_NR__llseek /* Not on alpha */
10124     case TARGET_NR__llseek:
10125         {
10126             int64_t res;
10127 #if !defined(__NR_llseek)
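                  /*
                   * No host __NR_llseek: rebuild the 64-bit offset from the two
                   * 32-bit halves the guest passed (arg2 is the high word, arg3
                   * the low word) and fall back to plain lseek(); the result is
                   * written back through arg4 below.
                   */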
10128             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10129             if (res == -1) {
10130                 ret = get_errno(res);
10131             } else {
10132                 ret = 0;
10133             }
10134 #else
10135             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10136 #endif
10137             if ((ret == 0) && put_user_s64(res, arg4)) {
10138                 return -TARGET_EFAULT;
10139             }
10140         }
10141         return ret;
10142 #endif
10143 #ifdef TARGET_NR_getdents
10144     case TARGET_NR_getdents:
10145 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10146 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10147         {
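                  /*
                   * The host's linux_dirent records carry 64-bit d_ino/d_off
                   * here, so they cannot be handed to the 32-bit guest as-is:
                   * read them into a temporary buffer and repack each record
                   * into the target_dirent layout in the guest's buffer.
                   */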
10148             struct target_dirent *target_dirp;
10149             struct linux_dirent *dirp;
10150             abi_long count = arg3;
10151 
10152             dirp = g_try_malloc(count);
10153             if (!dirp) {
10154                 return -TARGET_ENOMEM;
10155             }
10156 
10157             ret = get_errno(sys_getdents(arg1, dirp, count));
10158             if (!is_error(ret)) {
10159                 struct linux_dirent *de;
10160                 struct target_dirent *tde;
10161                 int len = ret;
10162                 int reclen, treclen;
10163                 int count1, tnamelen;
10164 
10165                 count1 = 0;
10166                 de = dirp;
10167                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10168                     return -TARGET_EFAULT;
10169                 tde = target_dirp;
10170                 while (len > 0) {
10171                     reclen = de->d_reclen;
10172                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10173                     assert(tnamelen >= 0);
10174                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10175                     assert(count1 + treclen <= count);
10176                     tde->d_reclen = tswap16(treclen);
10177                     tde->d_ino = tswapal(de->d_ino);
10178                     tde->d_off = tswapal(de->d_off);
10179                     memcpy(tde->d_name, de->d_name, tnamelen);
10180                     de = (struct linux_dirent *)((char *)de + reclen);
10181                     len -= reclen;
10182                     tde = (struct target_dirent *)((char *)tde + treclen);
10183                     count1 += treclen;
10184                 }
10185                 ret = count1;
10186                 unlock_user(target_dirp, arg2, ret);
10187             }
10188             g_free(dirp);
10189         }
10190 #else
10191         {
10192             struct linux_dirent *dirp;
10193             abi_long count = arg3;
10194 
10195             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10196                 return -TARGET_EFAULT;
10197             ret = get_errno(sys_getdents(arg1, dirp, count));
10198             if (!is_error(ret)) {
10199                 struct linux_dirent *de;
10200                 int len = ret;
10201                 int reclen;
10202                 de = dirp;
10203                 while (len > 0) {
10204                     reclen = de->d_reclen;
10205                     if (reclen > len)
10206                         break;
10207                     de->d_reclen = tswap16(reclen);
10208                     tswapls(&de->d_ino);
10209                     tswapls(&de->d_off);
10210                     de = (struct linux_dirent *)((char *)de + reclen);
10211                     len -= reclen;
10212                 }
10213             }
10214             unlock_user(dirp, arg2, ret);
10215         }
10216 #endif
10217 #else
10218         /* Implement getdents in terms of getdents64 */
10219         {
10220             struct linux_dirent64 *dirp;
10221             abi_long count = arg3;
10222 
10223             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10224             if (!dirp) {
10225                 return -TARGET_EFAULT;
10226             }
10227             ret = get_errno(sys_getdents64(arg1, dirp, count));
10228             if (!is_error(ret)) {
10229                 /* Convert the dirent64 structs to target dirent.  We do this
10230                  * in-place, since we can guarantee that a target_dirent is no
10231                  * larger than a dirent64; however this means we have to be
10232                  * careful to read everything before writing in the new format.
10233                  */
10234                 struct linux_dirent64 *de;
10235                 struct target_dirent *tde;
10236                 int len = ret;
10237                 int tlen = 0;
10238 
10239                 de = dirp;
10240                 tde = (struct target_dirent *)dirp;
10241                 while (len > 0) {
10242                     int namelen, treclen;
10243                     int reclen = de->d_reclen;
10244                     uint64_t ino = de->d_ino;
10245                     int64_t off = de->d_off;
10246                     uint8_t type = de->d_type;
10247 
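                            /*
                             * treclen reserves namelen + 2 bytes beyond the
                             * fixed fields: the name's terminating NUL plus one
                             * trailing byte that receives d_type below.
                             */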
10248                     namelen = strlen(de->d_name);
10249                     treclen = offsetof(struct target_dirent, d_name)
10250                         + namelen + 2;
10251                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10252 
10253                     memmove(tde->d_name, de->d_name, namelen + 1);
10254                     tde->d_ino = tswapal(ino);
10255                     tde->d_off = tswapal(off);
10256                     tde->d_reclen = tswap16(treclen);
10257                     /* The target_dirent type is in what was formerly a padding
10258                      * byte at the end of the structure:
10259                      */
10260                     *(((char *)tde) + treclen - 1) = type;
10261 
10262                     de = (struct linux_dirent64 *)((char *)de + reclen);
10263                     tde = (struct target_dirent *)((char *)tde + treclen);
10264                     len -= reclen;
10265                     tlen += treclen;
10266                 }
10267                 ret = tlen;
10268             }
10269             unlock_user(dirp, arg2, ret);
10270         }
10271 #endif
10272         return ret;
10273 #endif /* TARGET_NR_getdents */
10274 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10275     case TARGET_NR_getdents64:
10276         {
10277             struct linux_dirent64 *dirp;
10278             abi_long count = arg3;
10279             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10280                 return -TARGET_EFAULT;
10281             ret = get_errno(sys_getdents64(arg1, dirp, count));
10282             if (!is_error(ret)) {
10283                 struct linux_dirent64 *de;
10284                 int len = ret;
10285                 int reclen;
10286                 de = dirp;
10287                 while (len > 0) {
10288                     reclen = de->d_reclen;
10289                     if (reclen > len)
10290                         break;
10291                     de->d_reclen = tswap16(reclen);
10292                     tswap64s((uint64_t *)&de->d_ino);
10293                     tswap64s((uint64_t *)&de->d_off);
10294                     de = (struct linux_dirent64 *)((char *)de + reclen);
10295                     len -= reclen;
10296                 }
10297             }
10298             unlock_user(dirp, arg2, ret);
10299         }
10300         return ret;
10301 #endif /* TARGET_NR_getdents64 */
10302 #if defined(TARGET_NR__newselect)
10303     case TARGET_NR__newselect:
10304         return do_select(arg1, arg2, arg3, arg4, arg5);
10305 #endif
10306 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10307 # ifdef TARGET_NR_poll
10308     case TARGET_NR_poll:
10309 # endif
10310 # ifdef TARGET_NR_ppoll
10311     case TARGET_NR_ppoll:
10312 # endif
10313         {
10314             struct target_pollfd *target_pfd;
10315             unsigned int nfds = arg2;
10316             struct pollfd *pfd;
10317             unsigned int i;
10318 
10319             pfd = NULL;
10320             target_pfd = NULL;
10321             if (nfds) {
10322                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10323                     return -TARGET_EINVAL;
10324                 }
10325 
10326                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10327                                        sizeof(struct target_pollfd) * nfds, 1);
10328                 if (!target_pfd) {
10329                     return -TARGET_EFAULT;
10330                 }
10331 
10332                 pfd = alloca(sizeof(struct pollfd) * nfds);
10333                 for (i = 0; i < nfds; i++) {
10334                     pfd[i].fd = tswap32(target_pfd[i].fd);
10335                     pfd[i].events = tswap16(target_pfd[i].events);
10336                 }
10337             }
10338 
10339             switch (num) {
10340 # ifdef TARGET_NR_ppoll
10341             case TARGET_NR_ppoll:
10342             {
10343                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10344                 target_sigset_t *target_set;
10345                 sigset_t _set, *set = &_set;
10346 
10347                 if (arg3) {
10348                     if (target_to_host_timespec(timeout_ts, arg3)) {
10349                         unlock_user(target_pfd, arg1, 0);
10350                         return -TARGET_EFAULT;
10351                     }
10352                 } else {
10353                     timeout_ts = NULL;
10354                 }
10355 
10356                 if (arg4) {
10357                     if (arg5 != sizeof(target_sigset_t)) {
10358                         unlock_user(target_pfd, arg1, 0);
10359                         return -TARGET_EINVAL;
10360                     }
10361 
10362                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10363                     if (!target_set) {
10364                         unlock_user(target_pfd, arg1, 0);
10365                         return -TARGET_EFAULT;
10366                     }
10367                     target_to_host_sigset(set, target_set);
10368                 } else {
10369                     set = NULL;
10370                 }
10371 
10372                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10373                                            set, SIGSET_T_SIZE));
10374 
10375                 if (!is_error(ret) && arg3) {
10376                     host_to_target_timespec(arg3, timeout_ts);
10377                 }
10378                 if (arg4) {
10379                     unlock_user(target_set, arg4, 0);
10380                 }
10381                 break;
10382             }
10383 # endif
10384 # ifdef TARGET_NR_poll
10385             case TARGET_NR_poll:
10386             {
10387                 struct timespec ts, *pts;
10388 
10389                 if (arg3 >= 0) {
10390                     /* Convert ms to secs, ns */
10391                     ts.tv_sec = arg3 / 1000;
10392                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10393                     pts = &ts;
10394                 } else {
10395                     /* -ve poll() timeout means "infinite" */
10396                     pts = NULL;
10397                 }
10398                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10399                 break;
10400             }
10401 # endif
10402             default:
10403                 g_assert_not_reached();
10404             }
10405 
10406             if (!is_error(ret)) {
10407                 for (i = 0; i < nfds; i++) {
10408                     target_pfd[i].revents = tswap16(pfd[i].revents);
10409                 }
10410             }
10411             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10412         }
10413         return ret;
10414 #endif
10415     case TARGET_NR_flock:
10416         /* NOTE: the flock constant seems to be the same for every
10417            Linux platform */
10418         return get_errno(safe_flock(arg1, arg2));
10419     case TARGET_NR_readv:
10420         {
10421             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10422             if (vec != NULL) {
10423                 ret = get_errno(safe_readv(arg1, vec, arg3));
10424                 unlock_iovec(vec, arg2, arg3, 1);
10425             } else {
10426                 ret = -host_to_target_errno(errno);
10427             }
10428         }
10429         return ret;
10430     case TARGET_NR_writev:
10431         {
10432             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10433             if (vec != NULL) {
10434                 ret = get_errno(safe_writev(arg1, vec, arg3));
10435                 unlock_iovec(vec, arg2, arg3, 0);
10436             } else {
10437                 ret = -host_to_target_errno(errno);
10438             }
10439         }
10440         return ret;
10441 #if defined(TARGET_NR_preadv)
10442     case TARGET_NR_preadv:
10443         {
10444             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10445             if (vec != NULL) {
10446                 unsigned long low, high;
10447 
10448                 target_to_host_low_high(arg4, arg5, &low, &high);
10449                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10450                 unlock_iovec(vec, arg2, arg3, 1);
10451             } else {
10452                 ret = -host_to_target_errno(errno);
10453            }
10454         }
10455         return ret;
10456 #endif
10457 #if defined(TARGET_NR_pwritev)
10458     case TARGET_NR_pwritev:
10459         {
10460             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10461             if (vec != NULL) {
10462                 unsigned long low, high;
10463 
10464                 target_to_host_low_high(arg4, arg5, &low, &high);
10465                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10466                 unlock_iovec(vec, arg2, arg3, 0);
10467             } else {
10468                 ret = -host_to_target_errno(errno);
10469            }
10470         }
10471         return ret;
10472 #endif
10473     case TARGET_NR_getsid:
10474         return get_errno(getsid(arg1));
10475 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10476     case TARGET_NR_fdatasync:
10477         return get_errno(fdatasync(arg1));
10478 #endif
10479 #ifdef TARGET_NR__sysctl
10480     case TARGET_NR__sysctl:
10481         /* We don't implement this, but ENOTDIR is always a safe
10482            return value. */
10483         return -TARGET_ENOTDIR;
10484 #endif
10485     case TARGET_NR_sched_getaffinity:
10486         {
10487             unsigned int mask_size;
10488             unsigned long *mask;
10489 
10490             /*
10491              * sched_getaffinity needs multiples of ulong, so need to take
10492              * care of mismatches between target ulong and host ulong sizes.
10493              */
10494             if (arg2 & (sizeof(abi_ulong) - 1)) {
10495                 return -TARGET_EINVAL;
10496             }
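                  /*
                   * Round the guest-supplied length up to a whole number of
                   * host 'unsigned long's, which is the unit the host syscall
                   * works in.
                   */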
10497             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10498 
10499             mask = alloca(mask_size);
10500             memset(mask, 0, mask_size);
10501             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10502 
10503             if (!is_error(ret)) {
10504                 if (ret > arg2) {
10505                     /* More data returned than the caller's buffer will fit.
10506                      * This only happens if sizeof(abi_long) < sizeof(long)
10507                      * and the caller passed us a buffer holding an odd number
10508                      * of abi_longs. If the host kernel is actually using the
10509                      * extra 4 bytes then fail EINVAL; otherwise we can just
10510                      * ignore them and only copy the interesting part.
10511                      */
10512                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10513                     if (numcpus > arg2 * 8) {
10514                         return -TARGET_EINVAL;
10515                     }
10516                     ret = arg2;
10517                 }
10518 
10519                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10520                     return -TARGET_EFAULT;
10521                 }
10522             }
10523         }
10524         return ret;
10525     case TARGET_NR_sched_setaffinity:
10526         {
10527             unsigned int mask_size;
10528             unsigned long *mask;
10529 
10530             /*
10531              * sched_setaffinity needs multiples of ulong, so need to take
10532              * care of mismatches between target ulong and host ulong sizes.
10533              */
10534             if (arg2 & (sizeof(abi_ulong) - 1)) {
10535                 return -TARGET_EINVAL;
10536             }
10537             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10538             mask = alloca(mask_size);
10539 
10540             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10541             if (ret) {
10542                 return ret;
10543             }
10544 
10545             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10546         }
10547     case TARGET_NR_getcpu:
10548         {
10549             unsigned cpu, node;
10550             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10551                                        arg2 ? &node : NULL,
10552                                        NULL));
10553             if (is_error(ret)) {
10554                 return ret;
10555             }
10556             if (arg1 && put_user_u32(cpu, arg1)) {
10557                 return -TARGET_EFAULT;
10558             }
10559             if (arg2 && put_user_u32(node, arg2)) {
10560                 return -TARGET_EFAULT;
10561             }
10562         }
10563         return ret;
10564     case TARGET_NR_sched_setparam:
10565         {
10566             struct sched_param *target_schp;
10567             struct sched_param schp;
10568 
10569             if (arg2 == 0) {
10570                 return -TARGET_EINVAL;
10571             }
10572             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10573                 return -TARGET_EFAULT;
10574             schp.sched_priority = tswap32(target_schp->sched_priority);
10575             unlock_user_struct(target_schp, arg2, 0);
10576             return get_errno(sched_setparam(arg1, &schp));
10577         }
10578     case TARGET_NR_sched_getparam:
10579         {
10580             struct sched_param *target_schp;
10581             struct sched_param schp;
10582 
10583             if (arg2 == 0) {
10584                 return -TARGET_EINVAL;
10585             }
10586             ret = get_errno(sched_getparam(arg1, &schp));
10587             if (!is_error(ret)) {
10588                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10589                     return -TARGET_EFAULT;
10590                 target_schp->sched_priority = tswap32(schp.sched_priority);
10591                 unlock_user_struct(target_schp, arg2, 1);
10592             }
10593         }
10594         return ret;
10595     case TARGET_NR_sched_setscheduler:
10596         {
10597             struct sched_param *target_schp;
10598             struct sched_param schp;
10599             if (arg3 == 0) {
10600                 return -TARGET_EINVAL;
10601             }
10602             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10603                 return -TARGET_EFAULT;
10604             schp.sched_priority = tswap32(target_schp->sched_priority);
10605             unlock_user_struct(target_schp, arg3, 0);
10606             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10607         }
10608     case TARGET_NR_sched_getscheduler:
10609         return get_errno(sched_getscheduler(arg1));
10610     case TARGET_NR_sched_yield:
10611         return get_errno(sched_yield());
10612     case TARGET_NR_sched_get_priority_max:
10613         return get_errno(sched_get_priority_max(arg1));
10614     case TARGET_NR_sched_get_priority_min:
10615         return get_errno(sched_get_priority_min(arg1));
10616 #ifdef TARGET_NR_sched_rr_get_interval
10617     case TARGET_NR_sched_rr_get_interval:
10618         {
10619             struct timespec ts;
10620             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10621             if (!is_error(ret)) {
10622                 ret = host_to_target_timespec(arg2, &ts);
10623             }
10624         }
10625         return ret;
10626 #endif
10627 #ifdef TARGET_NR_sched_rr_get_interval_time64
10628     case TARGET_NR_sched_rr_get_interval_time64:
10629         {
10630             struct timespec ts;
10631             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10632             if (!is_error(ret)) {
10633                 ret = host_to_target_timespec64(arg2, &ts);
10634             }
10635         }
10636         return ret;
10637 #endif
10638 #if defined(TARGET_NR_nanosleep)
10639     case TARGET_NR_nanosleep:
10640         {
10641             struct timespec req, rem;
10642             target_to_host_timespec(&req, arg1);
10643             ret = get_errno(safe_nanosleep(&req, &rem));
10644             if (is_error(ret) && arg2) {
10645                 host_to_target_timespec(arg2, &rem);
10646             }
10647         }
10648         return ret;
10649 #endif
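          /*
           * prctl: options whose arguments are pointers need explicit
           * translation below; everything else is passed straight through
           * to the host prctl() in the default case.
           */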
10650     case TARGET_NR_prctl:
10651         switch (arg1) {
10652         case PR_GET_PDEATHSIG:
10653         {
10654             int deathsig;
10655             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10656             if (!is_error(ret) && arg2
10657                 && put_user_ual(deathsig, arg2)) {
10658                 return -TARGET_EFAULT;
10659             }
10660             return ret;
10661         }
10662 #ifdef PR_GET_NAME
10663         case PR_GET_NAME:
10664         {
10665             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10666             if (!name) {
10667                 return -TARGET_EFAULT;
10668             }
10669             ret = get_errno(prctl(arg1, (unsigned long)name,
10670                                   arg3, arg4, arg5));
10671             unlock_user(name, arg2, 16);
10672             return ret;
10673         }
10674         case PR_SET_NAME:
10675         {
10676             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10677             if (!name) {
10678                 return -TARGET_EFAULT;
10679             }
10680             ret = get_errno(prctl(arg1, (unsigned long)name,
10681                                   arg3, arg4, arg5));
10682             unlock_user(name, arg2, 0);
10683             return ret;
10684         }
10685 #endif
10686 #ifdef TARGET_MIPS
10687         case TARGET_PR_GET_FP_MODE:
10688         {
10689             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10690             ret = 0;
10691             if (env->CP0_Status & (1 << CP0St_FR)) {
10692                 ret |= TARGET_PR_FP_MODE_FR;
10693             }
10694             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10695                 ret |= TARGET_PR_FP_MODE_FRE;
10696             }
10697             return ret;
10698         }
10699         case TARGET_PR_SET_FP_MODE:
10700         {
10701             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10702             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10703             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10704             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10705             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10706 
10707             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10708                                             TARGET_PR_FP_MODE_FRE;
10709 
10710             /* If nothing to change, return right away, successfully.  */
10711             if (old_fr == new_fr && old_fre == new_fre) {
10712                 return 0;
10713             }
10714             /* Check the value is valid */
10715             if (arg2 & ~known_bits) {
10716                 return -TARGET_EOPNOTSUPP;
10717             }
10718             /* Setting FRE without FR is not supported.  */
10719             if (new_fre && !new_fr) {
10720                 return -TARGET_EOPNOTSUPP;
10721             }
10722             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10723                 /* FR1 is not supported */
10724                 return -TARGET_EOPNOTSUPP;
10725             }
10726             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10727                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10728                 /* cannot set FR=0 */
10729                 return -TARGET_EOPNOTSUPP;
10730             }
10731             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10732                 /* Cannot set FRE=1 */
10733                 return -TARGET_EOPNOTSUPP;
10734             }
10735 
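                  /*
                   * Repack the FP registers: with FR=0 a 64-bit value spans
                   * an even/odd register pair, with FR=1 it lives entirely
                   * in the even register, so move the upper 32 bits between
                   * the two layouts.
                   */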
10736             int i;
10737             fpr_t *fpr = env->active_fpu.fpr;
10738             for (i = 0; i < 32 ; i += 2) {
10739                 if (!old_fr && new_fr) {
10740                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10741                 } else if (old_fr && !new_fr) {
10742                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10743                 }
10744             }
10745 
10746             if (new_fr) {
10747                 env->CP0_Status |= (1 << CP0St_FR);
10748                 env->hflags |= MIPS_HFLAG_F64;
10749             } else {
10750                 env->CP0_Status &= ~(1 << CP0St_FR);
10751                 env->hflags &= ~MIPS_HFLAG_F64;
10752             }
10753             if (new_fre) {
10754                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10755                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10756                     env->hflags |= MIPS_HFLAG_FRE;
10757                 }
10758             } else {
10759                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10760                 env->hflags &= ~MIPS_HFLAG_FRE;
10761             }
10762 
10763             return 0;
10764         }
10765 #endif /* MIPS */
10766 #ifdef TARGET_AARCH64
10767         case TARGET_PR_SVE_SET_VL:
10768             /*
10769              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10770              * PR_SVE_VL_INHERIT.  Note the kernel definition
10771              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10772              * even though the current architectural maximum is VQ=16.
10773              */
10774             ret = -TARGET_EINVAL;
10775             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10776                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10777                 CPUARMState *env = cpu_env;
10778                 ARMCPU *cpu = env_archcpu(env);
10779                 uint32_t vq, old_vq;
10780 
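                      /* ZCR_EL1.LEN holds VQ - 1, where VL = VQ * 16 bytes. */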
10781                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10782                 vq = MAX(arg2 / 16, 1);
10783                 vq = MIN(vq, cpu->sve_max_vq);
10784 
10785                 if (vq < old_vq) {
10786                     aarch64_sve_narrow_vq(env, vq);
10787                 }
10788                 env->vfp.zcr_el[1] = vq - 1;
10789                 arm_rebuild_hflags(env);
10790                 ret = vq * 16;
10791             }
10792             return ret;
10793         case TARGET_PR_SVE_GET_VL:
10794             ret = -TARGET_EINVAL;
10795             {
10796                 ARMCPU *cpu = env_archcpu(cpu_env);
10797                 if (cpu_isar_feature(aa64_sve, cpu)) {
10798                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10799                 }
10800             }
10801             return ret;
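              /*
               * PR_PAC_RESET_KEYS: each selected pointer-authentication key
               * is refilled with fresh bytes from the guest random source.
               */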
10802         case TARGET_PR_PAC_RESET_KEYS:
10803             {
10804                 CPUARMState *env = cpu_env;
10805                 ARMCPU *cpu = env_archcpu(env);
10806 
10807                 if (arg3 || arg4 || arg5) {
10808                     return -TARGET_EINVAL;
10809                 }
10810                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10811                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10812                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10813                                TARGET_PR_PAC_APGAKEY);
10814                     int ret = 0;
10815                     Error *err = NULL;
10816 
10817                     if (arg2 == 0) {
10818                         arg2 = all;
10819                     } else if (arg2 & ~all) {
10820                         return -TARGET_EINVAL;
10821                     }
10822                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10823                         ret |= qemu_guest_getrandom(&env->keys.apia,
10824                                                     sizeof(ARMPACKey), &err);
10825                     }
10826                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10827                         ret |= qemu_guest_getrandom(&env->keys.apib,
10828                                                     sizeof(ARMPACKey), &err);
10829                     }
10830                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10831                         ret |= qemu_guest_getrandom(&env->keys.apda,
10832                                                     sizeof(ARMPACKey), &err);
10833                     }
10834                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10835                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10836                                                     sizeof(ARMPACKey), &err);
10837                     }
10838                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10839                         ret |= qemu_guest_getrandom(&env->keys.apga,
10840                                                     sizeof(ARMPACKey), &err);
10841                     }
10842                     if (ret != 0) {
10843                         /*
10844                          * Some unknown failure in the crypto.  The best
10845                          * we can do is log it and fail the syscall.
10846                          * The real syscall cannot fail this way.
10847                          */
10848                         qemu_log_mask(LOG_UNIMP,
10849                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10850                                       error_get_pretty(err));
10851                         error_free(err);
10852                         return -TARGET_EIO;
10853                     }
10854                     return 0;
10855                 }
10856             }
10857             return -TARGET_EINVAL;
10858 #endif /* AARCH64 */
10859         case PR_GET_SECCOMP:
10860         case PR_SET_SECCOMP:
10861             /* Disable seccomp to prevent the target from disabling
10862              * syscalls that we need. */
10863             return -TARGET_EINVAL;
10864         default:
10865             /* Most prctl options have no pointer arguments */
10866             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10867         }
10868         break;
10869 #ifdef TARGET_NR_arch_prctl
10870     case TARGET_NR_arch_prctl:
10871         return do_arch_prctl(cpu_env, arg1, arg2);
10872 #endif
10873 #ifdef TARGET_NR_pread64
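          /*
           * pread64/pwrite64: on 32-bit ABIs the 64-bit offset arrives as a
           * register pair; regpairs_aligned() tells us whether the pair was
           * shifted up to start on an even register.
           */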
10874     case TARGET_NR_pread64:
10875         if (regpairs_aligned(cpu_env, num)) {
10876             arg4 = arg5;
10877             arg5 = arg6;
10878         }
10879         if (arg2 == 0 && arg3 == 0) {
10880             /* Special-case NULL buffer and zero length, which should succeed */
10881             p = 0;
10882         } else {
10883             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10884             if (!p) {
10885                 return -TARGET_EFAULT;
10886             }
10887         }
10888         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10889         unlock_user(p, arg2, ret);
10890         return ret;
10891     case TARGET_NR_pwrite64:
10892         if (regpairs_aligned(cpu_env, num)) {
10893             arg4 = arg5;
10894             arg5 = arg6;
10895         }
10896         if (arg2 == 0 && arg3 == 0) {
10897             /* Special-case NULL buffer and zero length, which should succeed */
10898             p = 0;
10899         } else {
10900             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10901             if (!p) {
10902                 return -TARGET_EFAULT;
10903             }
10904         }
10905         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10906         unlock_user(p, arg2, 0);
10907         return ret;
10908 #endif
10909     case TARGET_NR_getcwd:
10910         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10911             return -TARGET_EFAULT;
10912         ret = get_errno(sys_getcwd1(p, arg2));
10913         unlock_user(p, arg1, ret);
10914         return ret;
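          /*
           * capget/capset: byte-swap the user_cap_header and, for the v2/v3
           * capability API, both user_cap_data slots between target and
           * host.
           */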
10915     case TARGET_NR_capget:
10916     case TARGET_NR_capset:
10917     {
10918         struct target_user_cap_header *target_header;
10919         struct target_user_cap_data *target_data = NULL;
10920         struct __user_cap_header_struct header;
10921         struct __user_cap_data_struct data[2];
10922         struct __user_cap_data_struct *dataptr = NULL;
10923         int i, target_datalen;
10924         int data_items = 1;
10925 
10926         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10927             return -TARGET_EFAULT;
10928         }
10929         header.version = tswap32(target_header->version);
10930         header.pid = tswap32(target_header->pid);
10931 
10932         if (header.version != _LINUX_CAPABILITY_VERSION) {
10933             /* Versions 2 and up take a pointer to two user_data structs */
10934             data_items = 2;
10935         }
10936 
10937         target_datalen = sizeof(*target_data) * data_items;
10938 
10939         if (arg2) {
10940             if (num == TARGET_NR_capget) {
10941                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10942             } else {
10943                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10944             }
10945             if (!target_data) {
10946                 unlock_user_struct(target_header, arg1, 0);
10947                 return -TARGET_EFAULT;
10948             }
10949 
10950             if (num == TARGET_NR_capset) {
10951                 for (i = 0; i < data_items; i++) {
10952                     data[i].effective = tswap32(target_data[i].effective);
10953                     data[i].permitted = tswap32(target_data[i].permitted);
10954                     data[i].inheritable = tswap32(target_data[i].inheritable);
10955                 }
10956             }
10957 
10958             dataptr = data;
10959         }
10960 
10961         if (num == TARGET_NR_capget) {
10962             ret = get_errno(capget(&header, dataptr));
10963         } else {
10964             ret = get_errno(capset(&header, dataptr));
10965         }
10966 
10967         /* The kernel always updates version for both capget and capset */
10968         target_header->version = tswap32(header.version);
10969         unlock_user_struct(target_header, arg1, 1);
10970 
10971         if (arg2) {
10972             if (num == TARGET_NR_capget) {
10973                 for (i = 0; i < data_items; i++) {
10974                     target_data[i].effective = tswap32(data[i].effective);
10975                     target_data[i].permitted = tswap32(data[i].permitted);
10976                     target_data[i].inheritable = tswap32(data[i].inheritable);
10977                 }
10978                 unlock_user(target_data, arg2, target_datalen);
10979             } else {
10980                 unlock_user(target_data, arg2, 0);
10981             }
10982         }
10983         return ret;
10984     }
10985     case TARGET_NR_sigaltstack:
10986         return do_sigaltstack(arg1, arg2,
10987                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10988 
10989 #ifdef CONFIG_SENDFILE
10990 #ifdef TARGET_NR_sendfile
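          /*
           * sendfile/sendfile64: the optional offset is copied in from the
           * guest, passed by reference so the host can advance it, and then
           * copied back out.
           */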
10991     case TARGET_NR_sendfile:
10992     {
10993         off_t *offp = NULL;
10994         off_t off;
10995         if (arg3) {
10996             ret = get_user_sal(off, arg3);
10997             if (is_error(ret)) {
10998                 return ret;
10999             }
11000             offp = &off;
11001         }
11002         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11003         if (!is_error(ret) && arg3) {
11004             abi_long ret2 = put_user_sal(off, arg3);
11005             if (is_error(ret2)) {
11006                 ret = ret2;
11007             }
11008         }
11009         return ret;
11010     }
11011 #endif
11012 #ifdef TARGET_NR_sendfile64
11013     case TARGET_NR_sendfile64:
11014     {
11015         off_t *offp = NULL;
11016         off_t off;
11017         if (arg3) {
11018             ret = get_user_s64(off, arg3);
11019             if (is_error(ret)) {
11020                 return ret;
11021             }
11022             offp = &off;
11023         }
11024         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11025         if (!is_error(ret) && arg3) {
11026             abi_long ret2 = put_user_s64(off, arg3);
11027             if (is_error(ret2)) {
11028                 ret = ret2;
11029             }
11030         }
11031         return ret;
11032     }
11033 #endif
11034 #endif
11035 #ifdef TARGET_NR_vfork
11036     case TARGET_NR_vfork:
11037         return get_errno(do_fork(cpu_env,
11038                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11039                          0, 0, 0, 0));
11040 #endif
11041 #ifdef TARGET_NR_ugetrlimit
11042     case TARGET_NR_ugetrlimit:
11043     {
11044         struct rlimit rlim;
11045         int resource = target_to_host_resource(arg1);
11046         ret = get_errno(getrlimit(resource, &rlim));
11047         if (!is_error(ret)) {
11048             struct target_rlimit *target_rlim;
11049             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11050                 return -TARGET_EFAULT;
11051             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11052             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11053             unlock_user_struct(target_rlim, arg2, 1);
11054         }
11055         return ret;
11056     }
11057 #endif
11058 #ifdef TARGET_NR_truncate64
11059     case TARGET_NR_truncate64:
11060         if (!(p = lock_user_string(arg1)))
11061             return -TARGET_EFAULT;
11062         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11063         unlock_user(p, arg1, 0);
11064         return ret;
11065 #endif
11066 #ifdef TARGET_NR_ftruncate64
11067     case TARGET_NR_ftruncate64:
11068         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11069 #endif
11070 #ifdef TARGET_NR_stat64
11071     case TARGET_NR_stat64:
11072         if (!(p = lock_user_string(arg1))) {
11073             return -TARGET_EFAULT;
11074         }
11075         ret = get_errno(stat(path(p), &st));
11076         unlock_user(p, arg1, 0);
11077         if (!is_error(ret))
11078             ret = host_to_target_stat64(cpu_env, arg2, &st);
11079         return ret;
11080 #endif
11081 #ifdef TARGET_NR_lstat64
11082     case TARGET_NR_lstat64:
11083         if (!(p = lock_user_string(arg1))) {
11084             return -TARGET_EFAULT;
11085         }
11086         ret = get_errno(lstat(path(p), &st));
11087         unlock_user(p, arg1, 0);
11088         if (!is_error(ret))
11089             ret = host_to_target_stat64(cpu_env, arg2, &st);
11090         return ret;
11091 #endif
11092 #ifdef TARGET_NR_fstat64
11093     case TARGET_NR_fstat64:
11094         ret = get_errno(fstat(arg1, &st));
11095         if (!is_error(ret))
11096             ret = host_to_target_stat64(cpu_env, arg2, &st);
11097         return ret;
11098 #endif
11099 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11100 #ifdef TARGET_NR_fstatat64
11101     case TARGET_NR_fstatat64:
11102 #endif
11103 #ifdef TARGET_NR_newfstatat
11104     case TARGET_NR_newfstatat:
11105 #endif
11106         if (!(p = lock_user_string(arg2))) {
11107             return -TARGET_EFAULT;
11108         }
11109         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11110         unlock_user(p, arg2, 0);
11111         if (!is_error(ret))
11112             ret = host_to_target_stat64(cpu_env, arg3, &st);
11113         return ret;
11114 #endif
11115 #if defined(TARGET_NR_statx)
11116     case TARGET_NR_statx:
11117         {
11118             struct target_statx *target_stx;
11119             int dirfd = arg1;
11120             int flags = arg3;
11121 
11122             p = lock_user_string(arg2);
11123             if (p == NULL) {
11124                 return -TARGET_EFAULT;
11125             }
11126 #if defined(__NR_statx)
11127             {
11128                 /*
11129                  * It is assumed that struct statx is architecture independent.
11130                  */
11131                 struct target_statx host_stx;
11132                 int mask = arg4;
11133 
11134                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11135                 if (!is_error(ret)) {
11136                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11137                         unlock_user(p, arg2, 0);
11138                         return -TARGET_EFAULT;
11139                     }
11140                 }
11141 
11142                 if (ret != -TARGET_ENOSYS) {
11143                     unlock_user(p, arg2, 0);
11144                     return ret;
11145                 }
11146             }
11147 #endif
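                  /*
                   * Either the host has no statx() or it returned ENOSYS:
                   * fall back to fstatat() and fill in the statx fields we
                   * can recover from struct stat.
                   */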
11148             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11149             unlock_user(p, arg2, 0);
11150 
11151             if (!is_error(ret)) {
11152                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11153                     return -TARGET_EFAULT;
11154                 }
11155                 memset(target_stx, 0, sizeof(*target_stx));
11156                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11157                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11158                 __put_user(st.st_ino, &target_stx->stx_ino);
11159                 __put_user(st.st_mode, &target_stx->stx_mode);
11160                 __put_user(st.st_uid, &target_stx->stx_uid);
11161                 __put_user(st.st_gid, &target_stx->stx_gid);
11162                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11163                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11164                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11165                 __put_user(st.st_size, &target_stx->stx_size);
11166                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11167                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11168                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11169                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11170                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11171                 unlock_user_struct(target_stx, arg5, 1);
11172             }
11173         }
11174         return ret;
11175 #endif
11176 #ifdef TARGET_NR_lchown
11177     case TARGET_NR_lchown:
11178         if (!(p = lock_user_string(arg1)))
11179             return -TARGET_EFAULT;
11180         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11181         unlock_user(p, arg1, 0);
11182         return ret;
11183 #endif
11184 #ifdef TARGET_NR_getuid
11185     case TARGET_NR_getuid:
11186         return get_errno(high2lowuid(getuid()));
11187 #endif
11188 #ifdef TARGET_NR_getgid
11189     case TARGET_NR_getgid:
11190         return get_errno(high2lowgid(getgid()));
11191 #endif
11192 #ifdef TARGET_NR_geteuid
11193     case TARGET_NR_geteuid:
11194         return get_errno(high2lowuid(geteuid()));
11195 #endif
11196 #ifdef TARGET_NR_getegid
11197     case TARGET_NR_getegid:
11198         return get_errno(high2lowgid(getegid()));
11199 #endif
11200     case TARGET_NR_setreuid:
11201         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11202     case TARGET_NR_setregid:
11203         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11204     case TARGET_NR_getgroups:
11205         {
11206             int gidsetsize = arg1;
11207             target_id *target_grouplist;
11208             gid_t *grouplist;
11209             int i;
11210 
11211             grouplist = alloca(gidsetsize * sizeof(gid_t));
11212             ret = get_errno(getgroups(gidsetsize, grouplist));
11213             if (gidsetsize == 0)
11214                 return ret;
11215             if (!is_error(ret)) {
11216                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11217                 if (!target_grouplist)
11218                     return -TARGET_EFAULT;
11219                 for (i = 0; i < ret; i++)
11220                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11221                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11222             }
11223         }
11224         return ret;
11225     case TARGET_NR_setgroups:
11226         {
11227             int gidsetsize = arg1;
11228             target_id *target_grouplist;
11229             gid_t *grouplist = NULL;
11230             int i;
11231             if (gidsetsize) {
11232                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11233                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11234                 if (!target_grouplist) {
11235                     return -TARGET_EFAULT;
11236                 }
11237                 for (i = 0; i < gidsetsize; i++) {
11238                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11239                 }
11240                 unlock_user(target_grouplist, arg2, 0);
11241             }
11242             return get_errno(setgroups(gidsetsize, grouplist));
11243         }
11244     case TARGET_NR_fchown:
11245         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11246 #if defined(TARGET_NR_fchownat)
11247     case TARGET_NR_fchownat:
11248         if (!(p = lock_user_string(arg2)))
11249             return -TARGET_EFAULT;
11250         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11251                                  low2highgid(arg4), arg5));
11252         unlock_user(p, arg2, 0);
11253         return ret;
11254 #endif
11255 #ifdef TARGET_NR_setresuid
11256     case TARGET_NR_setresuid:
11257         return get_errno(sys_setresuid(low2highuid(arg1),
11258                                        low2highuid(arg2),
11259                                        low2highuid(arg3)));
11260 #endif
11261 #ifdef TARGET_NR_getresuid
11262     case TARGET_NR_getresuid:
11263         {
11264             uid_t ruid, euid, suid;
11265             ret = get_errno(getresuid(&ruid, &euid, &suid));
11266             if (!is_error(ret)) {
11267                 if (put_user_id(high2lowuid(ruid), arg1)
11268                     || put_user_id(high2lowuid(euid), arg2)
11269                     || put_user_id(high2lowuid(suid), arg3))
11270                     return -TARGET_EFAULT;
11271             }
11272         }
11273         return ret;
11274 #endif
11275 #ifdef TARGET_NR_setresgid
11276     case TARGET_NR_setresgid:
11277         return get_errno(sys_setresgid(low2highgid(arg1),
11278                                        low2highgid(arg2),
11279                                        low2highgid(arg3)));
11280 #endif
11281 #ifdef TARGET_NR_getresgid
11282     case TARGET_NR_getresgid:
11283         {
11284             gid_t rgid, egid, sgid;
11285             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11286             if (!is_error(ret)) {
11287                 if (put_user_id(high2lowgid(rgid), arg1)
11288                     || put_user_id(high2lowgid(egid), arg2)
11289                     || put_user_id(high2lowgid(sgid), arg3))
11290                     return -TARGET_EFAULT;
11291             }
11292         }
11293         return ret;
11294 #endif
11295 #ifdef TARGET_NR_chown
11296     case TARGET_NR_chown:
11297         if (!(p = lock_user_string(arg1)))
11298             return -TARGET_EFAULT;
11299         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11300         unlock_user(p, arg1, 0);
11301         return ret;
11302 #endif
11303     case TARGET_NR_setuid:
11304         return get_errno(sys_setuid(low2highuid(arg1)));
11305     case TARGET_NR_setgid:
11306         return get_errno(sys_setgid(low2highgid(arg1)));
11307     case TARGET_NR_setfsuid:
11308         return get_errno(setfsuid(arg1));
11309     case TARGET_NR_setfsgid:
11310         return get_errno(setfsgid(arg1));
11311 
11312 #ifdef TARGET_NR_lchown32
11313     case TARGET_NR_lchown32:
11314         if (!(p = lock_user_string(arg1)))
11315             return -TARGET_EFAULT;
11316         ret = get_errno(lchown(p, arg2, arg3));
11317         unlock_user(p, arg1, 0);
11318         return ret;
11319 #endif
11320 #ifdef TARGET_NR_getuid32
11321     case TARGET_NR_getuid32:
11322         return get_errno(getuid());
11323 #endif
11324 
11325 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11326    /* Alpha specific */
11327     case TARGET_NR_getxuid:
11328          {
11329             uid_t euid;
11330             euid = geteuid();
11331             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11332          }
11333         return get_errno(getuid());
11334 #endif
11335 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11336    /* Alpha specific */
11337     case TARGET_NR_getxgid:
11338          {
11339             gid_t egid;
11340             egid = getegid();
11341             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11342          }
11343         return get_errno(getgid());
11344 #endif
11345 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11346     /* Alpha specific */
11347     case TARGET_NR_osf_getsysinfo:
11348         ret = -TARGET_EOPNOTSUPP;
11349         switch (arg1) {
11350           case TARGET_GSI_IEEE_FP_CONTROL:
11351             {
11352                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11353                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11354 
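                      /*
                       * Report the SWCR with its status bits refreshed from
                       * the accrued exception bits in the hardware FPCR.
                       */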
11355                 swcr &= ~SWCR_STATUS_MASK;
11356                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11357 
11358                 if (put_user_u64(swcr, arg2))
11359                     return -TARGET_EFAULT;
11360                 ret = 0;
11361             }
11362             break;
11363 
11364           /* case GSI_IEEE_STATE_AT_SIGNAL:
11365              -- Not implemented in linux kernel.
11366              case GSI_UACPROC:
11367              -- Retrieves current unaligned access state; not much used.
11368              case GSI_PROC_TYPE:
11369              -- Retrieves implver information; surely not used.
11370              case GSI_GET_HWRPB:
11371              -- Grabs a copy of the HWRPB; surely not used.
11372           */
11373         }
11374         return ret;
11375 #endif
11376 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11377     /* Alpha specific */
11378     case TARGET_NR_osf_setsysinfo:
11379         ret = -TARGET_EOPNOTSUPP;
11380         switch (arg1) {
11381           case TARGET_SSI_IEEE_FP_CONTROL:
11382             {
11383                 uint64_t swcr, fpcr;
11384 
11385                 if (get_user_u64 (swcr, arg2)) {
11386                     return -TARGET_EFAULT;
11387                 }
11388 
11389                 /*
11390                  * The kernel calls swcr_update_status to update the
11391                  * status bits from the fpcr at every point that it
11392                  * could be queried.  Therefore, we store the status
11393                  * bits only in FPCR.
11394                  */
11395                 ((CPUAlphaState *)cpu_env)->swcr
11396                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11397 
11398                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11399                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11400                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11401                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11402                 ret = 0;
11403             }
11404             break;
11405 
11406           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11407             {
11408                 uint64_t exc, fpcr, fex;
11409 
11410                 if (get_user_u64(exc, arg2)) {
11411                     return -TARGET_EFAULT;
11412                 }
11413                 exc &= SWCR_STATUS_MASK;
11414                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11415 
11416                 /* Old exceptions are not signaled.  */
11417                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11418                 fex = exc & ~fex;
11419                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11420                 fex &= ((CPUArchState *)cpu_env)->swcr;
11421 
11422                 /* Update the hardware fpcr.  */
11423                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11424                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11425 
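                      /*
                       * Exceptions that are newly raised and have their trap
                       * enabled in the SWCR are delivered to the guest as
                       * SIGFPE with the closest matching si_code.
                       */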
11426                 if (fex) {
11427                     int si_code = TARGET_FPE_FLTUNK;
11428                     target_siginfo_t info;
11429 
11430                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11431                         si_code = TARGET_FPE_FLTUND;
11432                     }
11433                     if (fex & SWCR_TRAP_ENABLE_INE) {
11434                         si_code = TARGET_FPE_FLTRES;
11435                     }
11436                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11437                         si_code = TARGET_FPE_FLTUND;
11438                     }
11439                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11440                         si_code = TARGET_FPE_FLTOVF;
11441                     }
11442                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11443                         si_code = TARGET_FPE_FLTDIV;
11444                     }
11445                     if (fex & SWCR_TRAP_ENABLE_INV) {
11446                         si_code = TARGET_FPE_FLTINV;
11447                     }
11448 
11449                     info.si_signo = SIGFPE;
11450                     info.si_errno = 0;
11451                     info.si_code = si_code;
11452                     info._sifields._sigfault._addr
11453                         = ((CPUArchState *)cpu_env)->pc;
11454                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11455                                  QEMU_SI_FAULT, &info);
11456                 }
11457                 ret = 0;
11458             }
11459             break;
11460 
11461           /* case SSI_NVPAIRS:
11462              -- Used with SSIN_UACPROC to enable unaligned accesses.
11463              case SSI_IEEE_STATE_AT_SIGNAL:
11464              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11465              -- Not implemented in linux kernel
11466           */
11467         }
11468         return ret;
11469 #endif
11470 #ifdef TARGET_NR_osf_sigprocmask
11471     /* Alpha specific.  */
11472     case TARGET_NR_osf_sigprocmask:
11473         {
11474             abi_ulong mask;
11475             int how;
11476             sigset_t set, oldset;
11477 
11478             switch (arg1) {
11479             case TARGET_SIG_BLOCK:
11480                 how = SIG_BLOCK;
11481                 break;
11482             case TARGET_SIG_UNBLOCK:
11483                 how = SIG_UNBLOCK;
11484                 break;
11485             case TARGET_SIG_SETMASK:
11486                 how = SIG_SETMASK;
11487                 break;
11488             default:
11489                 return -TARGET_EINVAL;
11490             }
11491             mask = arg2;
11492             target_to_host_old_sigset(&set, &mask);
11493             ret = do_sigprocmask(how, &set, &oldset);
11494             if (!ret) {
11495                 host_to_target_old_sigset(&mask, &oldset);
11496                 ret = mask;
11497             }
11498         }
11499         return ret;
11500 #endif
11501 
11502 #ifdef TARGET_NR_getgid32
11503     case TARGET_NR_getgid32:
11504         return get_errno(getgid());
11505 #endif
11506 #ifdef TARGET_NR_geteuid32
11507     case TARGET_NR_geteuid32:
11508         return get_errno(geteuid());
11509 #endif
11510 #ifdef TARGET_NR_getegid32
11511     case TARGET_NR_getegid32:
11512         return get_errno(getegid());
11513 #endif
11514 #ifdef TARGET_NR_setreuid32
11515     case TARGET_NR_setreuid32:
11516         return get_errno(setreuid(arg1, arg2));
11517 #endif
11518 #ifdef TARGET_NR_setregid32
11519     case TARGET_NR_setregid32:
11520         return get_errno(setregid(arg1, arg2));
11521 #endif
11522 #ifdef TARGET_NR_getgroups32
11523     case TARGET_NR_getgroups32:
11524         {
11525             int gidsetsize = arg1;
11526             uint32_t *target_grouplist;
11527             gid_t *grouplist;
11528             int i;
11529 
11530             grouplist = alloca(gidsetsize * sizeof(gid_t));
11531             ret = get_errno(getgroups(gidsetsize, grouplist));
11532             if (gidsetsize == 0)
11533                 return ret;
11534             if (!is_error(ret)) {
11535                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11536                 if (!target_grouplist) {
11537                     return -TARGET_EFAULT;
11538                 }
11539                 for (i = 0; i < ret; i++)
11540                     target_grouplist[i] = tswap32(grouplist[i]);
11541                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11542             }
11543         }
11544         return ret;
11545 #endif
11546 #ifdef TARGET_NR_setgroups32
11547     case TARGET_NR_setgroups32:
11548         {
11549             int gidsetsize = arg1;
11550             uint32_t *target_grouplist;
11551             gid_t *grouplist;
11552             int i;
11553 
11554             grouplist = alloca(gidsetsize * sizeof(gid_t));
11555             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11556             if (!target_grouplist) {
11557                 return -TARGET_EFAULT;
11558             }
11559             for (i = 0; i < gidsetsize; i++)
11560                 grouplist[i] = tswap32(target_grouplist[i]);
11561             unlock_user(target_grouplist, arg2, 0);
11562             return get_errno(setgroups(gidsetsize, grouplist));
11563         }
11564 #endif
11565 #ifdef TARGET_NR_fchown32
11566     case TARGET_NR_fchown32:
11567         return get_errno(fchown(arg1, arg2, arg3));
11568 #endif
11569 #ifdef TARGET_NR_setresuid32
11570     case TARGET_NR_setresuid32:
11571         return get_errno(sys_setresuid(arg1, arg2, arg3));
11572 #endif
11573 #ifdef TARGET_NR_getresuid32
11574     case TARGET_NR_getresuid32:
11575         {
11576             uid_t ruid, euid, suid;
11577             ret = get_errno(getresuid(&ruid, &euid, &suid));
11578             if (!is_error(ret)) {
11579                 if (put_user_u32(ruid, arg1)
11580                     || put_user_u32(euid, arg2)
11581                     || put_user_u32(suid, arg3))
11582                     return -TARGET_EFAULT;
11583             }
11584         }
11585         return ret;
11586 #endif
11587 #ifdef TARGET_NR_setresgid32
11588     case TARGET_NR_setresgid32:
11589         return get_errno(sys_setresgid(arg1, arg2, arg3));
11590 #endif
11591 #ifdef TARGET_NR_getresgid32
11592     case TARGET_NR_getresgid32:
11593         {
11594             gid_t rgid, egid, sgid;
11595             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11596             if (!is_error(ret)) {
11597                 if (put_user_u32(rgid, arg1)
11598                     || put_user_u32(egid, arg2)
11599                     || put_user_u32(sgid, arg3))
11600                     return -TARGET_EFAULT;
11601             }
11602         }
11603         return ret;
11604 #endif
11605 #ifdef TARGET_NR_chown32
11606     case TARGET_NR_chown32:
11607         if (!(p = lock_user_string(arg1)))
11608             return -TARGET_EFAULT;
11609         ret = get_errno(chown(p, arg2, arg3));
11610         unlock_user(p, arg1, 0);
11611         return ret;
11612 #endif
11613 #ifdef TARGET_NR_setuid32
11614     case TARGET_NR_setuid32:
11615         return get_errno(sys_setuid(arg1));
11616 #endif
11617 #ifdef TARGET_NR_setgid32
11618     case TARGET_NR_setgid32:
11619         return get_errno(sys_setgid(arg1));
11620 #endif
11621 #ifdef TARGET_NR_setfsuid32
11622     case TARGET_NR_setfsuid32:
11623         return get_errno(setfsuid(arg1));
11624 #endif
11625 #ifdef TARGET_NR_setfsgid32
11626     case TARGET_NR_setfsgid32:
11627         return get_errno(setfsgid(arg1));
11628 #endif
11629 #ifdef TARGET_NR_mincore
11630     case TARGET_NR_mincore:
11631         {
11632             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11633             if (!a) {
11634                 return -TARGET_ENOMEM;
11635             }
11636             p = lock_user_string(arg3);
11637             if (!p) {
11638                 ret = -TARGET_EFAULT;
11639             } else {
11640                 ret = get_errno(mincore(a, arg2, p));
11641                 unlock_user(p, arg3, ret);
11642             }
11643             unlock_user(a, arg1, 0);
11644         }
11645         return ret;
11646 #endif
11647 #ifdef TARGET_NR_arm_fadvise64_64
11648     case TARGET_NR_arm_fadvise64_64:
11649         /* arm_fadvise64_64 looks like fadvise64_64 but
11650          * with different argument order: fd, advice, offset, len
11651          * rather than the usual fd, offset, len, advice.
11652          * Note that offset and len are both 64-bit so appear as
11653          * pairs of 32-bit registers.
11654          */
11655         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11656                             target_offset64(arg5, arg6), arg2);
11657         return -host_to_target_errno(ret);
11658 #endif
11659 
11660 #if TARGET_ABI_BITS == 32
11661 
11662 #ifdef TARGET_NR_fadvise64_64
11663     case TARGET_NR_fadvise64_64:
11664 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11665         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11666         ret = arg2;
11667         arg2 = arg3;
11668         arg3 = arg4;
11669         arg4 = arg5;
11670         arg5 = arg6;
11671         arg6 = ret;
11672 #else
11673         /* 6 args: fd, offset (high, low), len (high, low), advice */
11674         if (regpairs_aligned(cpu_env, num)) {
11675             /* offset is in (3,4), len in (5,6) and advice in 7 */
11676             arg2 = arg3;
11677             arg3 = arg4;
11678             arg4 = arg5;
11679             arg5 = arg6;
11680             arg6 = arg7;
11681         }
11682 #endif
11683         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11684                             target_offset64(arg4, arg5), arg6);
11685         return -host_to_target_errno(ret);
11686 #endif
11687 
11688 #ifdef TARGET_NR_fadvise64
11689     case TARGET_NR_fadvise64:
11690         /* 5 args: fd, offset (high, low), len, advice */
11691         if (regpairs_aligned(cpu_env, num)) {
11692             /* offset is in (3,4), len in 5 and advice in 6 */
11693             arg2 = arg3;
11694             arg3 = arg4;
11695             arg4 = arg5;
11696             arg5 = arg6;
11697         }
11698         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11699         return -host_to_target_errno(ret);
11700 #endif
11701 
11702 #else /* not a 32-bit ABI */
11703 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11704 #ifdef TARGET_NR_fadvise64_64
11705     case TARGET_NR_fadvise64_64:
11706 #endif
11707 #ifdef TARGET_NR_fadvise64
11708     case TARGET_NR_fadvise64:
11709 #endif
11710 #ifdef TARGET_S390X
11711         switch (arg4) {
11712         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11713         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11714         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11715         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11716         default: break;
11717         }
11718 #endif
11719         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11720 #endif
11721 #endif /* end of 64-bit ABI fadvise handling */
11722 
11723 #ifdef TARGET_NR_madvise
11724     case TARGET_NR_madvise:
11725         /* A straight passthrough may not be safe because qemu sometimes
11726            turns private file-backed mappings into anonymous mappings.
11727            This will break MADV_DONTNEED.
11728            This is a hint, so ignoring and returning success is ok.  */
11729         return 0;
11730 #endif
11731 #ifdef TARGET_NR_fcntl64
11732     case TARGET_NR_fcntl64:
11733     {
11734         int cmd;
11735         struct flock64 fl;
11736         from_flock64_fn *copyfrom = copy_from_user_flock64;
11737         to_flock64_fn *copyto = copy_to_user_flock64;
11738 
11739 #ifdef TARGET_ARM
11740         if (!((CPUARMState *)cpu_env)->eabi) {
11741             copyfrom = copy_from_user_oabi_flock64;
11742             copyto = copy_to_user_oabi_flock64;
11743         }
11744 #endif
11745 
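              /*
               * Map the guest's 64-bit locking commands onto the host fcntl
               * commands; struct flock64 itself is converted by the
               * copyfrom/copyto helpers selected above.
               */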
11746         cmd = target_to_host_fcntl_cmd(arg2);
11747         if (cmd == -TARGET_EINVAL) {
11748             return cmd;
11749         }
11750 
11751         switch (arg2) {
11752         case TARGET_F_GETLK64:
11753             ret = copyfrom(&fl, arg3);
11754             if (ret) {
11755                 break;
11756             }
11757             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11758             if (ret == 0) {
11759                 ret = copyto(arg3, &fl);
11760             }
11761             break;
11762 
11763         case TARGET_F_SETLK64:
11764         case TARGET_F_SETLKW64:
11765             ret = copyfrom(&fl, arg3);
11766             if (ret) {
11767                 break;
11768             }
11769             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11770             break;
11771         default:
11772             ret = do_fcntl(arg1, arg2, arg3);
11773             break;
11774         }
11775         return ret;
11776     }
11777 #endif
11778 #ifdef TARGET_NR_cacheflush
11779     case TARGET_NR_cacheflush:
11780         /* self-modifying code is handled automatically, so nothing needed */
11781         return 0;
11782 #endif
11783 #ifdef TARGET_NR_getpagesize
11784     case TARGET_NR_getpagesize:
11785         return TARGET_PAGE_SIZE;
11786 #endif
11787     case TARGET_NR_gettid:
11788         return get_errno(sys_gettid());
11789 #ifdef TARGET_NR_readahead
11790     case TARGET_NR_readahead:
11791 #if TARGET_ABI_BITS == 32
11792         if (regpairs_aligned(cpu_env, num)) {
11793             arg2 = arg3;
11794             arg3 = arg4;
11795             arg4 = arg5;
11796         }
11797         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11798 #else
11799         ret = get_errno(readahead(arg1, arg2, arg3));
11800 #endif
11801         return ret;
11802 #endif
11803 #ifdef CONFIG_ATTR
11804 #ifdef TARGET_NR_setxattr
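          /*
           * Extended attribute syscalls: names and values are opaque byte
           * strings, so guest buffers only need locking and unlocking, not
           * byte-swapping.
           */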
11805     case TARGET_NR_listxattr:
11806     case TARGET_NR_llistxattr:
11807     {
11808         void *p, *b = 0;
11809         if (arg2) {
11810             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11811             if (!b) {
11812                 return -TARGET_EFAULT;
11813             }
11814         }
11815         p = lock_user_string(arg1);
11816         if (p) {
11817             if (num == TARGET_NR_listxattr) {
11818                 ret = get_errno(listxattr(p, b, arg3));
11819             } else {
11820                 ret = get_errno(llistxattr(p, b, arg3));
11821             }
11822         } else {
11823             ret = -TARGET_EFAULT;
11824         }
11825         unlock_user(p, arg1, 0);
11826         unlock_user(b, arg2, arg3);
11827         return ret;
11828     }
11829     case TARGET_NR_flistxattr:
11830     {
11831         void *b = 0;
11832         if (arg2) {
11833             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11834             if (!b) {
11835                 return -TARGET_EFAULT;
11836             }
11837         }
11838         ret = get_errno(flistxattr(arg1, b, arg3));
11839         unlock_user(b, arg2, arg3);
11840         return ret;
11841     }
11842     case TARGET_NR_setxattr:
11843     case TARGET_NR_lsetxattr:
11844         {
11845             void *p, *n, *v = 0;
11846             if (arg3) {
11847                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11848                 if (!v) {
11849                     return -TARGET_EFAULT;
11850                 }
11851             }
11852             p = lock_user_string(arg1);
11853             n = lock_user_string(arg2);
11854             if (p && n) {
11855                 if (num == TARGET_NR_setxattr) {
11856                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11857                 } else {
11858                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11859                 }
11860             } else {
11861                 ret = -TARGET_EFAULT;
11862             }
11863             unlock_user(p, arg1, 0);
11864             unlock_user(n, arg2, 0);
11865             unlock_user(v, arg3, 0);
11866         }
11867         return ret;
11868     case TARGET_NR_fsetxattr:
11869         {
11870             void *n, *v = 0;
11871             if (arg3) {
11872                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11873                 if (!v) {
11874                     return -TARGET_EFAULT;
11875                 }
11876             }
11877             n = lock_user_string(arg2);
11878             if (n) {
11879                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11880             } else {
11881                 ret = -TARGET_EFAULT;
11882             }
11883             unlock_user(n, arg2, 0);
11884             unlock_user(v, arg3, 0);
11885         }
11886         return ret;
11887     case TARGET_NR_getxattr:
11888     case TARGET_NR_lgetxattr:
11889         {
11890             void *p, *n, *v = 0;
11891             if (arg3) {
11892                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11893                 if (!v) {
11894                     return -TARGET_EFAULT;
11895                 }
11896             }
11897             p = lock_user_string(arg1);
11898             n = lock_user_string(arg2);
11899             if (p && n) {
11900                 if (num == TARGET_NR_getxattr) {
11901                     ret = get_errno(getxattr(p, n, v, arg4));
11902                 } else {
11903                     ret = get_errno(lgetxattr(p, n, v, arg4));
11904                 }
11905             } else {
11906                 ret = -TARGET_EFAULT;
11907             }
11908             unlock_user(p, arg1, 0);
11909             unlock_user(n, arg2, 0);
11910             unlock_user(v, arg3, arg4);
11911         }
11912         return ret;
11913     case TARGET_NR_fgetxattr:
11914         {
11915             void *n, *v = 0;
11916             if (arg3) {
11917                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11918                 if (!v) {
11919                     return -TARGET_EFAULT;
11920                 }
11921             }
11922             n = lock_user_string(arg2);
11923             if (n) {
11924                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11925             } else {
11926                 ret = -TARGET_EFAULT;
11927             }
11928             unlock_user(n, arg2, 0);
11929             unlock_user(v, arg3, arg4);
11930         }
11931         return ret;
11932     case TARGET_NR_removexattr:
11933     case TARGET_NR_lremovexattr:
11934         {
11935             void *p, *n;
11936             p = lock_user_string(arg1);
11937             n = lock_user_string(arg2);
11938             if (p && n) {
11939                 if (num == TARGET_NR_removexattr) {
11940                     ret = get_errno(removexattr(p, n));
11941                 } else {
11942                     ret = get_errno(lremovexattr(p, n));
11943                 }
11944             } else {
11945                 ret = -TARGET_EFAULT;
11946             }
11947             unlock_user(p, arg1, 0);
11948             unlock_user(n, arg2, 0);
11949         }
11950         return ret;
11951     case TARGET_NR_fremovexattr:
11952         {
11953             void *n;
11954             n = lock_user_string(arg2);
11955             if (n) {
11956                 ret = get_errno(fremovexattr(arg1, n));
11957             } else {
11958                 ret = -TARGET_EFAULT;
11959             }
11960             unlock_user(n, arg2, 0);
11961         }
11962         return ret;
11963 #endif
11964 #endif /* CONFIG_ATTR */
11965 #ifdef TARGET_NR_set_thread_area
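          /*
           * set_thread_area/get_thread_area are architecture specific: most
           * targets simply latch the TLS pointer into the appropriate CPU
           * state field.
           */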
11966     case TARGET_NR_set_thread_area:
11967 #if defined(TARGET_MIPS)
11968       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11969       return 0;
11970 #elif defined(TARGET_CRIS)
11971       if (arg1 & 0xff)
11972           ret = -TARGET_EINVAL;
11973       else {
11974           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11975           ret = 0;
11976       }
11977       return ret;
11978 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11979       return do_set_thread_area(cpu_env, arg1);
11980 #elif defined(TARGET_M68K)
11981       {
11982           TaskState *ts = cpu->opaque;
11983           ts->tp_value = arg1;
11984           return 0;
11985       }
11986 #else
11987       return -TARGET_ENOSYS;
11988 #endif
11989 #endif
11990 #ifdef TARGET_NR_get_thread_area
11991     case TARGET_NR_get_thread_area:
11992 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11993         return do_get_thread_area(cpu_env, arg1);
11994 #elif defined(TARGET_M68K)
11995         {
11996             TaskState *ts = cpu->opaque;
11997             return ts->tp_value;
11998         }
11999 #else
12000         return -TARGET_ENOSYS;
12001 #endif
12002 #endif
12003 #ifdef TARGET_NR_getdomainname
12004     case TARGET_NR_getdomainname:
12005         return -TARGET_ENOSYS;
12006 #endif
12007 
12008 #ifdef TARGET_NR_clock_settime
12009     case TARGET_NR_clock_settime:
12010     {
12011         struct timespec ts;
12012 
12013         ret = target_to_host_timespec(&ts, arg2);
12014         if (!is_error(ret)) {
12015             ret = get_errno(clock_settime(arg1, &ts));
12016         }
12017         return ret;
12018     }
12019 #endif
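      /*
       * The *_time64 syscall variants let 32-bit guests pass 64-bit time
       * values; they differ from the plain versions only in which
       * guest<->host timespec conversion helper is used.
       */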
12020 #ifdef TARGET_NR_clock_settime64
12021     case TARGET_NR_clock_settime64:
12022     {
12023         struct timespec ts;
12024 
12025         ret = target_to_host_timespec64(&ts, arg2);
12026         if (!is_error(ret)) {
12027             ret = get_errno(clock_settime(arg1, &ts));
12028         }
12029         return ret;
12030     }
12031 #endif
12032 #ifdef TARGET_NR_clock_gettime
12033     case TARGET_NR_clock_gettime:
12034     {
12035         struct timespec ts;
12036         ret = get_errno(clock_gettime(arg1, &ts));
12037         if (!is_error(ret)) {
12038             ret = host_to_target_timespec(arg2, &ts);
12039         }
12040         return ret;
12041     }
12042 #endif
12043 #ifdef TARGET_NR_clock_gettime64
12044     case TARGET_NR_clock_gettime64:
12045     {
12046         struct timespec ts;
12047         ret = get_errno(clock_gettime(arg1, &ts));
12048         if (!is_error(ret)) {
12049             ret = host_to_target_timespec64(arg2, &ts);
12050         }
12051         return ret;
12052     }
12053 #endif
12054 #ifdef TARGET_NR_clock_getres
12055     case TARGET_NR_clock_getres:
12056     {
12057         struct timespec ts;
12058         ret = get_errno(clock_getres(arg1, &ts));
12059         if (!is_error(ret)) {
12060             ret = host_to_target_timespec(arg2, &ts);
12061         }
12062         return ret;
12063     }
12064 #endif
12065 #ifdef TARGET_NR_clock_getres_time64
12066     case TARGET_NR_clock_getres_time64:
12067     {
12068         struct timespec ts;
12069         ret = get_errno(clock_getres(arg1, &ts));
12070         if (!is_error(ret)) {
12071             ret = host_to_target_timespec64(arg2, &ts);
12072         }
12073         return ret;
12074     }
12075 #endif
12076 #ifdef TARGET_NR_clock_nanosleep
12077     case TARGET_NR_clock_nanosleep:
12078     {
12079         struct timespec ts;
12080         if (target_to_host_timespec(&ts, arg3)) {
12081             return -TARGET_EFAULT;
12082         }
12083         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12084                                              &ts, arg4 ? &ts : NULL));
12085         /*
12086          * If the call is interrupted by a signal handler, it fails with
12087          * the error -TARGET_EINTR; if arg4 is not NULL and arg2 is not
12088          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12089          */
12090         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12091             host_to_target_timespec(arg4, &ts)) {
12092               return -TARGET_EFAULT;
12093         }
12094 
12095         return ret;
12096     }
12097 #endif
12098 #ifdef TARGET_NR_clock_nanosleep_time64
12099     case TARGET_NR_clock_nanosleep_time64:
12100     {
12101         struct timespec ts;
12102 
12103         if (target_to_host_timespec64(&ts, arg3)) {
12104             return -TARGET_EFAULT;
12105         }
12106 
12107         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12108                                              &ts, arg4 ? &ts : NULL));
12109 
12110         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12111             host_to_target_timespec64(arg4, &ts)) {
12112             return -TARGET_EFAULT;
12113         }
12114         return ret;
12115     }
12116 #endif
12117 
12118 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12119     case TARGET_NR_set_tid_address:
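              /*
               * The kernel remembers this address and clears the TID there
               * (with a futex wake) when the thread exits, so the guest
               * pointer is converted with g2h() and passed straight through
               * to the host syscall.
               */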
12120         return get_errno(set_tid_address((int *)g2h(arg1)));
12121 #endif
12122 
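          /*
           * Guest threads are backed 1:1 by host threads, so the thread IDs
           * can be passed through unchanged; only the signal number needs
           * remapping to the host's numbering.
           */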
12123     case TARGET_NR_tkill:
12124         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12125 
12126     case TARGET_NR_tgkill:
12127         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12128                          target_to_host_signal(arg3)));
12129 
12130 #ifdef TARGET_NR_set_robust_list
12131     case TARGET_NR_set_robust_list:
12132     case TARGET_NR_get_robust_list:
12133         /* The ABI for supporting robust futexes has userspace pass
12134          * the kernel a pointer to a linked list which is updated by
12135          * userspace after the syscall; the list is walked by the kernel
12136          * when the thread exits. Since the linked list in QEMU guest
12137          * memory isn't a valid linked list for the host and we have
12138          * no way to reliably intercept the thread-death event, we can't
12139          * support these. Silently return ENOSYS so that guest userspace
12140          * falls back to a non-robust futex implementation (which should
12141          * be OK except in the corner case of the guest crashing while
12142          * holding a mutex that is shared with another process via
12143          * shared memory).
12144          */
12145         return -TARGET_ENOSYS;
12146 #endif
12147 
12148 #if defined(TARGET_NR_utimensat)
12149     case TARGET_NR_utimensat:
12150         {
12151             struct timespec *tsp, ts[2];
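                  /* ts[0] is the access time and ts[1] the modification time. */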
12152             if (!arg3) {
12153                 tsp = NULL;
12154             } else {
12155                 if (target_to_host_timespec(ts, arg3)) {
12156                     return -TARGET_EFAULT;
12157                 }
12158                 if (target_to_host_timespec(ts + 1, arg3 +
12159                                             sizeof(struct target_timespec))) {
12160                     return -TARGET_EFAULT;
12161                 }
12162                 tsp = ts;
12163             }
12164             if (!arg2) {
12165                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12166             } else {
12167                 if (!(p = lock_user_string(arg2))) {
12168                     return -TARGET_EFAULT;
12169                 }
12170                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12171                 unlock_user(p, arg2, 0);
12172             }
12173         }
12174         return ret;
12175 #endif
12176 #ifdef TARGET_NR_utimensat_time64
12177     case TARGET_NR_utimensat_time64:
12178         {
12179             struct timespec *tsp, ts[2];
12180             if (!arg3) {
12181                 tsp = NULL;
12182             } else {
12183                 if (target_to_host_timespec64(ts, arg3)) {
12184                     return -TARGET_EFAULT;
12185                 }
12186                 if (target_to_host_timespec64(ts + 1, arg3 +
12187                                      sizeof(struct target__kernel_timespec))) {
12188                     return -TARGET_EFAULT;
12189                 }
12190                 tsp = ts;
12191             }
12192             if (!arg2) {
12193                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12194             } else {
12195                 p = lock_user_string(arg2);
12196                 if (!p) {
12197                     return -TARGET_EFAULT;
12198                 }
12199                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12200                 unlock_user(p, arg2, 0);
12201             }
12202         }
12203         return ret;
12204 #endif
12205 #ifdef TARGET_NR_futex
12206     case TARGET_NR_futex:
12207         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12208 #endif
12209 #ifdef TARGET_NR_futex_time64
12210     case TARGET_NR_futex_time64:
12211         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12212 #endif
12213 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12214     case TARGET_NR_inotify_init:
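              /*
               * Register a translator on the new descriptor so that struct
               * inotify_event records read from it are converted to the
               * guest's layout.
               */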
12215         ret = get_errno(sys_inotify_init());
12216         if (ret >= 0) {
12217             fd_trans_register(ret, &target_inotify_trans);
12218         }
12219         return ret;
12220 #endif
12221 #ifdef CONFIG_INOTIFY1
12222 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12223     case TARGET_NR_inotify_init1:
12224         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12225                                           fcntl_flags_tbl)));
12226         if (ret >= 0) {
12227             fd_trans_register(ret, &target_inotify_trans);
12228         }
12229         return ret;
12230 #endif
12231 #endif
12232 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12233     case TARGET_NR_inotify_add_watch:
12234         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12235         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12236         unlock_user(p, arg2, 0);
12237         return ret;
12238 #endif
12239 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12240     case TARGET_NR_inotify_rm_watch:
12241         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12242 #endif
12243 
12244 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12245     case TARGET_NR_mq_open:
12246         {
12247             struct mq_attr posix_mq_attr;
12248             struct mq_attr *pposix_mq_attr;
12249             int host_flags;
12250 
12251             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12252             pposix_mq_attr = NULL;
12253             if (arg4) {
12254                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12255                     return -TARGET_EFAULT;
12256                 }
12257                 pposix_mq_attr = &posix_mq_attr;
12258             }
12259             p = lock_user_string(arg1 - 1);
12260             if (!p) {
12261                 return -TARGET_EFAULT;
12262             }
12263             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12264             unlock_user(p, arg1, 0);
12265         }
12266         return ret;
12267 
12268     case TARGET_NR_mq_unlink:
12269         p = lock_user_string(arg1 - 1);
12270         if (!p) {
12271             return -TARGET_EFAULT;
12272         }
12273         ret = get_errno(mq_unlink(p));
12274         unlock_user(p, arg1, 0);
12275         return ret;
12276 
12277 #ifdef TARGET_NR_mq_timedsend
12278     case TARGET_NR_mq_timedsend:
12279         {
12280             struct timespec ts;
12281 
12282             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12283             if (arg5 != 0) {
12284                 if (target_to_host_timespec(&ts, arg5)) {
12285                     return -TARGET_EFAULT;
12286                 }
12287                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12288                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12289                     return -TARGET_EFAULT;
12290                 }
12291             } else {
12292                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12293             }
12294             unlock_user(p, arg2, arg3);
12295         }
12296         return ret;
12297 #endif
12298 #ifdef TARGET_NR_mq_timedsend_time64
12299     case TARGET_NR_mq_timedsend_time64:
12300         {
12301             struct timespec ts;
12302 
12303             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12304             if (arg5 != 0) {
12305                 if (target_to_host_timespec64(&ts, arg5)) {
12306                     return -TARGET_EFAULT;
12307                 }
12308                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12309                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12310                     return -TARGET_EFAULT;
12311                 }
12312             } else {
12313                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12314             }
12315             unlock_user(p, arg2, arg3);
12316         }
12317         return ret;
12318 #endif
12319 
12320 #ifdef TARGET_NR_mq_timedreceive
12321     case TARGET_NR_mq_timedreceive:
12322         {
12323             struct timespec ts;
12324             unsigned int prio;
12325 
12326             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12327             if (arg5 != 0) {
12328                 if (target_to_host_timespec(&ts, arg5)) {
12329                     return -TARGET_EFAULT;
12330                 }
12331                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12332                                                      &prio, &ts));
12333                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12334                     return -TARGET_EFAULT;
12335                 }
12336             } else {
12337                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12338                                                      &prio, NULL));
12339             }
12340             unlock_user(p, arg2, arg3);
12341             if (arg4 != 0) {
12342                 put_user_u32(prio, arg4);
                  }
12343         }
12344         return ret;
12345 #endif
12346 #ifdef TARGET_NR_mq_timedreceive_time64
12347     case TARGET_NR_mq_timedreceive_time64:
12348         {
12349             struct timespec ts;
12350             unsigned int prio;
12351 
12352             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12353             if (arg5 != 0) {
12354                 if (target_to_host_timespec64(&ts, arg5)) {
12355                     return -TARGET_EFAULT;
12356                 }
12357                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12358                                                      &prio, &ts));
12359                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12360                     return -TARGET_EFAULT;
12361                 }
12362             } else {
12363                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12364                                                      &prio, NULL));
12365             }
12366             unlock_user(p, arg2, arg3);
12367             if (arg4 != 0) {
12368                 put_user_u32(prio, arg4);
12369             }
12370         }
12371         return ret;
12372 #endif
12373 
12374     /* Not implemented for now... */
12375 /*     case TARGET_NR_mq_notify: */
12376 /*         break; */
12377 
12378     case TARGET_NR_mq_getsetattr:
12379         {
12380             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12381             ret = 0;
12382             if (arg2 != 0) {
12383                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12384                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12385                                            &posix_mq_attr_out));
12386             } else if (arg3 != 0) {
12387                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12388             }
12389             if (ret == 0 && arg3 != 0) {
12390                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12391             }
12392         }
12393         return ret;
12394 #endif
12395 
12396 #ifdef CONFIG_SPLICE
12397 #ifdef TARGET_NR_tee
12398     case TARGET_NR_tee:
12399         {
12400             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12401         }
12402         return ret;
12403 #endif
12404 #ifdef TARGET_NR_splice
12405     case TARGET_NR_splice:
12406         {
12407             loff_t loff_in, loff_out;
12408             loff_t *ploff_in = NULL, *ploff_out = NULL;
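                  /*
                   * Mirror host splice() semantics: if the guest supplied
                   * offset pointers, read the values in beforehand and write
                   * the (possibly updated) offsets back afterwards.
                   */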
12409             if (arg2) {
12410                 if (get_user_u64(loff_in, arg2)) {
12411                     return -TARGET_EFAULT;
12412                 }
12413                 ploff_in = &loff_in;
12414             }
12415             if (arg4) {
12416                 if (get_user_u64(loff_out, arg4)) {
12417                     return -TARGET_EFAULT;
12418                 }
12419                 ploff_out = &loff_out;
12420             }
12421             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12422             if (arg2) {
12423                 if (put_user_u64(loff_in, arg2)) {
12424                     return -TARGET_EFAULT;
12425                 }
12426             }
12427             if (arg4) {
12428                 if (put_user_u64(loff_out, arg4)) {
12429                     return -TARGET_EFAULT;
12430                 }
12431             }
12432         }
12433         return ret;
12434 #endif
12435 #ifdef TARGET_NR_vmsplice
12436     case TARGET_NR_vmsplice:
12437         {
12438             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12439             if (vec != NULL) {
12440                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12441                 unlock_iovec(vec, arg2, arg3, 0);
12442             } else {
12443                 ret = -host_to_target_errno(errno);
12444             }
12445         }
12446         return ret;
12447 #endif
12448 #endif /* CONFIG_SPLICE */
12449 #ifdef CONFIG_EVENTFD
12450 #if defined(TARGET_NR_eventfd)
12451     case TARGET_NR_eventfd:
12452         ret = get_errno(eventfd(arg1, 0));
12453         if (ret >= 0) {
12454             fd_trans_register(ret, &target_eventfd_trans);
12455         }
12456         return ret;
12457 #endif
12458 #if defined(TARGET_NR_eventfd2)
12459     case TARGET_NR_eventfd2:
12460     {
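              /*
               * O_NONBLOCK and O_CLOEXEC have target-specific values, so
               * translate those two bits explicitly; any other flag bits
               * (e.g. EFD_SEMAPHORE, which is the same everywhere) are
               * passed through unchanged.
               */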
12461         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12462         if (arg2 & TARGET_O_NONBLOCK) {
12463             host_flags |= O_NONBLOCK;
12464         }
12465         if (arg2 & TARGET_O_CLOEXEC) {
12466             host_flags |= O_CLOEXEC;
12467         }
12468         ret = get_errno(eventfd(arg1, host_flags));
12469         if (ret >= 0) {
12470             fd_trans_register(ret, &target_eventfd_trans);
12471         }
12472         return ret;
12473     }
12474 #endif
12475 #endif /* CONFIG_EVENTFD  */
12476 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12477     case TARGET_NR_fallocate:
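              /*
               * On 32-bit ABIs the 64-bit offset and length are split across
               * two registers each and are reassembled with target_offset64().
               */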
12478 #if TARGET_ABI_BITS == 32
12479         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12480                                   target_offset64(arg5, arg6)));
12481 #else
12482         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12483 #endif
12484         return ret;
12485 #endif
12486 #if defined(CONFIG_SYNC_FILE_RANGE)
12487 #if defined(TARGET_NR_sync_file_range)
12488     case TARGET_NR_sync_file_range:
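              /*
               * 32-bit MIPS passes a padding argument after the fd so that
               * the 64-bit offset and nbytes values sit in aligned register
               * pairs, which shifts the remaining arguments up by one.
               */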
12489 #if TARGET_ABI_BITS == 32
12490 #if defined(TARGET_MIPS)
12491         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12492                                         target_offset64(arg5, arg6), arg7));
12493 #else
12494         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12495                                         target_offset64(arg4, arg5), arg6));
12496 #endif /* !TARGET_MIPS */
12497 #else
12498         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12499 #endif
12500         return ret;
12501 #endif
12502 #if defined(TARGET_NR_sync_file_range2) || \
12503     defined(TARGET_NR_arm_sync_file_range)
12504 #if defined(TARGET_NR_sync_file_range2)
12505     case TARGET_NR_sync_file_range2:
12506 #endif
12507 #if defined(TARGET_NR_arm_sync_file_range)
12508     case TARGET_NR_arm_sync_file_range:
12509 #endif
12510         /* This is like sync_file_range but the arguments are reordered */
12511 #if TARGET_ABI_BITS == 32
12512         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12513                                         target_offset64(arg5, arg6), arg2));
12514 #else
12515         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12516 #endif
12517         return ret;
12518 #endif
12519 #endif
12520 #if defined(TARGET_NR_signalfd4)
12521     case TARGET_NR_signalfd4:
12522         return do_signalfd4(arg1, arg2, arg4);
12523 #endif
12524 #if defined(TARGET_NR_signalfd)
12525     case TARGET_NR_signalfd:
12526         return do_signalfd4(arg1, arg2, 0);
12527 #endif
12528 #if defined(CONFIG_EPOLL)
12529 #if defined(TARGET_NR_epoll_create)
12530     case TARGET_NR_epoll_create:
12531         return get_errno(epoll_create(arg1));
12532 #endif
12533 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12534     case TARGET_NR_epoll_create1:
12535         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12536 #endif
12537 #if defined(TARGET_NR_epoll_ctl)
12538     case TARGET_NR_epoll_ctl:
12539     {
12540         struct epoll_event ep;
12541         struct epoll_event *epp = NULL;
12542         if (arg4) {
12543             struct target_epoll_event *target_ep;
12544             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12545                 return -TARGET_EFAULT;
12546             }
12547             ep.events = tswap32(target_ep->events);
12548             /* The epoll_data_t union is just opaque data to the kernel,
12549              * so we transfer all 64 bits across and need not worry what
12550              * actual data type it is.
12551              */
12552             ep.data.u64 = tswap64(target_ep->data.u64);
12553             unlock_user_struct(target_ep, arg4, 0);
12554             epp = &ep;
12555         }
12556         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12557     }
12558 #endif
12559 
12560 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12561 #if defined(TARGET_NR_epoll_wait)
12562     case TARGET_NR_epoll_wait:
12563 #endif
12564 #if defined(TARGET_NR_epoll_pwait)
12565     case TARGET_NR_epoll_pwait:
12566 #endif
12567     {
12568         struct target_epoll_event *target_ep;
12569         struct epoll_event *ep;
12570         int epfd = arg1;
12571         int maxevents = arg3;
12572         int timeout = arg4;
12573 
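              /*
               * The host and target epoll_event layouts differ, so results
               * are gathered in a host-side array and then converted (with
               * byte swapping) into the guest's buffer.
               */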
12574         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12575             return -TARGET_EINVAL;
12576         }
12577 
12578         target_ep = lock_user(VERIFY_WRITE, arg2,
12579                               maxevents * sizeof(struct target_epoll_event), 1);
12580         if (!target_ep) {
12581             return -TARGET_EFAULT;
12582         }
12583 
12584         ep = g_try_new(struct epoll_event, maxevents);
12585         if (!ep) {
12586             unlock_user(target_ep, arg2, 0);
12587             return -TARGET_ENOMEM;
12588         }
12589 
12590         switch (num) {
12591 #if defined(TARGET_NR_epoll_pwait)
12592         case TARGET_NR_epoll_pwait:
12593         {
12594             target_sigset_t *target_set;
12595             sigset_t _set, *set = &_set;
12596 
12597             if (arg5) {
12598                 if (arg6 != sizeof(target_sigset_t)) {
12599                     ret = -TARGET_EINVAL;
12600                     break;
12601                 }
12602 
12603                 target_set = lock_user(VERIFY_READ, arg5,
12604                                        sizeof(target_sigset_t), 1);
12605                 if (!target_set) {
12606                     ret = -TARGET_EFAULT;
12607                     break;
12608                 }
12609                 target_to_host_sigset(set, target_set);
12610                 unlock_user(target_set, arg5, 0);
12611             } else {
12612                 set = NULL;
12613             }
12614 
12615             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12616                                              set, SIGSET_T_SIZE));
12617             break;
12618         }
12619 #endif
12620 #if defined(TARGET_NR_epoll_wait)
12621         case TARGET_NR_epoll_wait:
12622             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12623                                              NULL, 0));
12624             break;
12625 #endif
12626         default:
12627             ret = -TARGET_ENOSYS;
12628         }
12629         if (!is_error(ret)) {
12630             int i;
12631             for (i = 0; i < ret; i++) {
12632                 target_ep[i].events = tswap32(ep[i].events);
12633                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12634             }
12635             unlock_user(target_ep, arg2,
12636                         ret * sizeof(struct target_epoll_event));
12637         } else {
12638             unlock_user(target_ep, arg2, 0);
12639         }
12640         g_free(ep);
12641         return ret;
12642     }
12643 #endif
12644 #endif
12645 #ifdef TARGET_NR_prlimit64
12646     case TARGET_NR_prlimit64:
12647     {
12648         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12649         struct target_rlimit64 *target_rnew, *target_rold;
12650         struct host_rlimit64 rnew, rold, *rnewp = 0;
12651         int resource = target_to_host_resource(arg2);
12652 
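              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
               * deliberately not applied (only the old values are read back),
               * since applying them to the host process would also constrain
               * QEMU itself rather than just the guest.
               */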
12653         if (arg3 && (resource != RLIMIT_AS &&
12654                      resource != RLIMIT_DATA &&
12655                      resource != RLIMIT_STACK)) {
12656             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12657                 return -TARGET_EFAULT;
12658             }
12659             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12660             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12661             unlock_user_struct(target_rnew, arg3, 0);
12662             rnewp = &rnew;
12663         }
12664 
12665         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12666         if (!is_error(ret) && arg4) {
12667             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12668                 return -TARGET_EFAULT;
12669             }
12670             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12671             target_rold->rlim_max = tswap64(rold.rlim_max);
12672             unlock_user_struct(target_rold, arg4, 1);
12673         }
12674         return ret;
12675     }
12676 #endif
12677 #ifdef TARGET_NR_gethostname
12678     case TARGET_NR_gethostname:
12679     {
12680         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12681         if (name) {
12682             ret = get_errno(gethostname(name, arg2));
12683             unlock_user(name, arg1, arg2);
12684         } else {
12685             ret = -TARGET_EFAULT;
12686         }
12687         return ret;
12688     }
12689 #endif
12690 #ifdef TARGET_NR_atomic_cmpxchg_32
12691     case TARGET_NR_atomic_cmpxchg_32:
12692     {
12693         /* should use start_exclusive from main.c */
12694         abi_ulong mem_value;
12695         if (get_user_u32(mem_value, arg6)) {
12696             target_siginfo_t info;
12697             info.si_signo = SIGSEGV;
12698             info.si_errno = 0;
12699             info.si_code = TARGET_SEGV_MAPERR;
12700             info._sifields._sigfault._addr = arg6;
12701             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12702                          QEMU_SI_FAULT, &info);
12703             ret = 0xdeadbeef;
12704             return ret;
12705         }
12706         if (mem_value == arg2) {
12707             put_user_u32(arg1, arg6);
12708         }
              return mem_value;
12709     }
12710 #endif
12711 #ifdef TARGET_NR_atomic_barrier
12712     case TARGET_NR_atomic_barrier:
12713         /* Like the kernel implementation and the
12714            qemu arm barrier, no-op this? */
12715         return 0;
12716 #endif
12717 
12718 #ifdef TARGET_NR_timer_create
12719     case TARGET_NR_timer_create:
12720     {
12721         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12722 
12723         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12724 
12725         int clkid = arg1;
12726         int timer_index = next_free_host_timer();
12727 
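              /*
               * The guest-visible timer ID is the slot index into
               * g_posix_timers[] tagged with TIMER_MAGIC; get_timer_id()
               * decodes it again for the other timer_* syscalls.
               */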
12728         if (timer_index < 0) {
12729             ret = -TARGET_EAGAIN;
12730         } else {
12731             timer_t *phtimer = g_posix_timers + timer_index;
12732 
12733             if (arg2) {
12734                 phost_sevp = &host_sevp;
12735                 ret = target_to_host_sigevent(phost_sevp, arg2);
12736                 if (ret != 0) {
12737                     return ret;
12738                 }
12739             }
12740 
12741             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12742             if (ret) {
12743                 phtimer = NULL;
12744             } else {
12745                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12746                     return -TARGET_EFAULT;
12747                 }
12748             }
12749         }
12750         return ret;
12751     }
12752 #endif
12753 
12754 #ifdef TARGET_NR_timer_settime
12755     case TARGET_NR_timer_settime:
12756     {
12757         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12758          * struct itimerspec * old_value */
12759         target_timer_t timerid = get_timer_id(arg1);
12760 
12761         if (timerid < 0) {
12762             ret = timerid;
12763         } else if (arg3 == 0) {
12764             ret = -TARGET_EINVAL;
12765         } else {
12766             timer_t htimer = g_posix_timers[timerid];
12767             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12768 
12769             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12770                 return -TARGET_EFAULT;
12771             }
12772             ret = get_errno(
12773                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12774             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12775                 return -TARGET_EFAULT;
12776             }
12777         }
12778         return ret;
12779     }
12780 #endif
12781 
12782 #ifdef TARGET_NR_timer_settime64
12783     case TARGET_NR_timer_settime64:
12784     {
12785         target_timer_t timerid = get_timer_id(arg1);
12786 
12787         if (timerid < 0) {
12788             ret = timerid;
12789         } else if (arg3 == 0) {
12790             ret = -TARGET_EINVAL;
12791         } else {
12792             timer_t htimer = g_posix_timers[timerid];
12793             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12794 
12795             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12796                 return -TARGET_EFAULT;
12797             }
12798             ret = get_errno(
12799                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12800             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12801                 return -TARGET_EFAULT;
12802             }
12803         }
12804         return ret;
12805     }
12806 #endif
12807 
12808 #ifdef TARGET_NR_timer_gettime
12809     case TARGET_NR_timer_gettime:
12810     {
12811         /* args: timer_t timerid, struct itimerspec *curr_value */
12812         target_timer_t timerid = get_timer_id(arg1);
12813 
12814         if (timerid < 0) {
12815             ret = timerid;
12816         } else if (!arg2) {
12817             ret = -TARGET_EFAULT;
12818         } else {
12819             timer_t htimer = g_posix_timers[timerid];
12820             struct itimerspec hspec;
12821             ret = get_errno(timer_gettime(htimer, &hspec));
12822 
12823             if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
12824                 ret = -TARGET_EFAULT;
12825             }
12826         }
12827         return ret;
12828     }
12829 #endif
12830 
12831 #ifdef TARGET_NR_timer_gettime64
12832     case TARGET_NR_timer_gettime64:
12833     {
12834         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12835         target_timer_t timerid = get_timer_id(arg1);
12836 
12837         if (timerid < 0) {
12838             ret = timerid;
12839         } else if (!arg2) {
12840             ret = -TARGET_EFAULT;
12841         } else {
12842             timer_t htimer = g_posix_timers[timerid];
12843             struct itimerspec hspec;
12844             ret = get_errno(timer_gettime(htimer, &hspec));
12845 
12846             if (!is_error(ret) && host_to_target_itimerspec64(arg2, &hspec)) {
12847                 ret = -TARGET_EFAULT;
12848             }
12849         }
12850         return ret;
12851     }
12852 #endif
12853 
12854 #ifdef TARGET_NR_timer_getoverrun
12855     case TARGET_NR_timer_getoverrun:
12856     {
12857         /* args: timer_t timerid */
12858         target_timer_t timerid = get_timer_id(arg1);
12859 
12860         if (timerid < 0) {
12861             ret = timerid;
12862         } else {
12863             timer_t htimer = g_posix_timers[timerid];
12864             ret = get_errno(timer_getoverrun(htimer));
12865         }
12866         return ret;
12867     }
12868 #endif
12869 
12870 #ifdef TARGET_NR_timer_delete
12871     case TARGET_NR_timer_delete:
12872     {
12873         /* args: timer_t timerid */
12874         target_timer_t timerid = get_timer_id(arg1);
12875 
12876         if (timerid < 0) {
12877             ret = timerid;
12878         } else {
12879             timer_t htimer = g_posix_timers[timerid];
12880             ret = get_errno(timer_delete(htimer));
12881             g_posix_timers[timerid] = 0;
12882         }
12883         return ret;
12884     }
12885 #endif
12886 
12887 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12888     case TARGET_NR_timerfd_create:
12889         return get_errno(timerfd_create(arg1,
12890                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12891 #endif
12892 
12893 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12894     case TARGET_NR_timerfd_gettime:
12895         {
12896             struct itimerspec its_curr;
12897 
12898             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12899 
12900             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12901                 return -TARGET_EFAULT;
12902             }
12903         }
12904         return ret;
12905 #endif
12906 
12907 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12908     case TARGET_NR_timerfd_gettime64:
12909         {
12910             struct itimerspec its_curr;
12911 
12912             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12913 
12914             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12915                 return -TARGET_EFAULT;
12916             }
12917         }
12918         return ret;
12919 #endif
12920 
12921 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12922     case TARGET_NR_timerfd_settime:
12923         {
12924             struct itimerspec its_new, its_old, *p_new;
12925 
12926             if (arg3) {
12927                 if (target_to_host_itimerspec(&its_new, arg3)) {
12928                     return -TARGET_EFAULT;
12929                 }
12930                 p_new = &its_new;
12931             } else {
12932                 p_new = NULL;
12933             }
12934 
12935             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12936 
12937             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12938                 return -TARGET_EFAULT;
12939             }
12940         }
12941         return ret;
12942 #endif
12943 
12944 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12945     case TARGET_NR_timerfd_settime64:
12946         {
12947             struct itimerspec its_new, its_old, *p_new;
12948 
12949             if (arg3) {
12950                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12951                     return -TARGET_EFAULT;
12952                 }
12953                 p_new = &its_new;
12954             } else {
12955                 p_new = NULL;
12956             }
12957 
12958             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12959 
12960             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12961                 return -TARGET_EFAULT;
12962             }
12963         }
12964         return ret;
12965 #endif
12966 
12967 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12968     case TARGET_NR_ioprio_get:
12969         return get_errno(ioprio_get(arg1, arg2));
12970 #endif
12971 
12972 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12973     case TARGET_NR_ioprio_set:
12974         return get_errno(ioprio_set(arg1, arg2, arg3));
12975 #endif
12976 
12977 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12978     case TARGET_NR_setns:
12979         return get_errno(setns(arg1, arg2));
12980 #endif
12981 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12982     case TARGET_NR_unshare:
12983         return get_errno(unshare(arg1));
12984 #endif
12985 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12986     case TARGET_NR_kcmp:
12987         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12988 #endif
12989 #ifdef TARGET_NR_swapcontext
12990     case TARGET_NR_swapcontext:
12991         /* PowerPC specific.  */
12992         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12993 #endif
12994 #ifdef TARGET_NR_memfd_create
12995     case TARGET_NR_memfd_create:
12996         p = lock_user_string(arg1);
12997         if (!p) {
12998             return -TARGET_EFAULT;
12999         }
13000         ret = get_errno(memfd_create(p, arg2));
13001         fd_trans_unregister(ret);
13002         unlock_user(p, arg1, 0);
13003         return ret;
13004 #endif
13005 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13006     case TARGET_NR_membarrier:
13007         return get_errno(membarrier(arg1, arg2));
13008 #endif
13009 
13010     default:
13011         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13012         return -TARGET_ENOSYS;
13013     }
13014     return ret;
13015 }
13016 
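      /*
       * Wrapper around do_syscall1(): record/replay instrumentation and the
       * optional -strace logging are done here, so that do_syscall1() can
       * simply return its result from any point.
       */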
13017 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13018                     abi_long arg2, abi_long arg3, abi_long arg4,
13019                     abi_long arg5, abi_long arg6, abi_long arg7,
13020                     abi_long arg8)
13021 {
13022     CPUState *cpu = env_cpu(cpu_env);
13023     abi_long ret;
13024 
13025 #ifdef DEBUG_ERESTARTSYS
13026     /* Debug-only code for exercising the syscall-restart code paths
13027      * in the per-architecture cpu main loops: restart every syscall
13028      * the guest makes once before letting it through.
13029      */
13030     {
13031         static bool flag;
13032         flag = !flag;
13033         if (flag) {
13034             return -TARGET_ERESTARTSYS;
13035         }
13036     }
13037 #endif
13038 
13039     record_syscall_start(cpu, num, arg1,
13040                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13041 
13042     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13043         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13044     }
13045 
13046     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13047                       arg5, arg6, arg7, arg8);
13048 
13049     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13050         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13051                           arg3, arg4, arg5, arg6);
13052     }
13053 
13054     record_syscall_return(cpu, num, ret);
13055     return ret;
13056 }
13057