xref: /openbmc/qemu/linux-user/syscall.c (revision d107e375)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
121 
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
129 
130 #ifndef CLONE_IO
131 #define CLONE_IO                0x80000000      /* Clone io context */
132 #endif
133 
134 /* We can't directly call the host clone syscall, because this will
135  * badly confuse libc (breaking mutexes, for example). So we must
136  * divide clone flags into:
137  *  * flag combinations that look like pthread_create()
138  *  * flag combinations that look like fork()
139  *  * flags we can implement within QEMU itself
140  *  * flags we can't support and will return an error for
141  */
142 /* For thread creation, all these flags must be present; for
143  * fork, none must be present.
144  */
145 #define CLONE_THREAD_FLAGS                              \
146     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
147      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 
149 /* These flags are ignored:
150  * CLONE_DETACHED is now ignored by the kernel;
151  * CLONE_IO is just an optimisation hint to the I/O scheduler
152  */
153 #define CLONE_IGNORED_FLAGS                     \
154     (CLONE_DETACHED | CLONE_IO)
155 
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS               \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
163     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
164      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 
166 #define CLONE_INVALID_FORK_FLAGS                                        \
167     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 
169 #define CLONE_INVALID_THREAD_FLAGS                                      \
170     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
171        CLONE_IGNORED_FLAGS))
172 
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174  * have almost all been allocated. We cannot support any of
175  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177  * The checks against the invalid thread masks above will catch these.
178  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
179  */
180 
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182  * once. This exercises the codepaths for restart.
183  */
184 //#define DEBUG_ERESTARTSYS
185 
186 //#include <linux/msdos_fs.h>
187 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
188 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
189 
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
197 
198 #define _syscall0(type,name)		\
199 static type name (void)			\
200 {					\
201 	return syscall(__NR_##name);	\
202 }
203 
204 #define _syscall1(type,name,type1,arg1)		\
205 static type name (type1 arg1)			\
206 {						\
207 	return syscall(__NR_##name, arg1);	\
208 }
209 
210 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
211 static type name (type1 arg1,type2 arg2)		\
212 {							\
213 	return syscall(__NR_##name, arg1, arg2);	\
214 }
215 
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3)		\
218 {								\
219 	return syscall(__NR_##name, arg1, arg2, arg3);		\
220 }
221 
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
226 }
227 
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
229 		  type5,arg5)							\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
233 }
234 
235 
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
237 		  type5,arg5,type6,arg6)					\
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
239                   type6 arg6)							\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
242 }
243 
244 
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
263 
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
267 
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
272 
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
275 
276 /* For the 64-bit guest on 32-bit host case we must emulate
277  * getdents using getdents64, because otherwise the host
278  * might hand us back more dirent records than we can fit
279  * into the guest buffer after structure format conversion.
280  * Otherwise we emulate getdents with getdents if the host has it.
281  */
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
285 
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
296           loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300           siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310           const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314           const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318           unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321           unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325           void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327           struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329           struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
339 
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342           unsigned long, idx1, unsigned long, idx2)
343 #endif
344 
345 /*
346  * It is assumed that struct statx is architecture independent.
347  */
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350           unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
355 
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
358   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
359   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
360   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
361   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
362   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
363   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
364   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
365   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
366   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
367   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
368   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
370 #if defined(O_DIRECT)
371   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
372 #endif
373 #if defined(O_NOATIME)
374   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
375 #endif
376 #if defined(O_CLOEXEC)
377   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
378 #endif
379 #if defined(O_PATH)
380   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
381 #endif
382 #if defined(O_TMPFILE)
383   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
384 #endif
385   /* Don't terminate the list prematurely on 64-bit host+guest.  */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389   { 0, 0, 0, 0 }
390 };
391 
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
393 
394 #ifdef TARGET_NR_utimensat
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398           const struct timespec *,tsp,int,flags)
399 #else
/*
 * Fallback for hosts whose kernel headers lack __NR_utimensat: the
 * operation is simply unsupported, so report ENOSYS exactly as the
 * kernel would for an unknown syscall.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
406 #endif
407 #endif /* TARGET_NR_utimensat */
408 
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413           const char *, new, unsigned int, flags)
414 #else
415 static int sys_renameat2(int oldfd, const char *old,
416                          int newfd, const char *new, int flags)
417 {
418     if (flags == 0) {
419         return renameat(oldfd, old, newfd, new);
420     }
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_renameat2 */
426 
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
429 
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper giving the libc inotify_init() a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper giving the libc inotify_add_watch() a uniform sys_* name. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper giving the libc inotify_rm_watch() a uniform sys_* name. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper giving the libc inotify_init1() a uniform sys_* name. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY  */
463 
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be that used by the underlying syscall */
470 struct host_rlimit64 {
471     uint64_t rlim_cur;
472     uint64_t rlim_max;
473 };
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475           const struct host_rlimit64 *, new_limit,
476           struct host_rlimit64 *, old_limit)
477 #endif
478 
479 
480 #if defined(TARGET_NR_timer_create)
481 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, } ;
483 
484 static inline int next_free_host_timer(void)
485 {
486     int k ;
487     /* FIXME: Does finding the next free slot require a lock? */
488     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489         if (g_posix_timers[k] == 0) {
490             g_posix_timers[k] = (timer_t) 1;
491             return k;
492         }
493     }
494     return -1;
495 }
496 #endif
497 
#define ERRNO_TABLE_SIZE 1200

/*
 * target_to_host_errno_table[] starts out all-zero and is populated
 * from host_to_target_errno_table[] in syscall_init(); a zero entry
 * means the errno value needs no translation.
 */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERKFILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
671 { \
672     return safe_syscall(__NR_##name); \
673 }
674 
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
677 { \
678     return safe_syscall(__NR_##name, arg1); \
679 }
680 
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
683 { \
684     return safe_syscall(__NR_##name, arg1, arg2); \
685 }
686 
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
691 }
692 
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694     type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
698 }
699 
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703     type5 arg5) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
706 }
707 
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711     type5 arg5, type6 arg6) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
714 }
715 
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719               int, flags, mode_t, mode)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 #endif
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728     defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
730               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
731 #endif
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734               struct timespec *, tsp, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 #endif
737 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
738               int, maxevents, int, timeout, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
742               const struct timespec *,timeout,int *,uaddr2,int,val3)
743 #endif
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755               unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759               socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 #ifdef TARGET_NR_rt_sigtimedwait
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769               const struct timespec *, uts, size_t, sigsetsize)
770 #endif
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772               int, flags)
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #endif
777 #ifdef TARGET_NR_clock_nanosleep
778 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
779               const struct timespec *, req, struct timespec *, rem)
780 #endif
781 #ifdef __NR_ipc
782 #ifdef __s390x__
783 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
784               void *, ptr)
785 #else
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 #endif
789 #endif
790 #ifdef __NR_msgsnd
791 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
792               int, flags)
793 #endif
794 #ifdef __NR_msgrcv
795 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
796               long, msgtype, int, flags)
797 #endif
798 #ifdef __NR_semtimedop
799 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
800               unsigned, nsops, const struct timespec *, timeout)
801 #endif
802 #if defined(TARGET_NR_mq_timedsend) || \
803     defined(TARGET_NR_mq_timedsend_time64)
804 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
805               size_t, len, unsigned, prio, const struct timespec *, timeout)
806 #endif
807 #if defined(TARGET_NR_mq_timedreceive) || \
808     defined(TARGET_NR_mq_timedreceive_time64)
809 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
810               size_t, len, unsigned *, prio, const struct timespec *, timeout)
811 #endif
812 /* We do ioctl like this rather than via safe_syscall3 to preserve the
813  * "third argument might be integer or pointer or not present" behaviour of
814  * the libc function.
815  */
816 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
817 /* Similarly for fcntl. Note that callers must always:
818  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
819  *  use the flock64 struct rather than unsuffixed flock
820  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
821  */
822 #ifdef __NR_fcntl64
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
824 #else
825 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
826 #endif
827 
828 static inline int host_to_target_sock_type(int host_type)
829 {
830     int target_type;
831 
832     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
833     case SOCK_DGRAM:
834         target_type = TARGET_SOCK_DGRAM;
835         break;
836     case SOCK_STREAM:
837         target_type = TARGET_SOCK_STREAM;
838         break;
839     default:
840         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
841         break;
842     }
843 
844 #if defined(SOCK_CLOEXEC)
845     if (host_type & SOCK_CLOEXEC) {
846         target_type |= TARGET_SOCK_CLOEXEC;
847     }
848 #endif
849 
850 #if defined(SOCK_NONBLOCK)
851     if (host_type & SOCK_NONBLOCK) {
852         target_type |= TARGET_SOCK_NONBLOCK;
853     }
854 #endif
855 
856     return target_type;
857 }
858 
/* Emulated heap state: current break, the initial break as set by the
 * loader, and the first address past the pages reserved for the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the initial program break; called once at load time. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
871 
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break; report the current
     * (unchanged) break rather than an error. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
949 
950 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
951     defined(TARGET_NR_pselect6)
952 static inline abi_long copy_from_user_fdset(fd_set *fds,
953                                             abi_ulong target_fds_addr,
954                                             int n)
955 {
956     int i, nw, j, k;
957     abi_ulong b, *target_fds;
958 
959     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
960     if (!(target_fds = lock_user(VERIFY_READ,
961                                  target_fds_addr,
962                                  sizeof(abi_ulong) * nw,
963                                  1)))
964         return -TARGET_EFAULT;
965 
966     FD_ZERO(fds);
967     k = 0;
968     for (i = 0; i < nw; i++) {
969         /* grab the abi_ulong */
970         __get_user(b, &target_fds[i]);
971         for (j = 0; j < TARGET_ABI_BITS; j++) {
972             /* check the bit inside the abi_ulong */
973             if ((b >> j) & 1)
974                 FD_SET(k, fds);
975             k++;
976         }
977     }
978 
979     unlock_user(target_fds, target_fds_addr, 0);
980 
981     return 0;
982 }
983 
984 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
985                                                  abi_ulong target_fds_addr,
986                                                  int n)
987 {
988     if (target_fds_addr) {
989         if (copy_from_user_fdset(fds, target_fds_addr, n))
990             return -TARGET_EFAULT;
991         *fds_ptr = fds;
992     } else {
993         *fds_ptr = NULL;
994     }
995     return 0;
996 }
997 
998 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
999                                           const fd_set *fds,
1000                                           int n)
1001 {
1002     int i, nw, j, k;
1003     abi_long v;
1004     abi_ulong *target_fds;
1005 
1006     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1007     if (!(target_fds = lock_user(VERIFY_WRITE,
1008                                  target_fds_addr,
1009                                  sizeof(abi_ulong) * nw,
1010                                  0)))
1011         return -TARGET_EFAULT;
1012 
1013     k = 0;
1014     for (i = 0; i < nw; i++) {
1015         v = 0;
1016         for (j = 0; j < TARGET_ABI_BITS; j++) {
1017             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1018             k++;
1019         }
1020         __put_user(v, &target_fds[i]);
1021     }
1022 
1023     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1024 
1025     return 0;
1026 }
1027 #endif
1028 
1029 #if defined(__alpha__)
1030 #define HOST_HZ 1024
1031 #else
1032 #define HOST_HZ 100
1033 #endif
1034 
/* Rescale a host clock_t tick count (HOST_HZ ticks/sec) to the target's
 * TARGET_HZ rate; a no-op when the rates match.  The 64-bit intermediate
 * avoids overflow in the multiply. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1043 
/* Copy a host struct rusage into the guest's struct at target_addr,
 * byteswapping every field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1073 
1074 #ifdef TARGET_NR_setrlimit
1075 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1076 {
1077     abi_ulong target_rlim_swap;
1078     rlim_t result;
1079 
1080     target_rlim_swap = tswapal(target_rlim);
1081     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1082         return RLIM_INFINITY;
1083 
1084     result = target_rlim_swap;
1085     if (target_rlim_swap != (rlim_t)result)
1086         return RLIM_INFINITY;
1087 
1088     return result;
1089 }
1090 #endif
1091 
1092 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1093 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1094 {
1095     abi_ulong target_rlim_swap;
1096     abi_ulong result;
1097 
1098     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1099         target_rlim_swap = TARGET_RLIM_INFINITY;
1100     else
1101         target_rlim_swap = rlim;
1102     result = tswapal(target_rlim_swap);
1103 
1104     return result;
1105 }
1106 #endif
1107 
1108 static inline int target_to_host_resource(int code)
1109 {
1110     switch (code) {
1111     case TARGET_RLIMIT_AS:
1112         return RLIMIT_AS;
1113     case TARGET_RLIMIT_CORE:
1114         return RLIMIT_CORE;
1115     case TARGET_RLIMIT_CPU:
1116         return RLIMIT_CPU;
1117     case TARGET_RLIMIT_DATA:
1118         return RLIMIT_DATA;
1119     case TARGET_RLIMIT_FSIZE:
1120         return RLIMIT_FSIZE;
1121     case TARGET_RLIMIT_LOCKS:
1122         return RLIMIT_LOCKS;
1123     case TARGET_RLIMIT_MEMLOCK:
1124         return RLIMIT_MEMLOCK;
1125     case TARGET_RLIMIT_MSGQUEUE:
1126         return RLIMIT_MSGQUEUE;
1127     case TARGET_RLIMIT_NICE:
1128         return RLIMIT_NICE;
1129     case TARGET_RLIMIT_NOFILE:
1130         return RLIMIT_NOFILE;
1131     case TARGET_RLIMIT_NPROC:
1132         return RLIMIT_NPROC;
1133     case TARGET_RLIMIT_RSS:
1134         return RLIMIT_RSS;
1135     case TARGET_RLIMIT_RTPRIO:
1136         return RLIMIT_RTPRIO;
1137     case TARGET_RLIMIT_SIGPENDING:
1138         return RLIMIT_SIGPENDING;
1139     case TARGET_RLIMIT_STACK:
1140         return RLIMIT_STACK;
1141     default:
1142         return code;
1143     }
1144 }
1145 
1146 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1147                                               abi_ulong target_tv_addr)
1148 {
1149     struct target_timeval *target_tv;
1150 
1151     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1152         return -TARGET_EFAULT;
1153     }
1154 
1155     __get_user(tv->tv_sec, &target_tv->tv_sec);
1156     __get_user(tv->tv_usec, &target_tv->tv_usec);
1157 
1158     unlock_user_struct(target_tv, target_tv_addr, 0);
1159 
1160     return 0;
1161 }
1162 
1163 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1164                                             const struct timeval *tv)
1165 {
1166     struct target_timeval *target_tv;
1167 
1168     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1169         return -TARGET_EFAULT;
1170     }
1171 
1172     __put_user(tv->tv_sec, &target_tv->tv_sec);
1173     __put_user(tv->tv_usec, &target_tv->tv_usec);
1174 
1175     unlock_user_struct(target_tv, target_tv_addr, 1);
1176 
1177     return 0;
1178 }
1179 
1180 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1181                                              const struct timeval *tv)
1182 {
1183     struct target__kernel_sock_timeval *target_tv;
1184 
1185     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1186         return -TARGET_EFAULT;
1187     }
1188 
1189     __put_user(tv->tv_sec, &target_tv->tv_sec);
1190     __put_user(tv->tv_usec, &target_tv->tv_usec);
1191 
1192     unlock_user_struct(target_tv, target_tv_addr, 1);
1193 
1194     return 0;
1195 }
1196 
1197 #if defined(TARGET_NR_futex) || \
1198     defined(TARGET_NR_rt_sigtimedwait) || \
1199     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1200     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1201     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1202     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1203     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1204     defined(TARGET_NR_timer_settime) || \
1205     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1206 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1207                                                abi_ulong target_addr)
1208 {
1209     struct target_timespec *target_ts;
1210 
1211     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1212         return -TARGET_EFAULT;
1213     }
1214     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1215     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216     unlock_user_struct(target_ts, target_addr, 0);
1217     return 0;
1218 }
1219 #endif
1220 
1221 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1222     defined(TARGET_NR_timer_settime64) || \
1223     defined(TARGET_NR_mq_timedsend_time64) || \
1224     defined(TARGET_NR_mq_timedreceive_time64) || \
1225     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1226 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1227                                                  abi_ulong target_addr)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     /* in 32bit mode, this drops the padding */
1237     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1238     unlock_user_struct(target_ts, target_addr, 0);
1239     return 0;
1240 }
1241 #endif
1242 
1243 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1244                                                struct timespec *host_ts)
1245 {
1246     struct target_timespec *target_ts;
1247 
1248     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1249         return -TARGET_EFAULT;
1250     }
1251     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1252     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1253     unlock_user_struct(target_ts, target_addr, 1);
1254     return 0;
1255 }
1256 
1257 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1258                                                  struct timespec *host_ts)
1259 {
1260     struct target__kernel_timespec *target_ts;
1261 
1262     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1263         return -TARGET_EFAULT;
1264     }
1265     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1266     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1267     unlock_user_struct(target_ts, target_addr, 1);
1268     return 0;
1269 }
1270 
1271 #if defined(TARGET_NR_gettimeofday)
1272 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1273                                              struct timezone *tz)
1274 {
1275     struct target_timezone *target_tz;
1276 
1277     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1278         return -TARGET_EFAULT;
1279     }
1280 
1281     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1282     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1283 
1284     unlock_user_struct(target_tz, target_tz_addr, 1);
1285 
1286     return 0;
1287 }
1288 #endif
1289 
1290 #if defined(TARGET_NR_settimeofday)
1291 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1292                                                abi_ulong target_tz_addr)
1293 {
1294     struct target_timezone *target_tz;
1295 
1296     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1297         return -TARGET_EFAULT;
1298     }
1299 
1300     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1301     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1302 
1303     unlock_user_struct(target_tz, target_tz_addr, 0);
1304 
1305     return 0;
1306 }
1307 #endif
1308 
1309 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1310 #include <mqueue.h>
1311 
1312 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1313                                               abi_ulong target_mq_attr_addr)
1314 {
1315     struct target_mq_attr *target_mq_attr;
1316 
1317     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1318                           target_mq_attr_addr, 1))
1319         return -TARGET_EFAULT;
1320 
1321     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1322     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1323     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1324     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1325 
1326     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1327 
1328     return 0;
1329 }
1330 
1331 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1332                                             const struct mq_attr *attr)
1333 {
1334     struct target_mq_attr *target_mq_attr;
1335 
1336     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1337                           target_mq_attr_addr, 0))
1338         return -TARGET_EFAULT;
1339 
1340     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1341     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1342     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1343     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1344 
1345     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1346 
1347     return 0;
1348 }
1349 #endif
1350 
1351 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address for any fd set yields a NULL host pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval; pselect6 wants a timespec, so convert. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the result sets and the (possibly updated) timeout
         * back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1408 
1409 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: the single argument points at a struct holding n
 * and the four pointers; unpack it and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    /* All fields are guest-endian and must be byteswapped. */
    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
1430 #endif
1431 #endif
1432 
/* Thin pipe2() wrapper: returns the raw host result, or -ENOSYS when
 * the host libc has no pipe2 (CONFIG_PIPE2 unset). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1441 
/* Implement the guest pipe()/pipe2() syscalls.  flags selects pipe2();
 * is_pipe2 distinguishes which guest syscall was used, because several
 * targets return the two fds in registers for plain pipe(). */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
        /* On these targets the read fd is the syscall return value and
         * the write fd is delivered in a second register. */
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: write both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1475 
/* Convert a guest ip_mreq/ip_mreqn (as used by IP_ADD_MEMBERSHIP etc.)
 * into a host struct ip_mreqn.  The addresses are already network byte
 * order and are copied as-is; only ifindex needs byteswapping, and only
 * when the guest passed the longer mreqn form.
 * NOTE(review): len is not validated against the struct sizes here;
 * presumably the caller guarantees len >= sizeof(struct target_ip_mreq)
 * -- confirm at the call sites. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
1493 
/* Convert a guest sockaddr of the given length into a host sockaddr,
 * byteswapping the family and any family-specific fields.  An fd with
 * a registered address translator (fd_trans) overrides the generic
 * path entirely.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Extend len by one if it stops just short of a NUL. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Byteswap the family-specific fields that need it. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1550 
/* Copy a host sockaddr back to guest memory, byteswapping the family
 * and any family-specific fields.  len == 0 is a successful no-op.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is long enough to contain it
     * (the kernel may legitimately return a truncated address). */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): the AF_PACKET branch casts to the *host*
         * struct sockaddr_ll; field offsets are assumed to match the
         * target layout -- confirm. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1590 
/* Walk the guest's control-message (cmsg) chain in target_msgh and
 * convert each message into the pre-allocated host buffer described by
 * msgh, translating levels/types and payloads (SCM_RIGHTS fd arrays,
 * SCM_CREDENTIALS) where we know how.  On return msgh->msg_controllen
 * holds the space actually used.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length, excluding the cmsg header itself */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* fd-passing: swap each fd in the payload array */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload types are copied verbatim, unswapped. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1676 
/* Convert the ancillary data (control messages) attached to a host
 * msghdr into the guest's layout at target_msgh->msg_control, updating
 * target_msgh->msg_controllen to the space actually used.  Payloads
 * whose size or layout differs between host and guest (SO_TIMESTAMP
 * timevals, SCM_CREDENTIALS, IP/IPV6 RECVERR extended-error records)
 * are converted field by field; unknown types are byte-copied with a
 * LOG_UNIMP warning.  Returns 0 or -TARGET_EFAULT if the guest control
 * buffer cannot be locked.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;      /* bytes of guest control buffer consumed */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length (may itself be truncated by the kernel). */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough guest buffer left for the full converted payload:
         * report truncation and clamp tgt_len to what fits.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Array of file descriptors; byteswap each int.  Any
                 * trailing partial int is dropped by the division.
                 */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Conversion only handles untruncated timevals. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Mirrors the kernel's layout: sock_extended_err
                 * immediately followed by the offending address.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 variant of the IP_RECVERR record above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding if the guest
             * representation is larger than the host one.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the space this message consumed and advance both
         * the host and guest cmsg cursors in lockstep.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1908 
1909 /* do_setsockopt() Must return target values and target errnos. */
1910 static abi_long do_setsockopt(int sockfd, int level, int optname,
1911                               abi_ulong optval_addr, socklen_t optlen)
1912 {
1913     abi_long ret;
1914     int val;
1915     struct ip_mreqn *ip_mreq;
1916     struct ip_mreq_source *ip_mreq_source;
1917 
1918     switch(level) {
1919     case SOL_TCP:
1920         /* TCP options all take an 'int' value.  */
1921         if (optlen < sizeof(uint32_t))
1922             return -TARGET_EINVAL;
1923 
1924         if (get_user_u32(val, optval_addr))
1925             return -TARGET_EFAULT;
1926         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1927         break;
1928     case SOL_IP:
1929         switch(optname) {
1930         case IP_TOS:
1931         case IP_TTL:
1932         case IP_HDRINCL:
1933         case IP_ROUTER_ALERT:
1934         case IP_RECVOPTS:
1935         case IP_RETOPTS:
1936         case IP_PKTINFO:
1937         case IP_MTU_DISCOVER:
1938         case IP_RECVERR:
1939         case IP_RECVTTL:
1940         case IP_RECVTOS:
1941 #ifdef IP_FREEBIND
1942         case IP_FREEBIND:
1943 #endif
1944         case IP_MULTICAST_TTL:
1945         case IP_MULTICAST_LOOP:
1946             val = 0;
1947             if (optlen >= sizeof(uint32_t)) {
1948                 if (get_user_u32(val, optval_addr))
1949                     return -TARGET_EFAULT;
1950             } else if (optlen >= 1) {
1951                 if (get_user_u8(val, optval_addr))
1952                     return -TARGET_EFAULT;
1953             }
1954             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1955             break;
1956         case IP_ADD_MEMBERSHIP:
1957         case IP_DROP_MEMBERSHIP:
1958             if (optlen < sizeof (struct target_ip_mreq) ||
1959                 optlen > sizeof (struct target_ip_mreqn))
1960                 return -TARGET_EINVAL;
1961 
1962             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1963             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1964             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1965             break;
1966 
1967         case IP_BLOCK_SOURCE:
1968         case IP_UNBLOCK_SOURCE:
1969         case IP_ADD_SOURCE_MEMBERSHIP:
1970         case IP_DROP_SOURCE_MEMBERSHIP:
1971             if (optlen != sizeof (struct target_ip_mreq_source))
1972                 return -TARGET_EINVAL;
1973 
1974             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1975             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1976             unlock_user (ip_mreq_source, optval_addr, 0);
1977             break;
1978 
1979         default:
1980             goto unimplemented;
1981         }
1982         break;
1983     case SOL_IPV6:
1984         switch (optname) {
1985         case IPV6_MTU_DISCOVER:
1986         case IPV6_MTU:
1987         case IPV6_V6ONLY:
1988         case IPV6_RECVPKTINFO:
1989         case IPV6_UNICAST_HOPS:
1990         case IPV6_MULTICAST_HOPS:
1991         case IPV6_MULTICAST_LOOP:
1992         case IPV6_RECVERR:
1993         case IPV6_RECVHOPLIMIT:
1994         case IPV6_2292HOPLIMIT:
1995         case IPV6_CHECKSUM:
1996         case IPV6_ADDRFORM:
1997         case IPV6_2292PKTINFO:
1998         case IPV6_RECVTCLASS:
1999         case IPV6_RECVRTHDR:
2000         case IPV6_2292RTHDR:
2001         case IPV6_RECVHOPOPTS:
2002         case IPV6_2292HOPOPTS:
2003         case IPV6_RECVDSTOPTS:
2004         case IPV6_2292DSTOPTS:
2005         case IPV6_TCLASS:
2006 #ifdef IPV6_RECVPATHMTU
2007         case IPV6_RECVPATHMTU:
2008 #endif
2009 #ifdef IPV6_TRANSPARENT
2010         case IPV6_TRANSPARENT:
2011 #endif
2012 #ifdef IPV6_FREEBIND
2013         case IPV6_FREEBIND:
2014 #endif
2015 #ifdef IPV6_RECVORIGDSTADDR
2016         case IPV6_RECVORIGDSTADDR:
2017 #endif
2018             val = 0;
2019             if (optlen < sizeof(uint32_t)) {
2020                 return -TARGET_EINVAL;
2021             }
2022             if (get_user_u32(val, optval_addr)) {
2023                 return -TARGET_EFAULT;
2024             }
2025             ret = get_errno(setsockopt(sockfd, level, optname,
2026                                        &val, sizeof(val)));
2027             break;
2028         case IPV6_PKTINFO:
2029         {
2030             struct in6_pktinfo pki;
2031 
2032             if (optlen < sizeof(pki)) {
2033                 return -TARGET_EINVAL;
2034             }
2035 
2036             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2037                 return -TARGET_EFAULT;
2038             }
2039 
2040             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2041 
2042             ret = get_errno(setsockopt(sockfd, level, optname,
2043                                        &pki, sizeof(pki)));
2044             break;
2045         }
2046         case IPV6_ADD_MEMBERSHIP:
2047         case IPV6_DROP_MEMBERSHIP:
2048         {
2049             struct ipv6_mreq ipv6mreq;
2050 
2051             if (optlen < sizeof(ipv6mreq)) {
2052                 return -TARGET_EINVAL;
2053             }
2054 
2055             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2056                 return -TARGET_EFAULT;
2057             }
2058 
2059             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2060 
2061             ret = get_errno(setsockopt(sockfd, level, optname,
2062                                        &ipv6mreq, sizeof(ipv6mreq)));
2063             break;
2064         }
2065         default:
2066             goto unimplemented;
2067         }
2068         break;
2069     case SOL_ICMPV6:
2070         switch (optname) {
2071         case ICMPV6_FILTER:
2072         {
2073             struct icmp6_filter icmp6f;
2074 
2075             if (optlen > sizeof(icmp6f)) {
2076                 optlen = sizeof(icmp6f);
2077             }
2078 
2079             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2080                 return -TARGET_EFAULT;
2081             }
2082 
2083             for (val = 0; val < 8; val++) {
2084                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2085             }
2086 
2087             ret = get_errno(setsockopt(sockfd, level, optname,
2088                                        &icmp6f, optlen));
2089             break;
2090         }
2091         default:
2092             goto unimplemented;
2093         }
2094         break;
2095     case SOL_RAW:
2096         switch (optname) {
2097         case ICMP_FILTER:
2098         case IPV6_CHECKSUM:
2099             /* those take an u32 value */
2100             if (optlen < sizeof(uint32_t)) {
2101                 return -TARGET_EINVAL;
2102             }
2103 
2104             if (get_user_u32(val, optval_addr)) {
2105                 return -TARGET_EFAULT;
2106             }
2107             ret = get_errno(setsockopt(sockfd, level, optname,
2108                                        &val, sizeof(val)));
2109             break;
2110 
2111         default:
2112             goto unimplemented;
2113         }
2114         break;
2115 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2116     case SOL_ALG:
2117         switch (optname) {
2118         case ALG_SET_KEY:
2119         {
2120             char *alg_key = g_malloc(optlen);
2121 
2122             if (!alg_key) {
2123                 return -TARGET_ENOMEM;
2124             }
2125             if (copy_from_user(alg_key, optval_addr, optlen)) {
2126                 g_free(alg_key);
2127                 return -TARGET_EFAULT;
2128             }
2129             ret = get_errno(setsockopt(sockfd, level, optname,
2130                                        alg_key, optlen));
2131             g_free(alg_key);
2132             break;
2133         }
2134         case ALG_SET_AEAD_AUTHSIZE:
2135         {
2136             ret = get_errno(setsockopt(sockfd, level, optname,
2137                                        NULL, optlen));
2138             break;
2139         }
2140         default:
2141             goto unimplemented;
2142         }
2143         break;
2144 #endif
2145     case TARGET_SOL_SOCKET:
2146         switch (optname) {
2147         case TARGET_SO_RCVTIMEO:
2148         {
2149                 struct timeval tv;
2150 
2151                 optname = SO_RCVTIMEO;
2152 
2153 set_timeout:
2154                 if (optlen != sizeof(struct target_timeval)) {
2155                     return -TARGET_EINVAL;
2156                 }
2157 
2158                 if (copy_from_user_timeval(&tv, optval_addr)) {
2159                     return -TARGET_EFAULT;
2160                 }
2161 
2162                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2163                                 &tv, sizeof(tv)));
2164                 return ret;
2165         }
2166         case TARGET_SO_SNDTIMEO:
2167                 optname = SO_SNDTIMEO;
2168                 goto set_timeout;
2169         case TARGET_SO_ATTACH_FILTER:
2170         {
2171                 struct target_sock_fprog *tfprog;
2172                 struct target_sock_filter *tfilter;
2173                 struct sock_fprog fprog;
2174                 struct sock_filter *filter;
2175                 int i;
2176 
2177                 if (optlen != sizeof(*tfprog)) {
2178                     return -TARGET_EINVAL;
2179                 }
2180                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2181                     return -TARGET_EFAULT;
2182                 }
2183                 if (!lock_user_struct(VERIFY_READ, tfilter,
2184                                       tswapal(tfprog->filter), 0)) {
2185                     unlock_user_struct(tfprog, optval_addr, 1);
2186                     return -TARGET_EFAULT;
2187                 }
2188 
2189                 fprog.len = tswap16(tfprog->len);
2190                 filter = g_try_new(struct sock_filter, fprog.len);
2191                 if (filter == NULL) {
2192                     unlock_user_struct(tfilter, tfprog->filter, 1);
2193                     unlock_user_struct(tfprog, optval_addr, 1);
2194                     return -TARGET_ENOMEM;
2195                 }
2196                 for (i = 0; i < fprog.len; i++) {
2197                     filter[i].code = tswap16(tfilter[i].code);
2198                     filter[i].jt = tfilter[i].jt;
2199                     filter[i].jf = tfilter[i].jf;
2200                     filter[i].k = tswap32(tfilter[i].k);
2201                 }
2202                 fprog.filter = filter;
2203 
2204                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2205                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2206                 g_free(filter);
2207 
2208                 unlock_user_struct(tfilter, tfprog->filter, 1);
2209                 unlock_user_struct(tfprog, optval_addr, 1);
2210                 return ret;
2211         }
2212 	case TARGET_SO_BINDTODEVICE:
2213 	{
2214 		char *dev_ifname, *addr_ifname;
2215 
2216 		if (optlen > IFNAMSIZ - 1) {
2217 		    optlen = IFNAMSIZ - 1;
2218 		}
2219 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2220 		if (!dev_ifname) {
2221 		    return -TARGET_EFAULT;
2222 		}
2223 		optname = SO_BINDTODEVICE;
2224 		addr_ifname = alloca(IFNAMSIZ);
2225 		memcpy(addr_ifname, dev_ifname, optlen);
2226 		addr_ifname[optlen] = 0;
2227 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2228                                            addr_ifname, optlen));
2229 		unlock_user (dev_ifname, optval_addr, 0);
2230 		return ret;
2231 	}
2232         case TARGET_SO_LINGER:
2233         {
2234                 struct linger lg;
2235                 struct target_linger *tlg;
2236 
2237                 if (optlen != sizeof(struct target_linger)) {
2238                     return -TARGET_EINVAL;
2239                 }
2240                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2241                     return -TARGET_EFAULT;
2242                 }
2243                 __get_user(lg.l_onoff, &tlg->l_onoff);
2244                 __get_user(lg.l_linger, &tlg->l_linger);
2245                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2246                                 &lg, sizeof(lg)));
2247                 unlock_user_struct(tlg, optval_addr, 0);
2248                 return ret;
2249         }
2250             /* Options with 'int' argument.  */
2251         case TARGET_SO_DEBUG:
2252 		optname = SO_DEBUG;
2253 		break;
2254         case TARGET_SO_REUSEADDR:
2255 		optname = SO_REUSEADDR;
2256 		break;
2257 #ifdef SO_REUSEPORT
2258         case TARGET_SO_REUSEPORT:
2259                 optname = SO_REUSEPORT;
2260                 break;
2261 #endif
2262         case TARGET_SO_TYPE:
2263 		optname = SO_TYPE;
2264 		break;
2265         case TARGET_SO_ERROR:
2266 		optname = SO_ERROR;
2267 		break;
2268         case TARGET_SO_DONTROUTE:
2269 		optname = SO_DONTROUTE;
2270 		break;
2271         case TARGET_SO_BROADCAST:
2272 		optname = SO_BROADCAST;
2273 		break;
2274         case TARGET_SO_SNDBUF:
2275 		optname = SO_SNDBUF;
2276 		break;
2277         case TARGET_SO_SNDBUFFORCE:
2278                 optname = SO_SNDBUFFORCE;
2279                 break;
2280         case TARGET_SO_RCVBUF:
2281 		optname = SO_RCVBUF;
2282 		break;
2283         case TARGET_SO_RCVBUFFORCE:
2284                 optname = SO_RCVBUFFORCE;
2285                 break;
2286         case TARGET_SO_KEEPALIVE:
2287 		optname = SO_KEEPALIVE;
2288 		break;
2289         case TARGET_SO_OOBINLINE:
2290 		optname = SO_OOBINLINE;
2291 		break;
2292         case TARGET_SO_NO_CHECK:
2293 		optname = SO_NO_CHECK;
2294 		break;
2295         case TARGET_SO_PRIORITY:
2296 		optname = SO_PRIORITY;
2297 		break;
2298 #ifdef SO_BSDCOMPAT
2299         case TARGET_SO_BSDCOMPAT:
2300 		optname = SO_BSDCOMPAT;
2301 		break;
2302 #endif
2303         case TARGET_SO_PASSCRED:
2304 		optname = SO_PASSCRED;
2305 		break;
2306         case TARGET_SO_PASSSEC:
2307                 optname = SO_PASSSEC;
2308                 break;
2309         case TARGET_SO_TIMESTAMP:
2310 		optname = SO_TIMESTAMP;
2311 		break;
2312         case TARGET_SO_RCVLOWAT:
2313 		optname = SO_RCVLOWAT;
2314 		break;
2315         default:
2316             goto unimplemented;
2317         }
2318 	if (optlen < sizeof(uint32_t))
2319             return -TARGET_EINVAL;
2320 
2321 	if (get_user_u32(val, optval_addr))
2322             return -TARGET_EFAULT;
2323 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2324         break;
2325 #ifdef SOL_NETLINK
2326     case SOL_NETLINK:
2327         switch (optname) {
2328         case NETLINK_PKTINFO:
2329         case NETLINK_ADD_MEMBERSHIP:
2330         case NETLINK_DROP_MEMBERSHIP:
2331         case NETLINK_BROADCAST_ERROR:
2332         case NETLINK_NO_ENOBUFS:
2333 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2334         case NETLINK_LISTEN_ALL_NSID:
2335         case NETLINK_CAP_ACK:
2336 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2337 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2338         case NETLINK_EXT_ACK:
2339 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2340 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2341         case NETLINK_GET_STRICT_CHK:
2342 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2343             break;
2344         default:
2345             goto unimplemented;
2346         }
2347         val = 0;
2348         if (optlen < sizeof(uint32_t)) {
2349             return -TARGET_EINVAL;
2350         }
2351         if (get_user_u32(val, optval_addr)) {
2352             return -TARGET_EFAULT;
2353         }
2354         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2355                                    sizeof(val)));
2356         break;
2357 #endif /* SOL_NETLINK */
2358     default:
2359     unimplemented:
2360         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2361                       level, optname);
2362         ret = -TARGET_ENOPROTOOPT;
2363     }
2364     return ret;
2365 }
2366 
2367 /* do_getsockopt() Must return target values and target errnos. */
2368 static abi_long do_getsockopt(int sockfd, int level, int optname,
2369                               abi_ulong optval_addr, abi_ulong optlen)
2370 {
2371     abi_long ret;
2372     int len, val;
2373     socklen_t lv;
2374 
2375     switch(level) {
2376     case TARGET_SOL_SOCKET:
2377         level = SOL_SOCKET;
2378         switch (optname) {
2379         /* These don't just return a single integer */
2380         case TARGET_SO_PEERNAME:
2381             goto unimplemented;
2382         case TARGET_SO_RCVTIMEO: {
2383             struct timeval tv;
2384             socklen_t tvlen;
2385 
2386             optname = SO_RCVTIMEO;
2387 
2388 get_timeout:
2389             if (get_user_u32(len, optlen)) {
2390                 return -TARGET_EFAULT;
2391             }
2392             if (len < 0) {
2393                 return -TARGET_EINVAL;
2394             }
2395 
2396             tvlen = sizeof(tv);
2397             ret = get_errno(getsockopt(sockfd, level, optname,
2398                                        &tv, &tvlen));
2399             if (ret < 0) {
2400                 return ret;
2401             }
2402             if (len > sizeof(struct target_timeval)) {
2403                 len = sizeof(struct target_timeval);
2404             }
2405             if (copy_to_user_timeval(optval_addr, &tv)) {
2406                 return -TARGET_EFAULT;
2407             }
2408             if (put_user_u32(len, optlen)) {
2409                 return -TARGET_EFAULT;
2410             }
2411             break;
2412         }
2413         case TARGET_SO_SNDTIMEO:
2414             optname = SO_SNDTIMEO;
2415             goto get_timeout;
2416         case TARGET_SO_PEERCRED: {
2417             struct ucred cr;
2418             socklen_t crlen;
2419             struct target_ucred *tcr;
2420 
2421             if (get_user_u32(len, optlen)) {
2422                 return -TARGET_EFAULT;
2423             }
2424             if (len < 0) {
2425                 return -TARGET_EINVAL;
2426             }
2427 
2428             crlen = sizeof(cr);
2429             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2430                                        &cr, &crlen));
2431             if (ret < 0) {
2432                 return ret;
2433             }
2434             if (len > crlen) {
2435                 len = crlen;
2436             }
2437             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2438                 return -TARGET_EFAULT;
2439             }
2440             __put_user(cr.pid, &tcr->pid);
2441             __put_user(cr.uid, &tcr->uid);
2442             __put_user(cr.gid, &tcr->gid);
2443             unlock_user_struct(tcr, optval_addr, 1);
2444             if (put_user_u32(len, optlen)) {
2445                 return -TARGET_EFAULT;
2446             }
2447             break;
2448         }
2449         case TARGET_SO_PEERSEC: {
2450             char *name;
2451 
2452             if (get_user_u32(len, optlen)) {
2453                 return -TARGET_EFAULT;
2454             }
2455             if (len < 0) {
2456                 return -TARGET_EINVAL;
2457             }
2458             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2459             if (!name) {
2460                 return -TARGET_EFAULT;
2461             }
2462             lv = len;
2463             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2464                                        name, &lv));
2465             if (put_user_u32(lv, optlen)) {
2466                 ret = -TARGET_EFAULT;
2467             }
2468             unlock_user(name, optval_addr, lv);
2469             break;
2470         }
2471         case TARGET_SO_LINGER:
2472         {
2473             struct linger lg;
2474             socklen_t lglen;
2475             struct target_linger *tlg;
2476 
2477             if (get_user_u32(len, optlen)) {
2478                 return -TARGET_EFAULT;
2479             }
2480             if (len < 0) {
2481                 return -TARGET_EINVAL;
2482             }
2483 
2484             lglen = sizeof(lg);
2485             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2486                                        &lg, &lglen));
2487             if (ret < 0) {
2488                 return ret;
2489             }
2490             if (len > lglen) {
2491                 len = lglen;
2492             }
2493             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2494                 return -TARGET_EFAULT;
2495             }
2496             __put_user(lg.l_onoff, &tlg->l_onoff);
2497             __put_user(lg.l_linger, &tlg->l_linger);
2498             unlock_user_struct(tlg, optval_addr, 1);
2499             if (put_user_u32(len, optlen)) {
2500                 return -TARGET_EFAULT;
2501             }
2502             break;
2503         }
2504         /* Options with 'int' argument.  */
2505         case TARGET_SO_DEBUG:
2506             optname = SO_DEBUG;
2507             goto int_case;
2508         case TARGET_SO_REUSEADDR:
2509             optname = SO_REUSEADDR;
2510             goto int_case;
2511 #ifdef SO_REUSEPORT
2512         case TARGET_SO_REUSEPORT:
2513             optname = SO_REUSEPORT;
2514             goto int_case;
2515 #endif
2516         case TARGET_SO_TYPE:
2517             optname = SO_TYPE;
2518             goto int_case;
2519         case TARGET_SO_ERROR:
2520             optname = SO_ERROR;
2521             goto int_case;
2522         case TARGET_SO_DONTROUTE:
2523             optname = SO_DONTROUTE;
2524             goto int_case;
2525         case TARGET_SO_BROADCAST:
2526             optname = SO_BROADCAST;
2527             goto int_case;
2528         case TARGET_SO_SNDBUF:
2529             optname = SO_SNDBUF;
2530             goto int_case;
2531         case TARGET_SO_RCVBUF:
2532             optname = SO_RCVBUF;
2533             goto int_case;
2534         case TARGET_SO_KEEPALIVE:
2535             optname = SO_KEEPALIVE;
2536             goto int_case;
2537         case TARGET_SO_OOBINLINE:
2538             optname = SO_OOBINLINE;
2539             goto int_case;
2540         case TARGET_SO_NO_CHECK:
2541             optname = SO_NO_CHECK;
2542             goto int_case;
2543         case TARGET_SO_PRIORITY:
2544             optname = SO_PRIORITY;
2545             goto int_case;
2546 #ifdef SO_BSDCOMPAT
2547         case TARGET_SO_BSDCOMPAT:
2548             optname = SO_BSDCOMPAT;
2549             goto int_case;
2550 #endif
2551         case TARGET_SO_PASSCRED:
2552             optname = SO_PASSCRED;
2553             goto int_case;
2554         case TARGET_SO_TIMESTAMP:
2555             optname = SO_TIMESTAMP;
2556             goto int_case;
2557         case TARGET_SO_RCVLOWAT:
2558             optname = SO_RCVLOWAT;
2559             goto int_case;
2560         case TARGET_SO_ACCEPTCONN:
2561             optname = SO_ACCEPTCONN;
2562             goto int_case;
2563         default:
2564             goto int_case;
2565         }
2566         break;
2567     case SOL_TCP:
2568         /* TCP options all take an 'int' value.  */
2569     int_case:
2570         if (get_user_u32(len, optlen))
2571             return -TARGET_EFAULT;
2572         if (len < 0)
2573             return -TARGET_EINVAL;
2574         lv = sizeof(lv);
2575         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2576         if (ret < 0)
2577             return ret;
2578         if (optname == SO_TYPE) {
2579             val = host_to_target_sock_type(val);
2580         }
2581         if (len > lv)
2582             len = lv;
2583         if (len == 4) {
2584             if (put_user_u32(val, optval_addr))
2585                 return -TARGET_EFAULT;
2586         } else {
2587             if (put_user_u8(val, optval_addr))
2588                 return -TARGET_EFAULT;
2589         }
2590         if (put_user_u32(len, optlen))
2591             return -TARGET_EFAULT;
2592         break;
2593     case SOL_IP:
2594         switch(optname) {
2595         case IP_TOS:
2596         case IP_TTL:
2597         case IP_HDRINCL:
2598         case IP_ROUTER_ALERT:
2599         case IP_RECVOPTS:
2600         case IP_RETOPTS:
2601         case IP_PKTINFO:
2602         case IP_MTU_DISCOVER:
2603         case IP_RECVERR:
2604         case IP_RECVTOS:
2605 #ifdef IP_FREEBIND
2606         case IP_FREEBIND:
2607 #endif
2608         case IP_MULTICAST_TTL:
2609         case IP_MULTICAST_LOOP:
2610             if (get_user_u32(len, optlen))
2611                 return -TARGET_EFAULT;
2612             if (len < 0)
2613                 return -TARGET_EINVAL;
2614             lv = sizeof(lv);
2615             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2616             if (ret < 0)
2617                 return ret;
2618             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2619                 len = 1;
2620                 if (put_user_u32(len, optlen)
2621                     || put_user_u8(val, optval_addr))
2622                     return -TARGET_EFAULT;
2623             } else {
2624                 if (len > sizeof(int))
2625                     len = sizeof(int);
2626                 if (put_user_u32(len, optlen)
2627                     || put_user_u32(val, optval_addr))
2628                     return -TARGET_EFAULT;
2629             }
2630             break;
2631         default:
2632             ret = -TARGET_ENOPROTOOPT;
2633             break;
2634         }
2635         break;
2636     case SOL_IPV6:
2637         switch (optname) {
2638         case IPV6_MTU_DISCOVER:
2639         case IPV6_MTU:
2640         case IPV6_V6ONLY:
2641         case IPV6_RECVPKTINFO:
2642         case IPV6_UNICAST_HOPS:
2643         case IPV6_MULTICAST_HOPS:
2644         case IPV6_MULTICAST_LOOP:
2645         case IPV6_RECVERR:
2646         case IPV6_RECVHOPLIMIT:
2647         case IPV6_2292HOPLIMIT:
2648         case IPV6_CHECKSUM:
2649         case IPV6_ADDRFORM:
2650         case IPV6_2292PKTINFO:
2651         case IPV6_RECVTCLASS:
2652         case IPV6_RECVRTHDR:
2653         case IPV6_2292RTHDR:
2654         case IPV6_RECVHOPOPTS:
2655         case IPV6_2292HOPOPTS:
2656         case IPV6_RECVDSTOPTS:
2657         case IPV6_2292DSTOPTS:
2658         case IPV6_TCLASS:
2659 #ifdef IPV6_RECVPATHMTU
2660         case IPV6_RECVPATHMTU:
2661 #endif
2662 #ifdef IPV6_TRANSPARENT
2663         case IPV6_TRANSPARENT:
2664 #endif
2665 #ifdef IPV6_FREEBIND
2666         case IPV6_FREEBIND:
2667 #endif
2668 #ifdef IPV6_RECVORIGDSTADDR
2669         case IPV6_RECVORIGDSTADDR:
2670 #endif
2671             if (get_user_u32(len, optlen))
2672                 return -TARGET_EFAULT;
2673             if (len < 0)
2674                 return -TARGET_EINVAL;
2675             lv = sizeof(lv);
2676             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2677             if (ret < 0)
2678                 return ret;
2679             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2680                 len = 1;
2681                 if (put_user_u32(len, optlen)
2682                     || put_user_u8(val, optval_addr))
2683                     return -TARGET_EFAULT;
2684             } else {
2685                 if (len > sizeof(int))
2686                     len = sizeof(int);
2687                 if (put_user_u32(len, optlen)
2688                     || put_user_u32(val, optval_addr))
2689                     return -TARGET_EFAULT;
2690             }
2691             break;
2692         default:
2693             ret = -TARGET_ENOPROTOOPT;
2694             break;
2695         }
2696         break;
2697 #ifdef SOL_NETLINK
2698     case SOL_NETLINK:
2699         switch (optname) {
2700         case NETLINK_PKTINFO:
2701         case NETLINK_BROADCAST_ERROR:
2702         case NETLINK_NO_ENOBUFS:
2703 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2704         case NETLINK_LISTEN_ALL_NSID:
2705         case NETLINK_CAP_ACK:
2706 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2707 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2708         case NETLINK_EXT_ACK:
2709 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2710 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2711         case NETLINK_GET_STRICT_CHK:
2712 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2713             if (get_user_u32(len, optlen)) {
2714                 return -TARGET_EFAULT;
2715             }
2716             if (len != sizeof(val)) {
2717                 return -TARGET_EINVAL;
2718             }
2719             lv = len;
2720             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2721             if (ret < 0) {
2722                 return ret;
2723             }
2724             if (put_user_u32(lv, optlen)
2725                 || put_user_u32(val, optval_addr)) {
2726                 return -TARGET_EFAULT;
2727             }
2728             break;
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2730         case NETLINK_LIST_MEMBERSHIPS:
2731         {
2732             uint32_t *results;
2733             int i;
2734             if (get_user_u32(len, optlen)) {
2735                 return -TARGET_EFAULT;
2736             }
2737             if (len < 0) {
2738                 return -TARGET_EINVAL;
2739             }
2740             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2741             if (!results) {
2742                 return -TARGET_EFAULT;
2743             }
2744             lv = len;
2745             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2746             if (ret < 0) {
2747                 unlock_user(results, optval_addr, 0);
2748                 return ret;
2749             }
2750             /* swap host endianness to target endianness. */
2751             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2752                 results[i] = tswap32(results[i]);
2753             }
2754             if (put_user_u32(lv, optlen)) {
2755                 return -TARGET_EFAULT;
2756             }
2757             unlock_user(results, optval_addr, 0);
2758             break;
2759         }
2760 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2761         default:
2762             goto unimplemented;
2763         }
2764         break;
2765 #endif /* SOL_NETLINK */
2766     default:
2767     unimplemented:
2768         qemu_log_mask(LOG_UNIMP,
2769                       "getsockopt level=%d optname=%d not yet supported\n",
2770                       level, optname);
2771         ret = -TARGET_EOPNOTSUPP;
2772         break;
2773     }
2774     return ret;
2775 }
2776 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The shift by TARGET_LONG_BITS is done as two half-width shifts so
     * that when TARGET_LONG_BITS equals the width of the promoted type
     * we never shift by the full type width, which would be undefined
     * behavior.  On a 64-bit target the two shifts discard thigh
     * entirely (off is just tlow); on a 32-bit target they combine into
     * a single 32-bit shift. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    /* Same double-shift trick for the host word size: yields the top
     * half of off on a 32-bit host, 0 on a 64-bit host. */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
2793 
/* Lock a guest iovec array and the buffers it describes into host memory.
 *
 * type: VERIFY_READ or VERIFY_WRITE depending on the I/O direction.
 * target_addr: guest address of the target_iovec array.
 * count: number of entries.
 * copy: forwarded to lock_user(); nonzero copies guest data in.
 *
 * Returns a host struct iovec array to be released with unlock_iovec(),
 * or NULL with errno set on error.  count == 0 also returns NULL but
 * with errno == 0; callers must check errno to tell the two apart.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock only the entries before i that were actually
     * locked (those whose guest-declared length was positive). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2881 
2882 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2883                          abi_ulong count, int copy)
2884 {
2885     struct target_iovec *target_vec;
2886     int i;
2887 
2888     target_vec = lock_user(VERIFY_READ, target_addr,
2889                            count * sizeof(struct target_iovec), 1);
2890     if (target_vec) {
2891         for (i = 0; i < count; i++) {
2892             abi_ulong base = tswapal(target_vec[i].iov_base);
2893             abi_long len = tswapal(target_vec[i].iov_len);
2894             if (len < 0) {
2895                 break;
2896             }
2897             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2898         }
2899         unlock_user(target_vec, target_addr, 0);
2900     }
2901 
2902     g_free(vec);
2903 }
2904 
2905 static inline int target_to_host_sock_type(int *type)
2906 {
2907     int host_type = 0;
2908     int target_type = *type;
2909 
2910     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2911     case TARGET_SOCK_DGRAM:
2912         host_type = SOCK_DGRAM;
2913         break;
2914     case TARGET_SOCK_STREAM:
2915         host_type = SOCK_STREAM;
2916         break;
2917     default:
2918         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2919         break;
2920     }
2921     if (target_type & TARGET_SOCK_CLOEXEC) {
2922 #if defined(SOCK_CLOEXEC)
2923         host_type |= SOCK_CLOEXEC;
2924 #else
2925         return -TARGET_EINVAL;
2926 #endif
2927     }
2928     if (target_type & TARGET_SOCK_NONBLOCK) {
2929 #if defined(SOCK_NONBLOCK)
2930         host_type |= SOCK_NONBLOCK;
2931 #elif !defined(O_NONBLOCK)
2932         return -TARGET_EINVAL;
2933 #endif
2934     }
2935     *type = host_type;
2936     return 0;
2937 }
2938 
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, a requested TARGET_SOCK_NONBLOCK is
 * emulated via fcntl(O_NONBLOCK).  Returns fd on success; on failure the
 * fd is closed and a target errno is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Fix: F_GETFL can fail and return -1; OR-ing -1 into F_SETFL
         * would set every status flag bit.  Treat that as a failure. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2953 
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* Keep the original target type: sock_flags_fixup() needs the
     * untranslated flag bits after 'type' is rewritten below. */
    int target_type = type;
    int ret;

    /* Translate SOCK_* base type and flag bits to host values. */
    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only a small whitelist of netlink protocols is emulated. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* For packet sockets the protocol is a 16-bit value in network
     * byte order on the wire; swap it to match the host's view. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Register the per-protocol message translator so that
             * later reads/writes on this fd get byte-swapped. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: the whitelist above rejected everything
                 * else already. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3007 
3008 /* do_bind() Must return target values and target errnos. */
3009 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3010                         socklen_t addrlen)
3011 {
3012     void *addr;
3013     abi_long ret;
3014 
3015     if ((int)addrlen < 0) {
3016         return -TARGET_EINVAL;
3017     }
3018 
3019     addr = alloca(addrlen+1);
3020 
3021     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3022     if (ret)
3023         return ret;
3024 
3025     return get_errno(bind(sockfd, addr, addrlen));
3026 }
3027 
3028 /* do_connect() Must return target values and target errnos. */
3029 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3030                            socklen_t addrlen)
3031 {
3032     void *addr;
3033     abi_long ret;
3034 
3035     if ((int)addrlen < 0) {
3036         return -TARGET_EINVAL;
3037     }
3038 
3039     addr = alloca(addrlen+1);
3040 
3041     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3042     if (ret)
3043         return ret;
3044 
3045     return get_errno(safe_connect(sockfd, addr, addrlen));
3046 }
3047 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr.  'send' selects the direction: nonzero for sendmsg,
 * zero for recvmsg.  On success returns the number of bytes
 * transferred; on failure a negative target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): controllen is guest-controlled and doubled before
     * the alloca -- presumably the 2x allows host control messages to
     * be larger than the target's encoding; a huge guest value would
     * be an oversized stack allocation.  Confirm callers bound it. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* An fd translator is registered: run the payload of the
             * first iovec entry through it on a private copy so the
             * locked guest buffer is not modified. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            /* Translate control messages (SCM_RIGHTS etc.) then send. */
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Write back the updated header fields to the guest. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 is the "known bad" sentinel set above;
                 * do not try to copy a sockaddr back through it. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* Success: report the byte count from recvmsg. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3154 
3155 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3156                                int flags, int send)
3157 {
3158     abi_long ret;
3159     struct target_msghdr *msgp;
3160 
3161     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3162                           msgp,
3163                           target_msg,
3164                           send ? 1 : 0)) {
3165         return -TARGET_EFAULT;
3166     }
3167     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3168     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3169     return ret;
3170 }
3171 
3172 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3173  * so it might not have this *mmsg-specific flag either.
3174  */
3175 #ifndef MSG_WAITFORONE
3176 #define MSG_WAITFORONE 0x10000
3177 #endif
3178 
3179 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3180                                 unsigned int vlen, unsigned int flags,
3181                                 int send)
3182 {
3183     struct target_mmsghdr *mmsgp;
3184     abi_long ret = 0;
3185     int i;
3186 
3187     if (vlen > UIO_MAXIOV) {
3188         vlen = UIO_MAXIOV;
3189     }
3190 
3191     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3192     if (!mmsgp) {
3193         return -TARGET_EFAULT;
3194     }
3195 
3196     for (i = 0; i < vlen; i++) {
3197         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3198         if (is_error(ret)) {
3199             break;
3200         }
3201         mmsgp[i].msg_len = tswap32(ret);
3202         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3203         if (flags & MSG_WAITFORONE) {
3204             flags |= MSG_DONTWAIT;
3205         }
3206     }
3207 
3208     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3209 
3210     /* Return number of datagrams sent if we sent any at all;
3211      * otherwise return the error.
3212      */
3213     if (i) {
3214         return i;
3215     }
3216     return ret;
3217 }
3218 
3219 /* do_accept4() Must return target values and target errnos. */
3220 static abi_long do_accept4(int fd, abi_ulong target_addr,
3221                            abi_ulong target_addrlen_addr, int flags)
3222 {
3223     socklen_t addrlen, ret_addrlen;
3224     void *addr;
3225     abi_long ret;
3226     int host_flags;
3227 
3228     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3229 
3230     if (target_addr == 0) {
3231         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3232     }
3233 
3234     /* linux returns EINVAL if addrlen pointer is invalid */
3235     if (get_user_u32(addrlen, target_addrlen_addr))
3236         return -TARGET_EINVAL;
3237 
3238     if ((int)addrlen < 0) {
3239         return -TARGET_EINVAL;
3240     }
3241 
3242     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3243         return -TARGET_EINVAL;
3244 
3245     addr = alloca(addrlen);
3246 
3247     ret_addrlen = addrlen;
3248     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3249     if (!is_error(ret)) {
3250         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3251         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3252             ret = -TARGET_EFAULT;
3253         }
3254     }
3255     return ret;
3256 }
3257 
3258 /* do_getpeername() Must return target values and target errnos. */
3259 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3260                                abi_ulong target_addrlen_addr)
3261 {
3262     socklen_t addrlen, ret_addrlen;
3263     void *addr;
3264     abi_long ret;
3265 
3266     if (get_user_u32(addrlen, target_addrlen_addr))
3267         return -TARGET_EFAULT;
3268 
3269     if ((int)addrlen < 0) {
3270         return -TARGET_EINVAL;
3271     }
3272 
3273     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3274         return -TARGET_EFAULT;
3275 
3276     addr = alloca(addrlen);
3277 
3278     ret_addrlen = addrlen;
3279     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3280     if (!is_error(ret)) {
3281         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3282         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3283             ret = -TARGET_EFAULT;
3284         }
3285     }
3286     return ret;
3287 }
3288 
3289 /* do_getsockname() Must return target values and target errnos. */
3290 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3291                                abi_ulong target_addrlen_addr)
3292 {
3293     socklen_t addrlen, ret_addrlen;
3294     void *addr;
3295     abi_long ret;
3296 
3297     if (get_user_u32(addrlen, target_addrlen_addr))
3298         return -TARGET_EFAULT;
3299 
3300     if ((int)addrlen < 0) {
3301         return -TARGET_EINVAL;
3302     }
3303 
3304     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3305         return -TARGET_EFAULT;
3306 
3307     addr = alloca(addrlen);
3308 
3309     ret_addrlen = addrlen;
3310     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3311     if (!is_error(ret)) {
3312         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3313         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3314             ret = -TARGET_EFAULT;
3315         }
3316     }
3317     return ret;
3318 }
3319 
3320 /* do_socketpair() Must return target values and target errnos. */
3321 static abi_long do_socketpair(int domain, int type, int protocol,
3322                               abi_ulong target_tab_addr)
3323 {
3324     int tab[2];
3325     abi_long ret;
3326 
3327     target_to_host_sock_type(&type);
3328 
3329     ret = get_errno(socketpair(domain, type, protocol, tab));
3330     if (!is_error(ret)) {
3331         if (put_user_s32(tab[0], target_tab_addr)
3332             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3333             ret = -TARGET_EFAULT;
3334     }
3335     return ret;
3336 }
3337 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    /* Non-NULL only when an fd translator is in use: then copy_msg
     * holds the original lock_user() pointer while host_msg points at
     * a mutable heap copy of the payload. */
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload on a private copy so the locked guest
         * buffer stays untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* One extra byte of scratch for target_to_host_sockaddr(). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Undo the pointer swap: free the heap copy and restore the
     * original pointer so unlock_user() gets what lock_user() gave. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3381 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* Guest wants the sender's address: read its buffer length. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Run the received payload (at most 'ret' bytes) through
             * the fd's host-to-target translator. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the buffer size the guest provided. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: mark 'len' bytes dirty so they reach the guest. */
        unlock_user(host_msg, msg, len);
    } else {
/* Error path: unlock without writing anything back. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3437 
3438 #ifdef TARGET_NR_socketcall
3439 /* do_socketcall() must return target values and target errnos. */
3440 static abi_long do_socketcall(int num, abi_ulong vptr)
3441 {
3442     static const unsigned nargs[] = { /* number of arguments per operation */
3443         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3444         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3445         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3446         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3447         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3448         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3449         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3450         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3451         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3452         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3453         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3454         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3455         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3456         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3457         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3458         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3459         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3460         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3461         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3462         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3463     };
3464     abi_long a[6]; /* max 6 args */
3465     unsigned i;
3466 
3467     /* check the range of the first argument num */
3468     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3469     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3470         return -TARGET_EINVAL;
3471     }
3472     /* ensure we have space for args */
3473     if (nargs[num] > ARRAY_SIZE(a)) {
3474         return -TARGET_EINVAL;
3475     }
3476     /* collect the arguments in a[] according to nargs[] */
3477     for (i = 0; i < nargs[num]; ++i) {
3478         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3479             return -TARGET_EFAULT;
3480         }
3481     }
3482     /* now when we have the args, invoke the appropriate underlying function */
3483     switch (num) {
3484     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3485         return do_socket(a[0], a[1], a[2]);
3486     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3487         return do_bind(a[0], a[1], a[2]);
3488     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3489         return do_connect(a[0], a[1], a[2]);
3490     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3491         return get_errno(listen(a[0], a[1]));
3492     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3493         return do_accept4(a[0], a[1], a[2], 0);
3494     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3495         return do_getsockname(a[0], a[1], a[2]);
3496     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3497         return do_getpeername(a[0], a[1], a[2]);
3498     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3499         return do_socketpair(a[0], a[1], a[2], a[3]);
3500     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3501         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3502     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3503         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3504     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3505         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3506     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3507         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3508     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3509         return get_errno(shutdown(a[0], a[1]));
3510     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3511         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3512     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3513         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3514     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3515         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3516     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3517         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3518     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3519         return do_accept4(a[0], a[1], a[2], a[3]);
3520     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3521         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3522     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3523         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3524     default:
3525         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3526         return -TARGET_EINVAL;
3527     }
3528 }
3529 #endif
3530 
#define N_SHM_REGIONS	32

/* Book-keeping for guest shmat() attachments: one slot per mapped SysV
 * shared memory segment, so do_shmdt() can look up the segment size when
 * clearing guest page flags.  Slots with in_use == false are free. */
static struct shm_region {
    abi_ulong start;    /* guest address the segment is attached at */
    abi_ulong size;     /* segment size in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3538 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest layout of struct semid64_ds (semctl IPC_STAT/IPC_SET buffer).
 * Target headers may provide their own definition via TARGET_SEMID64_DS. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;  /* 32-bit ABIs pad each time field to 64 bits */
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3557 
/* Convert the ipc_perm embedded in a guest semid64_ds at target_addr into
 * *host_ip.  Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32 bits wide on some targets, 16 on others */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3585 
/* Convert *host_ip into the ipc_perm embedded in a guest semid64_ds at
 * target_addr.  Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32 bits wide on some targets, 16 on others */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3613 
3614 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3615                                                abi_ulong target_addr)
3616 {
3617     struct target_semid64_ds *target_sd;
3618 
3619     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3620         return -TARGET_EFAULT;
3621     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3622         return -TARGET_EFAULT;
3623     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3624     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3625     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3626     unlock_user_struct(target_sd, target_addr, 0);
3627     return 0;
3628 }
3629 
3630 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3631                                                struct semid_ds *host_sd)
3632 {
3633     struct target_semid64_ds *target_sd;
3634 
3635     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3636         return -TARGET_EFAULT;
3637     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3638         return -TARGET_EFAULT;
3639     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3640     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3641     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3642     unlock_user_struct(target_sd, target_addr, 1);
3643     return 0;
3644 }
3645 
/* Guest layout of struct seminfo, returned by semctl(IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3658 
3659 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3660                                               struct seminfo *host_seminfo)
3661 {
3662     struct target_seminfo *target_seminfo;
3663     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3664         return -TARGET_EFAULT;
3665     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3666     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3667     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3668     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3669     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3670     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3671     __put_user(host_seminfo->semume, &target_seminfo->semume);
3672     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3673     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3674     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3675     unlock_user_struct(target_seminfo, target_addr, 1);
3676     return 0;
3677 }
3678 
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side view of the same union; all pointers are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3692 
3693 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3694                                                abi_ulong target_addr)
3695 {
3696     int nsems;
3697     unsigned short *array;
3698     union semun semun;
3699     struct semid_ds semid_ds;
3700     int i, ret;
3701 
3702     semun.buf = &semid_ds;
3703 
3704     ret = semctl(semid, 0, IPC_STAT, semun);
3705     if (ret == -1)
3706         return get_errno(ret);
3707 
3708     nsems = semid_ds.sem_nsems;
3709 
3710     *host_array = g_try_new(unsigned short, nsems);
3711     if (!*host_array) {
3712         return -TARGET_ENOMEM;
3713     }
3714     array = lock_user(VERIFY_READ, target_addr,
3715                       nsems*sizeof(unsigned short), 1);
3716     if (!array) {
3717         g_free(*host_array);
3718         return -TARGET_EFAULT;
3719     }
3720 
3721     for(i=0; i<nsems; i++) {
3722         __get_user((*host_array)[i], &array[i]);
3723     }
3724     unlock_user(array, target_addr, 0);
3725 
3726     return 0;
3727 }
3728 
3729 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3730                                                unsigned short **host_array)
3731 {
3732     int nsems;
3733     unsigned short *array;
3734     union semun semun;
3735     struct semid_ds semid_ds;
3736     int i, ret;
3737 
3738     semun.buf = &semid_ds;
3739 
3740     ret = semctl(semid, 0, IPC_STAT, semun);
3741     if (ret == -1)
3742         return get_errno(ret);
3743 
3744     nsems = semid_ds.sem_nsems;
3745 
3746     array = lock_user(VERIFY_WRITE, target_addr,
3747                       nsems*sizeof(unsigned short), 0);
3748     if (!array)
3749         return -TARGET_EFAULT;
3750 
3751     for(i=0; i<nsems; i++) {
3752         __put_user((*host_array)[i], &array[i]);
3753     }
3754     g_free(*host_array);
3755     unlock_user(array, target_addr, 1);
3756 
3757     return 0;
3758 }
3759 
3760 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3761                                  abi_ulong target_arg)
3762 {
3763     union target_semun target_su = { .buf = target_arg };
3764     union semun arg;
3765     struct semid_ds dsarg;
3766     unsigned short *array = NULL;
3767     struct seminfo seminfo;
3768     abi_long ret = -TARGET_EINVAL;
3769     abi_long err;
3770     cmd &= 0xff;
3771 
3772     switch( cmd ) {
3773 	case GETVAL:
3774 	case SETVAL:
3775             /* In 64 bit cross-endian situations, we will erroneously pick up
3776              * the wrong half of the union for the "val" element.  To rectify
3777              * this, the entire 8-byte structure is byteswapped, followed by
3778 	     * a swap of the 4 byte val field. In other cases, the data is
3779 	     * already in proper host byte order. */
3780 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3781 		target_su.buf = tswapal(target_su.buf);
3782 		arg.val = tswap32(target_su.val);
3783 	    } else {
3784 		arg.val = target_su.val;
3785 	    }
3786             ret = get_errno(semctl(semid, semnum, cmd, arg));
3787             break;
3788 	case GETALL:
3789 	case SETALL:
3790             err = target_to_host_semarray(semid, &array, target_su.array);
3791             if (err)
3792                 return err;
3793             arg.array = array;
3794             ret = get_errno(semctl(semid, semnum, cmd, arg));
3795             err = host_to_target_semarray(semid, target_su.array, &array);
3796             if (err)
3797                 return err;
3798             break;
3799 	case IPC_STAT:
3800 	case IPC_SET:
3801 	case SEM_STAT:
3802             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3803             if (err)
3804                 return err;
3805             arg.buf = &dsarg;
3806             ret = get_errno(semctl(semid, semnum, cmd, arg));
3807             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3808             if (err)
3809                 return err;
3810             break;
3811 	case IPC_INFO:
3812 	case SEM_INFO:
3813             arg.__buf = &seminfo;
3814             ret = get_errno(semctl(semid, semnum, cmd, arg));
3815             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3816             if (err)
3817                 return err;
3818             break;
3819 	case IPC_RMID:
3820 	case GETPID:
3821 	case GETNCNT:
3822 	case GETZCNT:
3823             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3824             break;
3825     }
3826 
3827     return ret;
3828 }
3829 
/* Guest layout of struct sembuf, the per-operation element of semop(2). */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index within the set */
    short sem_op;             /* operation: add, subtract, or wait-for-zero */
    short sem_flg;            /* IPC_NOWAIT / SEM_UNDO flags */
};
3835 
3836 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3837                                              abi_ulong target_addr,
3838                                              unsigned nsops)
3839 {
3840     struct target_sembuf *target_sembuf;
3841     int i;
3842 
3843     target_sembuf = lock_user(VERIFY_READ, target_addr,
3844                               nsops*sizeof(struct target_sembuf), 1);
3845     if (!target_sembuf)
3846         return -TARGET_EFAULT;
3847 
3848     for(i=0; i<nsops; i++) {
3849         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3850         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3851         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3852     }
3853 
3854     unlock_user(target_sembuf, target_addr, 0);
3855 
3856     return 0;
3857 }
3858 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semtimedop(2)/semop(2); a zero timeout address means semop
 * semantics (block indefinitely).  Returns 0 or a target errno. */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }

    /* Match the kernel's per-call operation limit. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
3915 
/* Guest layout of struct msqid64_ds (msgctl IPC_STAT/IPC_SET buffer). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;  /* 32-bit ABIs pad each time field to 64 bits */
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3939 
3940 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3941                                                abi_ulong target_addr)
3942 {
3943     struct target_msqid_ds *target_md;
3944 
3945     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3946         return -TARGET_EFAULT;
3947     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3948         return -TARGET_EFAULT;
3949     host_md->msg_stime = tswapal(target_md->msg_stime);
3950     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3951     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3952     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3953     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3954     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3955     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3956     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3957     unlock_user_struct(target_md, target_addr, 0);
3958     return 0;
3959 }
3960 
3961 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3962                                                struct msqid_ds *host_md)
3963 {
3964     struct target_msqid_ds *target_md;
3965 
3966     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3967         return -TARGET_EFAULT;
3968     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3969         return -TARGET_EFAULT;
3970     target_md->msg_stime = tswapal(host_md->msg_stime);
3971     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3972     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3973     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3974     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3975     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3976     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3977     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3978     unlock_user_struct(target_md, target_addr, 1);
3979     return 0;
3980 }
3981 
/* Guest layout of struct msginfo, returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3992 
3993 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3994                                               struct msginfo *host_msginfo)
3995 {
3996     struct target_msginfo *target_msginfo;
3997     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3998         return -TARGET_EFAULT;
3999     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4000     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4001     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4002     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4003     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4004     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4005     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4006     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4007     unlock_user_struct(target_msginfo, target_addr, 1);
4008     return 0;
4009 }
4010 
4011 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4012 {
4013     struct msqid_ds dsarg;
4014     struct msginfo msginfo;
4015     abi_long ret = -TARGET_EINVAL;
4016 
4017     cmd &= 0xff;
4018 
4019     switch (cmd) {
4020     case IPC_STAT:
4021     case IPC_SET:
4022     case MSG_STAT:
4023         if (target_to_host_msqid_ds(&dsarg,ptr))
4024             return -TARGET_EFAULT;
4025         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4026         if (host_to_target_msqid_ds(ptr,&dsarg))
4027             return -TARGET_EFAULT;
4028         break;
4029     case IPC_RMID:
4030         ret = get_errno(msgctl(msgid, cmd, NULL));
4031         break;
4032     case IPC_INFO:
4033     case MSG_INFO:
4034         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4035         if (host_to_target_msginfo(ptr, &msginfo))
4036             return -TARGET_EFAULT;
4037         break;
4038     }
4039 
4040     return ret;
4041 }
4042 
/* Guest layout of struct msgbuf; mtext[1] is the start of a
 * variable-length payload following the header. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4047 
/* Emulate msgsnd(2): copy the guest message at msgp into a host buffer
 * and send it.  Returns 0 or a target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer holds the host-sized mtype plus msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): this reads msgsz bytes of mtext while only
     * sizeof(*target_mb) was validated by lock_user_struct above —
     * confirm the guest buffer beyond the header is accessible. */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390 sys_ipc variant takes only five parameters. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4088 
#ifdef __NR_ipc
/* Build the trailing argument(s) of a sys_ipc(MSGRCV) call: most ABIs
 * pass msgp/msgtyp boxed in a two-element array ("the kludge"). */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4102 
/* Emulate msgrcv(2): receive into a host buffer, then copy the payload
 * and mtype back to the guest msgbuf at msgp.  Returns the number of
 * bytes received or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer holds the host-sized mtype plus up to msgsz bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* Copy the received payload just past the guest mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): on error paths host_mb->mtype is uninitialized, so
     * this stores indeterminate data to the guest — confirm intended. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* NOTE(review): target_mb is always non-NULL here (a NULL lock
     * returns early above), so this guard is redundant. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4154 
4155 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4156                                                abi_ulong target_addr)
4157 {
4158     struct target_shmid_ds *target_sd;
4159 
4160     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4161         return -TARGET_EFAULT;
4162     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4163         return -TARGET_EFAULT;
4164     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4165     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4166     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4167     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4168     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4169     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4170     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4171     unlock_user_struct(target_sd, target_addr, 0);
4172     return 0;
4173 }
4174 
4175 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4176                                                struct shmid_ds *host_sd)
4177 {
4178     struct target_shmid_ds *target_sd;
4179 
4180     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4181         return -TARGET_EFAULT;
4182     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4183         return -TARGET_EFAULT;
4184     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4185     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4186     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4187     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4188     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4189     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4190     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4191     unlock_user_struct(target_sd, target_addr, 1);
4192     return 0;
4193 }
4194 
/* Guest layout of struct shminfo, returned by shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4202 
4203 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4204                                               struct shminfo *host_shminfo)
4205 {
4206     struct target_shminfo *target_shminfo;
4207     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4208         return -TARGET_EFAULT;
4209     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4210     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4211     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4212     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4213     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4214     unlock_user_struct(target_shminfo, target_addr, 1);
4215     return 0;
4216 }
4217 
/* Guest layout of struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4226 
4227 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4228                                                struct shm_info *host_shm_info)
4229 {
4230     struct target_shm_info *target_shm_info;
4231     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4232         return -TARGET_EFAULT;
4233     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4234     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4235     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4236     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4237     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4238     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4239     unlock_user_struct(target_shm_info, target_addr, 1);
4240     return 0;
4241 }
4242 
4243 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4244 {
4245     struct shmid_ds dsarg;
4246     struct shminfo shminfo;
4247     struct shm_info shm_info;
4248     abi_long ret = -TARGET_EINVAL;
4249 
4250     cmd &= 0xff;
4251 
4252     switch(cmd) {
4253     case IPC_STAT:
4254     case IPC_SET:
4255     case SHM_STAT:
4256         if (target_to_host_shmid_ds(&dsarg, buf))
4257             return -TARGET_EFAULT;
4258         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4259         if (host_to_target_shmid_ds(buf, &dsarg))
4260             return -TARGET_EFAULT;
4261         break;
4262     case IPC_INFO:
4263         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4264         if (host_to_target_shminfo(buf, &shminfo))
4265             return -TARGET_EFAULT;
4266         break;
4267     case SHM_INFO:
4268         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4269         if (host_to_target_shm_info(buf, &shm_info))
4270             return -TARGET_EFAULT;
4271         break;
4272     case IPC_RMID:
4273     case SHM_LOCK:
4274     case SHM_UNLOCK:
4275         ret = get_errno(shmctl(shmid, cmd, NULL));
4276         break;
4277     }
4278 
4279     return ret;
4280 }
4281 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: shmat() addresses need only guest-page alignment. */
    return TARGET_PAGE_SIZE;
}
#endif
4301 
/* Emulate shmat(2): attach SysV shared memory segment shmid at guest
 * address shmaddr (or pick an address if 0), update guest page flags and
 * the shm_regions table.  Returns the guest address or a target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce guest SHMLBA alignment: round down with SHM_RND,
     * otherwise reject a misaligned address. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved the range, map over it. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the attached range visible to the guest page tracking. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment so do_shmdt() can find its size later.
     * NOTE(review): if all N_SHM_REGIONS slots are busy the attachment
     * is silently not recorded — confirm this is acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4371 
4372 static inline abi_long do_shmdt(abi_ulong shmaddr)
4373 {
4374     int i;
4375     abi_long rv;
4376 
4377     mmap_lock();
4378 
4379     for (i = 0; i < N_SHM_REGIONS; ++i) {
4380         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4381             shm_regions[i].in_use = false;
4382             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4383             break;
4384         }
4385     }
4386     rv = get_errno(shmdt(g2h(shmaddr)));
4387 
4388     mmap_unlock();
4389 
4390     return rv;
4391 }
4392 
4393 #ifdef TARGET_NR_ipc
4394 /* ??? This only works with linear mappings.  */
4395 /* do_ipc() must return target values and target errnos. */
4396 static abi_long do_ipc(CPUArchState *cpu_env,
4397                        unsigned int call, abi_long first,
4398                        abi_long second, abi_long third,
4399                        abi_long ptr, abi_long fifth)
4400 {
4401     int version;
4402     abi_long ret = 0;
4403 
4404     version = call >> 16;
4405     call &= 0xffff;
4406 
4407     switch (call) {
4408     case IPCOP_semop:
4409         ret = do_semtimedop(first, ptr, second, 0);
4410         break;
4411     case IPCOP_semtimedop:
4412     /*
4413      * The s390 sys_ipc variant has only five parameters instead of six
4414      * (as for default variant) and the only difference is the handling of
4415      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4416      * to a struct timespec where the generic variant uses fifth parameter.
4417      */
4418 #if defined(TARGET_S390X)
4419         ret = do_semtimedop(first, ptr, second, third);
4420 #else
4421         ret = do_semtimedop(first, ptr, second, fifth);
4422 #endif
4423         break;
4424 
4425     case IPCOP_semget:
4426         ret = get_errno(semget(first, second, third));
4427         break;
4428 
4429     case IPCOP_semctl: {
4430         /* The semun argument to semctl is passed by value, so dereference the
4431          * ptr argument. */
4432         abi_ulong atptr;
4433         get_user_ual(atptr, ptr);
4434         ret = do_semctl(first, second, third, atptr);
4435         break;
4436     }
4437 
4438     case IPCOP_msgget:
4439         ret = get_errno(msgget(first, second));
4440         break;
4441 
4442     case IPCOP_msgsnd:
4443         ret = do_msgsnd(first, ptr, second, third);
4444         break;
4445 
4446     case IPCOP_msgctl:
4447         ret = do_msgctl(first, second, ptr);
4448         break;
4449 
4450     case IPCOP_msgrcv:
4451         switch (version) {
4452         case 0:
4453             {
4454                 struct target_ipc_kludge {
4455                     abi_long msgp;
4456                     abi_long msgtyp;
4457                 } *tmp;
4458 
4459                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4460                     ret = -TARGET_EFAULT;
4461                     break;
4462                 }
4463 
4464                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4465 
4466                 unlock_user_struct(tmp, ptr, 0);
4467                 break;
4468             }
4469         default:
4470             ret = do_msgrcv(first, ptr, second, fifth, third);
4471         }
4472         break;
4473 
4474     case IPCOP_shmat:
4475         switch (version) {
4476         default:
4477         {
4478             abi_ulong raddr;
4479             raddr = do_shmat(cpu_env, first, ptr, second);
4480             if (is_error(raddr))
4481                 return get_errno(raddr);
4482             if (put_user_ual(raddr, third))
4483                 return -TARGET_EFAULT;
4484             break;
4485         }
4486         case 1:
4487             ret = -TARGET_EINVAL;
4488             break;
4489         }
4490 	break;
4491     case IPCOP_shmdt:
4492         ret = do_shmdt(ptr);
4493 	break;
4494 
4495     case IPCOP_shmget:
4496 	/* IPC_* flag values are the same on all linux platforms */
4497 	ret = get_errno(shmget(first, second, third));
4498 	break;
4499 
4500 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4501     case IPCOP_shmctl:
4502         ret = do_shmctl(first, second, ptr);
4503         break;
4504     default:
4505         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4506                       call, version);
4507 	ret = -TARGET_ENOSYS;
4508 	break;
4509     }
4510     return ret;
4511 }
4512 #endif
4513 
/* kernel structure types definitions */

/*
 * First pass over syscall_types.h: generate an enum with one STRUCT_<name>
 * constant per described structure; STRUCT_MAX ends up being the count.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/*
 * Second pass: emit a struct_<name>_def[] argtype description for each
 * regular structure; STRUCT_SPECIAL entries expand to nothing here.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed buf_temp staging buffer used by the ioctl helpers. */
#define MAX_STRUCT_SIZE 4096
4532 
4533 #ifdef CONFIG_FIEMAP
4534 /* So fiemap access checks don't overflow on 32 bit systems.
4535  * This is very slightly smaller than the limit imposed by
4536  * the underlying kernel.
4537  */
4538 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4539                             / sizeof(struct fiemap_extent))
4540 
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the guest's fiemap header into buf_temp (host layout). */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts whose extent array size would overflow (32-bit safe). */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4620 #endif
4621 
4622 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4623                                 int fd, int cmd, abi_long arg)
4624 {
4625     const argtype *arg_type = ie->arg_type;
4626     int target_size;
4627     void *argptr;
4628     int ret;
4629     struct ifconf *host_ifconf;
4630     uint32_t outbufsz;
4631     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4632     int target_ifreq_size;
4633     int nb_ifreq;
4634     int free_buf = 0;
4635     int i;
4636     int target_ifc_len;
4637     abi_long target_ifc_buf;
4638     int host_ifc_len;
4639     char *host_ifc_buf;
4640 
4641     assert(arg_type[0] == TYPE_PTR);
4642     assert(ie->access == IOC_RW);
4643 
4644     arg_type++;
4645     target_size = thunk_type_size(arg_type, 0);
4646 
4647     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4648     if (!argptr)
4649         return -TARGET_EFAULT;
4650     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4651     unlock_user(argptr, arg, 0);
4652 
4653     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4654     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4655     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4656 
4657     if (target_ifc_buf != 0) {
4658         target_ifc_len = host_ifconf->ifc_len;
4659         nb_ifreq = target_ifc_len / target_ifreq_size;
4660         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4661 
4662         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4663         if (outbufsz > MAX_STRUCT_SIZE) {
4664             /*
4665              * We can't fit all the extents into the fixed size buffer.
4666              * Allocate one that is large enough and use it instead.
4667              */
4668             host_ifconf = malloc(outbufsz);
4669             if (!host_ifconf) {
4670                 return -TARGET_ENOMEM;
4671             }
4672             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4673             free_buf = 1;
4674         }
4675         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4676 
4677         host_ifconf->ifc_len = host_ifc_len;
4678     } else {
4679       host_ifc_buf = NULL;
4680     }
4681     host_ifconf->ifc_buf = host_ifc_buf;
4682 
4683     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4684     if (!is_error(ret)) {
4685 	/* convert host ifc_len to target ifc_len */
4686 
4687         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4688         target_ifc_len = nb_ifreq * target_ifreq_size;
4689         host_ifconf->ifc_len = target_ifc_len;
4690 
4691 	/* restore target ifc_buf */
4692 
4693         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4694 
4695 	/* copy struct ifconf to target user */
4696 
4697         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4698         if (!argptr)
4699             return -TARGET_EFAULT;
4700         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4701         unlock_user(argptr, arg, target_size);
4702 
4703         if (target_ifc_buf != 0) {
4704             /* copy ifreq[] to target user */
4705             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4706             for (i = 0; i < nb_ifreq ; i++) {
4707                 thunk_convert(argptr + i * target_ifreq_size,
4708                               host_ifc_buf + i * sizeof(struct ifreq),
4709                               ifreq_arg_type, THUNK_TARGET);
4710             }
4711             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4712         }
4713     }
4714 
4715     if (free_buf) {
4716         free(host_ifconf);
4717     }
4718 
4719     return ret;
4720 }
4721 
4722 #if defined(CONFIG_USBFS)
4723 #if HOST_LONG_BITS > 64
4724 #error USBDEVFS thunks do not support >64 bit hosts yet.
4725 #endif
/*
 * Bookkeeping wrapper around a host usbdevfs_urb.  The embedded host_urb
 * is what the kernel sees; on reap, offsetof() arithmetic recovers this
 * wrapper from the host URB pointer the kernel hands back.
 */
struct live_urb {
    uint64_t target_urb_adr;      /* guest address of the usbdevfs_urb */
    uint64_t target_buf_adr;      /* guest address of the data buffer */
    char *target_buf_ptr;         /* host view of the locked guest buffer */
    struct usbdevfs_urb host_urb; /* host copy submitted to the kernel */
};
4732 
4733 static GHashTable *usbdevfs_urb_hashtable(void)
4734 {
4735     static GHashTable *urb_hashtable;
4736 
4737     if (!urb_hashtable) {
4738         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4739     }
4740     return urb_hashtable;
4741 }
4742 
/* Track an in-flight URB; key and value are the same object, and the key
 * hash covers its leading target_urb_adr field. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4748 
4749 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4750 {
4751     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4752     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4753 }
4754 
/* Stop tracking a URB (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4760 
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The host ioctl writes a pointer to the reaped host URB into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* host_urb is embedded inside a live_urb; step back to the wrapper. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer, flushing buffer_length bytes back. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
4820 
4821 static abi_long
4822 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4823                              uint8_t *buf_temp __attribute__((unused)),
4824                              int fd, int cmd, abi_long arg)
4825 {
4826     struct live_urb *lurb;
4827 
4828     /* map target address back to host URB with metadata. */
4829     lurb = urb_hashtable_lookup(arg);
4830     if (!lurb) {
4831         return -TARGET_EFAULT;
4832     }
4833     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4834 }
4835 
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest addresses so reap can write results back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: release the buffer without copying back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Track the in-flight URB until it is reaped or discarded. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
4896 #endif /* CONFIG_USBFS */
4897 
4898 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4899                             int cmd, abi_long arg)
4900 {
4901     void *argptr;
4902     struct dm_ioctl *host_dm;
4903     abi_long guest_data;
4904     uint32_t guest_data_size;
4905     int target_size;
4906     const argtype *arg_type = ie->arg_type;
4907     abi_long ret;
4908     void *big_buf = NULL;
4909     char *host_data;
4910 
4911     arg_type++;
4912     target_size = thunk_type_size(arg_type, 0);
4913     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4914     if (!argptr) {
4915         ret = -TARGET_EFAULT;
4916         goto out;
4917     }
4918     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4919     unlock_user(argptr, arg, 0);
4920 
4921     /* buf_temp is too small, so fetch things into a bigger buffer */
4922     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4923     memcpy(big_buf, buf_temp, target_size);
4924     buf_temp = big_buf;
4925     host_dm = big_buf;
4926 
4927     guest_data = arg + host_dm->data_start;
4928     if ((guest_data - arg) < 0) {
4929         ret = -TARGET_EINVAL;
4930         goto out;
4931     }
4932     guest_data_size = host_dm->data_size - host_dm->data_start;
4933     host_data = (char*)host_dm + host_dm->data_start;
4934 
4935     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4936     if (!argptr) {
4937         ret = -TARGET_EFAULT;
4938         goto out;
4939     }
4940 
4941     switch (ie->host_cmd) {
4942     case DM_REMOVE_ALL:
4943     case DM_LIST_DEVICES:
4944     case DM_DEV_CREATE:
4945     case DM_DEV_REMOVE:
4946     case DM_DEV_SUSPEND:
4947     case DM_DEV_STATUS:
4948     case DM_DEV_WAIT:
4949     case DM_TABLE_STATUS:
4950     case DM_TABLE_CLEAR:
4951     case DM_TABLE_DEPS:
4952     case DM_LIST_VERSIONS:
4953         /* no input data */
4954         break;
4955     case DM_DEV_RENAME:
4956     case DM_DEV_SET_GEOMETRY:
4957         /* data contains only strings */
4958         memcpy(host_data, argptr, guest_data_size);
4959         break;
4960     case DM_TARGET_MSG:
4961         memcpy(host_data, argptr, guest_data_size);
4962         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4963         break;
4964     case DM_TABLE_LOAD:
4965     {
4966         void *gspec = argptr;
4967         void *cur_data = host_data;
4968         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4969         int spec_size = thunk_type_size(arg_type, 0);
4970         int i;
4971 
4972         for (i = 0; i < host_dm->target_count; i++) {
4973             struct dm_target_spec *spec = cur_data;
4974             uint32_t next;
4975             int slen;
4976 
4977             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4978             slen = strlen((char*)gspec + spec_size) + 1;
4979             next = spec->next;
4980             spec->next = sizeof(*spec) + slen;
4981             strcpy((char*)&spec[1], gspec + spec_size);
4982             gspec += next;
4983             cur_data += spec->next;
4984         }
4985         break;
4986     }
4987     default:
4988         ret = -TARGET_EINVAL;
4989         unlock_user(argptr, guest_data, 0);
4990         goto out;
4991     }
4992     unlock_user(argptr, guest_data, 0);
4993 
4994     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4995     if (!is_error(ret)) {
4996         guest_data = arg + host_dm->data_start;
4997         guest_data_size = host_dm->data_size - host_dm->data_start;
4998         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4999         switch (ie->host_cmd) {
5000         case DM_REMOVE_ALL:
5001         case DM_DEV_CREATE:
5002         case DM_DEV_REMOVE:
5003         case DM_DEV_RENAME:
5004         case DM_DEV_SUSPEND:
5005         case DM_DEV_STATUS:
5006         case DM_TABLE_LOAD:
5007         case DM_TABLE_CLEAR:
5008         case DM_TARGET_MSG:
5009         case DM_DEV_SET_GEOMETRY:
5010             /* no return data */
5011             break;
5012         case DM_LIST_DEVICES:
5013         {
5014             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5015             uint32_t remaining_data = guest_data_size;
5016             void *cur_data = argptr;
5017             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5018             int nl_size = 12; /* can't use thunk_size due to alignment */
5019 
5020             while (1) {
5021                 uint32_t next = nl->next;
5022                 if (next) {
5023                     nl->next = nl_size + (strlen(nl->name) + 1);
5024                 }
5025                 if (remaining_data < nl->next) {
5026                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5027                     break;
5028                 }
5029                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5030                 strcpy(cur_data + nl_size, nl->name);
5031                 cur_data += nl->next;
5032                 remaining_data -= nl->next;
5033                 if (!next) {
5034                     break;
5035                 }
5036                 nl = (void*)nl + next;
5037             }
5038             break;
5039         }
5040         case DM_DEV_WAIT:
5041         case DM_TABLE_STATUS:
5042         {
5043             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5044             void *cur_data = argptr;
5045             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5046             int spec_size = thunk_type_size(arg_type, 0);
5047             int i;
5048 
5049             for (i = 0; i < host_dm->target_count; i++) {
5050                 uint32_t next = spec->next;
5051                 int slen = strlen((char*)&spec[1]) + 1;
5052                 spec->next = (cur_data - argptr) + spec_size + slen;
5053                 if (guest_data_size < spec->next) {
5054                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5055                     break;
5056                 }
5057                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5058                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5059                 cur_data = argptr + spec->next;
5060                 spec = (void*)host_dm + host_dm->data_start + next;
5061             }
5062             break;
5063         }
5064         case DM_TABLE_DEPS:
5065         {
5066             void *hdata = (void*)host_dm + host_dm->data_start;
5067             int count = *(uint32_t*)hdata;
5068             uint64_t *hdev = hdata + 8;
5069             uint64_t *gdev = argptr + 8;
5070             int i;
5071 
5072             *(uint32_t*)argptr = tswap32(count);
5073             for (i = 0; i < count; i++) {
5074                 *gdev = tswap64(*hdev);
5075                 gdev++;
5076                 hdev++;
5077             }
5078             break;
5079         }
5080         case DM_LIST_VERSIONS:
5081         {
5082             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5083             uint32_t remaining_data = guest_data_size;
5084             void *cur_data = argptr;
5085             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5086             int vers_size = thunk_type_size(arg_type, 0);
5087 
5088             while (1) {
5089                 uint32_t next = vers->next;
5090                 if (next) {
5091                     vers->next = vers_size + (strlen(vers->name) + 1);
5092                 }
5093                 if (remaining_data < vers->next) {
5094                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5095                     break;
5096                 }
5097                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5098                 strcpy(cur_data + vers_size, vers->name);
5099                 cur_data += vers->next;
5100                 remaining_data -= vers->next;
5101                 if (!next) {
5102                     break;
5103                 }
5104                 vers = (void*)vers + next;
5105             }
5106             break;
5107         }
5108         default:
5109             unlock_user(argptr, guest_data, 0);
5110             ret = -TARGET_EINVAL;
5111             goto out;
5112         }
5113         unlock_user(argptr, guest_data, guest_data_size);
5114 
5115         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5116         if (!argptr) {
5117             ret = -TARGET_EFAULT;
5118             goto out;
5119         }
5120         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5121         unlock_user(argptr, arg, target_size);
5122     }
5123 out:
5124     g_free(big_buf);
5125     return ret;
5126 }
5127 
5128 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5129                                int cmd, abi_long arg)
5130 {
5131     void *argptr;
5132     int target_size;
5133     const argtype *arg_type = ie->arg_type;
5134     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5135     abi_long ret;
5136 
5137     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5138     struct blkpg_partition host_part;
5139 
5140     /* Read and convert blkpg */
5141     arg_type++;
5142     target_size = thunk_type_size(arg_type, 0);
5143     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5144     if (!argptr) {
5145         ret = -TARGET_EFAULT;
5146         goto out;
5147     }
5148     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5149     unlock_user(argptr, arg, 0);
5150 
5151     switch (host_blkpg->op) {
5152     case BLKPG_ADD_PARTITION:
5153     case BLKPG_DEL_PARTITION:
5154         /* payload is struct blkpg_partition */
5155         break;
5156     default:
5157         /* Unknown opcode */
5158         ret = -TARGET_EINVAL;
5159         goto out;
5160     }
5161 
5162     /* Read and convert blkpg->data */
5163     arg = (abi_long)(uintptr_t)host_blkpg->data;
5164     target_size = thunk_type_size(part_arg_type, 0);
5165     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5166     if (!argptr) {
5167         ret = -TARGET_EFAULT;
5168         goto out;
5169     }
5170     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5171     unlock_user(argptr, arg, 0);
5172 
5173     /* Swizzle the data pointer to our local copy and call! */
5174     host_blkpg->data = &host_part;
5175     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5176 
5177 out:
5178     return ret;
5179 }
5180 
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    /* Walk the fields one by one so the rt_dev pointer can be special-cased:
     * it must be swizzled to a locked host copy of the guest string. */
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above always visits the rt_dev field, so both pointers
     * must have been set by the time we get here. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5245 }
5246 
5247 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5248                                      int fd, int cmd, abi_long arg)
5249 {
5250     int sig = target_to_host_signal(arg);
5251     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5252 }
5253 
5254 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5255                                     int fd, int cmd, abi_long arg)
5256 {
5257     struct timeval tv;
5258     abi_long ret;
5259 
5260     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5261     if (is_error(ret)) {
5262         return ret;
5263     }
5264 
5265     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5266         if (copy_to_user_timeval(arg, &tv)) {
5267             return -TARGET_EFAULT;
5268         }
5269     } else {
5270         if (copy_to_user_timeval64(arg, &tv)) {
5271             return -TARGET_EFAULT;
5272         }
5273     }
5274 
5275     return ret;
5276 }
5277 
5278 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5279                                       int fd, int cmd, abi_long arg)
5280 {
5281     struct timespec ts;
5282     abi_long ret;
5283 
5284     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5285     if (is_error(ret)) {
5286         return ret;
5287     }
5288 
5289     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5290         if (host_to_target_timespec(arg, &ts)) {
5291             return -TARGET_EFAULT;
5292         }
5293     } else{
5294         if (host_to_target_timespec64(arg, &ts)) {
5295             return -TARGET_EFAULT;
5296         }
5297     }
5298 
5299     return ret;
5300 }
5301 
#ifdef TIOCGPTPEER
/* TIOCGPTPEER: convert the guest's open(2) flags for the peer pty fd
 * into host flags before issuing the ioctl. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
5310 
5311 #ifdef HAVE_DRM_H
5312 
/*
 * Drop the locks taken by target_to_host_drmversion() on the guest's
 * name/date/desc buffers.  When 'copy' is set, the first *_len bytes
 * of each buffer are written back to guest memory; otherwise the
 * locks are released without copying.
 *
 * NOTE(review): target_ver->name/date/desc hold target-endian guest
 * addresses but are passed to unlock_user() without tswapal() — this
 * looks suspect on cross-endian hosts; confirm together with
 * target_to_host_drmversion().
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                                copy ? host_ver->desc_len : 0);
}
5324 
/*
 * Prepare a host struct drm_version from the guest's: copy the three
 * buffer lengths and lock the corresponding guest buffers so the host
 * kernel can write into them directly.  Returns 0 on success or
 * -EFAULT; on failure any buffers locked so far are released again.
 *
 * NOTE(review): the guest addresses (target_ver->name etc.) and the
 * lengths passed to lock_user() are used as stored in target memory,
 * without byte swapping (unlike the *_len fields fetched separately
 * via __get_user) — looks suspect on cross-endian hosts; confirm.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Release whatever was locked before the failure; no copy-back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5362 
/*
 * Copy the result of DRM_IOCTL_VERSION back to the guest: the numeric
 * fields via __put_user, then unlock the three string buffers with
 * copy-back enabled so the data the kernel wrote reaches guest memory.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5375 
5376 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5377                              int fd, int cmd, abi_long arg)
5378 {
5379     struct drm_version *ver;
5380     struct target_drm_version *target_ver;
5381     abi_long ret;
5382 
5383     switch (ie->host_cmd) {
5384     case DRM_IOCTL_VERSION:
5385         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5386             return -TARGET_EFAULT;
5387         }
5388         ver = (struct drm_version *)buf_temp;
5389         ret = target_to_host_drmversion(ver, target_ver);
5390         if (!is_error(ret)) {
5391             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5392             if (is_error(ret)) {
5393                 unlock_drm_version(ver, target_ver, false);
5394             } else {
5395                 host_to_target_drmversion(target_ver, ver);
5396             }
5397         }
5398         unlock_user_struct(target_ver, arg, 0);
5399         return ret;
5400     }
5401     return -TARGET_ENOSYS;
5402 }
5403 
5404 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5405                                            struct drm_i915_getparam *gparam,
5406                                            int fd, abi_long arg)
5407 {
5408     abi_long ret;
5409     int value;
5410     struct target_drm_i915_getparam *target_gparam;
5411 
5412     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5413         return -TARGET_EFAULT;
5414     }
5415 
5416     __get_user(gparam->param, &target_gparam->param);
5417     gparam->value = &value;
5418     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5419     put_user_s32(value, target_gparam->value);
5420 
5421     unlock_user_struct(target_gparam, arg, 0);
5422     return ret;
5423 }
5424 
5425 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5426                                   int fd, int cmd, abi_long arg)
5427 {
5428     switch (ie->host_cmd) {
5429     case DRM_IOCTL_I915_GETPARAM:
5430         return do_ioctl_drm_i915_getparam(ie,
5431                                           (struct drm_i915_getparam *)buf_temp,
5432                                           fd, arg);
5433     default:
5434         return -TARGET_ENOSYS;
5435     }
5436 }
5437 
5438 #endif
5439 
/*
 * Table mapping target ioctl numbers to host numbers plus conversion
 * information.  Populated from ioctls.h via the three IOCTL* macros;
 * IOCTL_IGNORE entries get host_cmd == 0 and IOCTL_SPECIAL entries a
 * custom do_ioctl handler.  The all-zero sentinel terminates the
 * linear search in do_ioctl().
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5450 
5451 /* ??? Implement proper locking for ioctls.  */
5452 /* do_ioctl() Must return target values and target errnos. */
5453 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5454 {
5455     const IOCTLEntry *ie;
5456     const argtype *arg_type;
5457     abi_long ret;
5458     uint8_t buf_temp[MAX_STRUCT_SIZE];
5459     int target_size;
5460     void *argptr;
5461 
5462     ie = ioctl_entries;
5463     for(;;) {
5464         if (ie->target_cmd == 0) {
5465             qemu_log_mask(
5466                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5467             return -TARGET_ENOSYS;
5468         }
5469         if (ie->target_cmd == cmd)
5470             break;
5471         ie++;
5472     }
5473     arg_type = ie->arg_type;
5474     if (ie->do_ioctl) {
5475         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5476     } else if (!ie->host_cmd) {
5477         /* Some architectures define BSD ioctls in their headers
5478            that are not implemented in Linux.  */
5479         return -TARGET_ENOSYS;
5480     }
5481 
5482     switch(arg_type[0]) {
5483     case TYPE_NULL:
5484         /* no argument */
5485         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5486         break;
5487     case TYPE_PTRVOID:
5488     case TYPE_INT:
5489     case TYPE_LONG:
5490     case TYPE_ULONG:
5491         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5492         break;
5493     case TYPE_PTR:
5494         arg_type++;
5495         target_size = thunk_type_size(arg_type, 0);
5496         switch(ie->access) {
5497         case IOC_R:
5498             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5499             if (!is_error(ret)) {
5500                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5501                 if (!argptr)
5502                     return -TARGET_EFAULT;
5503                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5504                 unlock_user(argptr, arg, target_size);
5505             }
5506             break;
5507         case IOC_W:
5508             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5509             if (!argptr)
5510                 return -TARGET_EFAULT;
5511             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5512             unlock_user(argptr, arg, 0);
5513             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5514             break;
5515         default:
5516         case IOC_RW:
5517             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5518             if (!argptr)
5519                 return -TARGET_EFAULT;
5520             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5521             unlock_user(argptr, arg, 0);
5522             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5523             if (!is_error(ret)) {
5524                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5525                 if (!argptr)
5526                     return -TARGET_EFAULT;
5527                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5528                 unlock_user(argptr, arg, target_size);
5529             }
5530             break;
5531         }
5532         break;
5533     default:
5534         qemu_log_mask(LOG_UNIMP,
5535                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5536                       (long)cmd, arg_type[0]);
5537         ret = -TARGET_ENOSYS;
5538         break;
5539     }
5540     return ret;
5541 }
5542 
/* termios c_iflag (input mode) bits: target <-> host translation. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5561 
/* termios c_oflag (output mode) bits: target <-> host translation;
 * the *DLY entries are multi-bit delay fields, translated per value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
5589 
/* termios c_cflag (control mode) bits: target <-> host translation;
 * CBAUD and CSIZE are multi-bit fields, translated per value. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
5624 
/* termios c_lflag (local mode) bits: target <-> host translation. */
static const bitmask_transtbl lflag_tbl[] = {
  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
  { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
  { 0, 0, 0, 0 }
};
5644 
5645 static void target_to_host_termios (void *dst, const void *src)
5646 {
5647     struct host_termios *host = dst;
5648     const struct target_termios *target = src;
5649 
5650     host->c_iflag =
5651         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5652     host->c_oflag =
5653         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5654     host->c_cflag =
5655         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5656     host->c_lflag =
5657         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5658     host->c_line = target->c_line;
5659 
5660     memset(host->c_cc, 0, sizeof(host->c_cc));
5661     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5662     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5663     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5664     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5665     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5666     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5667     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5668     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5669     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5670     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5671     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5672     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5673     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5674     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5675     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5676     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5677     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5678 }
5679 
5680 static void host_to_target_termios (void *dst, const void *src)
5681 {
5682     struct target_termios *target = dst;
5683     const struct host_termios *host = src;
5684 
5685     target->c_iflag =
5686         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5687     target->c_oflag =
5688         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5689     target->c_cflag =
5690         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5691     target->c_lflag =
5692         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5693     target->c_line = host->c_line;
5694 
5695     memset(target->c_cc, 0, sizeof(target->c_cc));
5696     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5697     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5698     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5699     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5700     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5701     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5702     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5703     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5704     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5705     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5706     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5707     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5708     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5709     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5710     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5711     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5712     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5713 }
5714 
/* Thunk descriptor for termios: custom converters plus the size and
 * alignment of both the target and host representations.  The print
 * hook is print_termios (presumably for syscall tracing — confirm). */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5721 
5722 static bitmask_transtbl mmap_flags_tbl[] = {
5723     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5724     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5725     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5726     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5727       MAP_ANONYMOUS, MAP_ANONYMOUS },
5728     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5729       MAP_GROWSDOWN, MAP_GROWSDOWN },
5730     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5731       MAP_DENYWRITE, MAP_DENYWRITE },
5732     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5733       MAP_EXECUTABLE, MAP_EXECUTABLE },
5734     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5735     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5736       MAP_NORESERVE, MAP_NORESERVE },
5737     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5738     /* MAP_STACK had been ignored by the kernel for quite some time.
5739        Recognize it for the target insofar as we do not want to pass
5740        it through to the host.  */
5741     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5742     { 0, 0, 0, 0 }
5743 };
5744 
5745 /*
5746  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5747  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5748  */
5749 #if defined(TARGET_I386)
5750 
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;  /* lazily allocated in write_ldt() */
5753 
5754 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5755 {
5756     int size;
5757     void *p;
5758 
5759     if (!ldt_table)
5760         return 0;
5761     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5762     if (size > bytecount)
5763         size = bytecount;
5764     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5765     if (!p)
5766         return -TARGET_EFAULT;
5767     /* ??? Should this by byteswapped?  */
5768     memcpy(p, ldt_table, size);
5769     unlock_user(p, ptr, size);
5770     return size;
5771 }
5772 
/* XXX: add locking support */
/*
 * Emulate modify_ldt() writes: install one segment descriptor into the
 * guest's (lazily allocated) LDT.  'oldmode' selects the legacy flags
 * interpretation.  The descriptor packing mirrors the Linux kernel's
 * own write_ldt() logic.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the request out of guest memory. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* 64-bit ("long mode") code segment bit only exists on 64-bit ABIs. */
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 is only valid in new mode and for not-present
     * segments (matches the kernel's checks). */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes; write the two halves target-endian. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5864 
5865 /* specific and weird i386 syscalls */
5866 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5867                               unsigned long bytecount)
5868 {
5869     abi_long ret;
5870 
5871     switch (func) {
5872     case 0:
5873         ret = read_ldt(ptr, bytecount);
5874         break;
5875     case 1:
5876         ret = write_ldt(env, ptr, bytecount, 1);
5877         break;
5878     case 0x11:
5879         ret = write_ldt(env, ptr, bytecount, 0);
5880         break;
5881     default:
5882         ret = -TARGET_ENOSYS;
5883         break;
5884     }
5885     return ret;
5886 }
5887 
5888 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area: install a TLS segment descriptor into the
 * guest GDT.  An entry_number of -1 asks us to pick the first free TLS
 * slot and report the chosen index back to the guest.  The descriptor
 * packing mirrors the kernel's implementation.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Byte-swap the request out of guest memory. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and tell the guest which one we picked. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the kernel user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* 64-bit ("long mode") code segment bit only exists on 64-bit ABIs. */
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 is only valid for not-present segments. */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5973 
/*
 * Emulate get_thread_area: read the GDT descriptor selected by the
 * guest's entry_number and unpack it back into the user_desc layout
 * (inverse of do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Read the two target-endian halves of the descriptor. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits back into user_desc flag fields. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    /* Byte-swap the results back into guest memory. */
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6020 
/* arch_prctl is not provided on the 32-bit ABI (TLS is handled via
 * set_thread_area/get_thread_area instead), so always fail. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6025 #else
6026 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6027 {
6028     abi_long ret = 0;
6029     abi_ulong val;
6030     int idx;
6031 
6032     switch(code) {
6033     case TARGET_ARCH_SET_GS:
6034     case TARGET_ARCH_SET_FS:
6035         if (code == TARGET_ARCH_SET_GS)
6036             idx = R_GS;
6037         else
6038             idx = R_FS;
6039         cpu_x86_load_seg(env, idx, 0);
6040         env->segs[idx].base = addr;
6041         break;
6042     case TARGET_ARCH_GET_GS:
6043     case TARGET_ARCH_GET_FS:
6044         if (code == TARGET_ARCH_GET_GS)
6045             idx = R_GS;
6046         else
6047             idx = R_FS;
6048         val = env->segs[idx].base;
6049         if (put_user(val, addr, abi_ulong))
6050             ret = -TARGET_EFAULT;
6051         break;
6052     default:
6053         ret = -TARGET_EINVAL;
6054         break;
6055     }
6056     return ret;
6057 }
6058 #endif /* defined(TARGET_ABI32 */
6059 
6060 #endif /* defined(TARGET_I386) */
6061 
6062 #define NEW_STACK_SIZE 0x40000
6063 
6064 
/* Serialises thread creation so that child setup appears atomic; also
   held by do_fork() until the new thread's TLS state is initialized. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects the 'cond' handshake below */
    pthread_cond_t cond;      /* signalled once the child is running */
    pthread_t thread;
    uint32_t tid;             /* filled in by the child via sys_gettid() */
    abi_ulong child_tidptr;   /* guest addresses to store the tid at, or 0 */
    abi_ulong parent_tidptr;
    sigset_t sigmask;         /* signal mask to restore in the child */
} new_thread_info;
6076 
/*
 * Start routine for guest threads created by do_fork() with CLONE_VM.
 * Registers the host thread with RCU/TCG, publishes its tid where the
 * clone flags requested, signals the parent that it is running, then
 * enters the guest CPU loop.  Never returns.
 *
 * NOTE: 'info' points at the parent's stack frame; it must not be
 * touched after the condition variable is signalled below.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Store the new tid at the guest addresses requested by the
       CLONE_CHILD_SETTID / CLONE_PARENT_SETTID flags. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6110 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Emulates clone()/fork()/vfork(): with CLONE_VM a new host thread is
   created running clone_func(); without it, a real host fork() is
   performed.  Returns the new tid/pid, -1 on thread-creation failure,
   or a negative target errno for invalid flag combinations. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM set: create a new guest thread in this process. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Threads must use exactly the flag set glibc's pthreads use. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the return values of the three attr calls below
           are each overwritten and never checked. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore the signal mask saved above. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed past its TLS wait. */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6253 
/*
 * Translate a guest fcntl command number to the host's numbering.
 * Record-lock commands are mapped onto the host F_*LK64 variants so
 * that a single struct flock64 path can be used in do_fcntl().
 * Returns -TARGET_EINVAL for unsupported commands.
 * (warning : doesn't handle linux specific flags...)
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands share the same value on target and host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6352 
/*
 * Shared switch body for mapping flock l_type values in either
 * direction.  Each user defines TRANSTBL_CONVERT to produce the
 * appropriate "case X: return Y" pair; a value with no matching case
 * falls out of the switch so the caller can supply a default.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
6361 
/* Map a guest flock l_type constant to the host value;
 * returns -TARGET_EINVAL for an unrecognised type. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6369 
/* Map a host flock l_type constant to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6380 
6381 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6382                                             abi_ulong target_flock_addr)
6383 {
6384     struct target_flock *target_fl;
6385     int l_type;
6386 
6387     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6388         return -TARGET_EFAULT;
6389     }
6390 
6391     __get_user(l_type, &target_fl->l_type);
6392     l_type = target_to_host_flock(l_type);
6393     if (l_type < 0) {
6394         return l_type;
6395     }
6396     fl->l_type = l_type;
6397     __get_user(fl->l_whence, &target_fl->l_whence);
6398     __get_user(fl->l_start, &target_fl->l_start);
6399     __get_user(fl->l_len, &target_fl->l_len);
6400     __get_user(fl->l_pid, &target_fl->l_pid);
6401     unlock_user_struct(target_fl, target_flock_addr, 0);
6402     return 0;
6403 }
6404 
6405 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6406                                           const struct flock64 *fl)
6407 {
6408     struct target_flock *target_fl;
6409     short l_type;
6410 
6411     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6412         return -TARGET_EFAULT;
6413     }
6414 
6415     l_type = host_to_target_flock(fl->l_type);
6416     __put_user(l_type, &target_fl->l_type);
6417     __put_user(fl->l_whence, &target_fl->l_whence);
6418     __put_user(fl->l_start, &target_fl->l_start);
6419     __put_user(fl->l_len, &target_fl->l_len);
6420     __put_user(fl->l_pid, &target_fl->l_pid);
6421     unlock_user_struct(target_fl, target_flock_addr, 1);
6422     return 0;
6423 }
6424 
6425 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6426 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6427 
6428 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6429 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6430                                                    abi_ulong target_flock_addr)
6431 {
6432     struct target_oabi_flock64 *target_fl;
6433     int l_type;
6434 
6435     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6436         return -TARGET_EFAULT;
6437     }
6438 
6439     __get_user(l_type, &target_fl->l_type);
6440     l_type = target_to_host_flock(l_type);
6441     if (l_type < 0) {
6442         return l_type;
6443     }
6444     fl->l_type = l_type;
6445     __get_user(fl->l_whence, &target_fl->l_whence);
6446     __get_user(fl->l_start, &target_fl->l_start);
6447     __get_user(fl->l_len, &target_fl->l_len);
6448     __get_user(fl->l_pid, &target_fl->l_pid);
6449     unlock_user_struct(target_fl, target_flock_addr, 0);
6450     return 0;
6451 }
6452 
/*
 * As copy_to_user_flock64(), but writing the old ARM OABI layout
 * (struct target_oabi_flock64).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6472 #endif
6473 
6474 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6475                                               abi_ulong target_flock_addr)
6476 {
6477     struct target_flock64 *target_fl;
6478     int l_type;
6479 
6480     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6481         return -TARGET_EFAULT;
6482     }
6483 
6484     __get_user(l_type, &target_fl->l_type);
6485     l_type = target_to_host_flock(l_type);
6486     if (l_type < 0) {
6487         return l_type;
6488     }
6489     fl->l_type = l_type;
6490     __get_user(fl->l_whence, &target_fl->l_whence);
6491     __get_user(fl->l_start, &target_fl->l_start);
6492     __get_user(fl->l_len, &target_fl->l_len);
6493     __get_user(fl->l_pid, &target_fl->l_pid);
6494     unlock_user_struct(target_fl, target_flock_addr, 0);
6495     return 0;
6496 }
6497 
6498 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6499                                             const struct flock64 *fl)
6500 {
6501     struct target_flock64 *target_fl;
6502     short l_type;
6503 
6504     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6505         return -TARGET_EFAULT;
6506     }
6507 
6508     l_type = host_to_target_flock(fl->l_type);
6509     __put_user(l_type, &target_fl->l_type);
6510     __put_user(fl->l_whence, &target_fl->l_whence);
6511     __put_user(fl->l_start, &target_fl->l_start);
6512     __put_user(fl->l_len, &target_fl->l_len);
6513     __put_user(fl->l_pid, &target_fl->l_pid);
6514     unlock_user_struct(target_fl, target_flock_addr, 1);
6515     return 0;
6516 }
6517 
/*
 * Emulate fcntl(2): translate the command and any struct argument to
 * host form, perform the host fcntl, and translate results back.
 * Returns the host result converted to a target errno on failure.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    /* GETLK: copy the query in, and the kernel's answer back out. */
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* 64-bit and open-file-description locks use the flock64 layout. */
    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* File status flags need bit-by-bit translation in each direction. */
    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Plain integer argument: pass through unchanged. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: try it with the untranslated number. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6628 
6629 #ifdef USE_UID16
6630 
/* Narrow a uid to 16 bits: values that do not fit map to the
   overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
6638 
/* Narrow a gid to 16 bits: values that do not fit map to the
   overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
6646 
/* Widen a 16-bit uid: the 16-bit "no change" value -1 (0xffff) must
   stay -1 rather than becoming 65535. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
6654 
/* Widen a 16-bit gid: the 16-bit "no change" value -1 (0xffff) must
   stay -1 rather than becoming 65535. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a 16-bit uid/gid at a guest address. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6668 
6669 #else /* !USE_UID16 */
/* Hosts without 16-bit uid syscalls: ids pass through unmodified
   and are stored as 32-bit values. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a 32-bit uid/gid at a guest address. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6692 
6693 #endif /* USE_UID16 */
6694 
6695 /* We must do direct syscalls for setting UID/GID, because we want to
6696  * implement the Linux system call semantics of "change only for this thread",
6697  * not the libc/POSIX semantics of "change for all threads in process".
6698  * (See http://ewontfix.com/17/ for more details.)
6699  * We use the 32-bit version of the syscalls if present; if it is not
6700  * then either the host architecture supports 32-bit UIDs natively with
6701  * the standard syscall, or the 16-bit UID is the best we can do.
6702  */
6703 #ifdef __NR_setuid32
6704 #define __NR_sys_setuid __NR_setuid32
6705 #else
6706 #define __NR_sys_setuid __NR_setuid
6707 #endif
6708 #ifdef __NR_setgid32
6709 #define __NR_sys_setgid __NR_setgid32
6710 #else
6711 #define __NR_sys_setgid __NR_setgid
6712 #endif
6713 #ifdef __NR_setresuid32
6714 #define __NR_sys_setresuid __NR_setresuid32
6715 #else
6716 #define __NR_sys_setresuid __NR_setresuid
6717 #endif
6718 #ifdef __NR_setresgid32
6719 #define __NR_sys_setresgid __NR_setresgid32
6720 #else
6721 #define __NR_sys_setresgid __NR_setresgid
6722 #endif
6723 
/* Direct per-thread id syscalls (see the comment above): these bypass
   the libc wrappers, which would apply the change to every thread. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6728 
/*
 * One-time initialisation of the syscall layer:
 *  - register the struct layouts used by the ioctl thunk code,
 *  - build target_to_host_errno_table[] as the inverse of
 *    host_to_target_errno_table[],
 *  - patch the size field of any ioctl request number declared with an
 *    all-ones size, substituting the real size of its argument struct.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* Only a pointer argument has a size we can substitute. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6780 
6781 #ifdef TARGET_NR_truncate64
6782 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6783                                          abi_long arg2,
6784                                          abi_long arg3,
6785                                          abi_long arg4)
6786 {
6787     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6788         arg2 = arg3;
6789         arg3 = arg4;
6790     }
6791     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6792 }
6793 #endif
6794 
6795 #ifdef TARGET_NR_ftruncate64
6796 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6797                                           abi_long arg2,
6798                                           abi_long arg3,
6799                                           abi_long arg4)
6800 {
6801     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6802         arg2 = arg3;
6803         arg3 = arg4;
6804     }
6805     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6806 }
6807 #endif
6808 
6809 #if defined(TARGET_NR_timer_settime) || \
6810     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6811 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6812                                                  abi_ulong target_addr)
6813 {
6814     if (target_to_host_timespec(&host_its->it_interval, target_addr +
6815                                 offsetof(struct target_itimerspec,
6816                                          it_interval)) ||
6817         target_to_host_timespec(&host_its->it_value, target_addr +
6818                                 offsetof(struct target_itimerspec,
6819                                          it_value))) {
6820         return -TARGET_EFAULT;
6821     }
6822 
6823     return 0;
6824 }
6825 #endif
6826 
6827 #if defined(TARGET_NR_timer_settime64) || \
6828     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6829 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6830                                                    abi_ulong target_addr)
6831 {
6832     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6833                                   offsetof(struct target__kernel_itimerspec,
6834                                            it_interval)) ||
6835         target_to_host_timespec64(&host_its->it_value, target_addr +
6836                                   offsetof(struct target__kernel_itimerspec,
6837                                            it_value))) {
6838         return -TARGET_EFAULT;
6839     }
6840 
6841     return 0;
6842 }
6843 #endif
6844 
6845 #if ((defined(TARGET_NR_timerfd_gettime) || \
6846       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6847       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6848 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6849                                                  struct itimerspec *host_its)
6850 {
6851     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6852                                                        it_interval),
6853                                 &host_its->it_interval) ||
6854         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6855                                                        it_value),
6856                                 &host_its->it_value)) {
6857         return -TARGET_EFAULT;
6858     }
6859     return 0;
6860 }
6861 #endif
6862 
6863 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6864       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6865       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6866 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6867                                                    struct itimerspec *host_its)
6868 {
6869     if (host_to_target_timespec64(target_addr +
6870                                   offsetof(struct target__kernel_itimerspec,
6871                                            it_interval),
6872                                   &host_its->it_interval) ||
6873         host_to_target_timespec64(target_addr +
6874                                   offsetof(struct target__kernel_itimerspec,
6875                                            it_value),
6876                                   &host_its->it_value)) {
6877         return -TARGET_EFAULT;
6878     }
6879     return 0;
6880 }
6881 #endif
6882 
6883 #if defined(TARGET_NR_adjtimex) || \
6884     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct timex into a host struct timex, field by field.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
6919 
/*
 * Copy a host struct timex out to a guest struct timex, field by field
 * (the inverse of target_to_host_timex()).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6954 #endif
6955 
/*
 * Convert a guest 'struct sigevent' at target_addr into *host_sevp
 * for use with host timer_create()/mq_notify().  Only the fields the
 * host syscalls consume are filled in; callers are expected to have
 * zero-initialized *host_sevp beforehand.
 *
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    /* Guest signal numbers differ from host ones; remap here. */
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): unlock with copy-back flag 1 despite the
     * VERIFY_READ mapping; 0 would seem sufficient here — confirm. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
6981 
6982 #if defined(TARGET_NR_mlockall)
6983 static inline int target_to_host_mlockall_arg(int arg)
6984 {
6985     int result = 0;
6986 
6987     if (arg & TARGET_MCL_CURRENT) {
6988         result |= MCL_CURRENT;
6989     }
6990     if (arg & TARGET_MCL_FUTURE) {
6991         result |= MCL_FUTURE;
6992     }
6993 #ifdef MCL_ONFAULT
6994     if (arg & TARGET_MCL_ONFAULT) {
6995         result |= MCL_ONFAULT;
6996     }
6997 #endif
6998 
6999     return result;
7000 }
7001 #endif
7002 
7003 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7004      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7005      defined(TARGET_NR_newfstatat))
/*
 * Write the host 'struct stat' *host_st out to the guest as whatever
 * 64-bit stat structure the target ABI uses: the ARM EABI layout when
 * running a 32-bit ARM EABI guest, otherwise target_stat64 (or plain
 * target_stat if the target has no separate stat64).
 *
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* ARM EABI guests use a layout with different padding/ordering. */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode twice; keep both copies in sync. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        /* st_*tim nanosecond fields only exist on sufficiently
         * POSIX-y hosts. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7078 #endif
7079 
7080 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result out to the guest.  host_stx is already a
 * 'struct target_statx' (the layout matches the kernel's fixed-width
 * struct statx); only byte order needs converting, which __put_user
 * handles field by field.  Fields not listed stay zero from the
 * memset, matching the kernel's spare-field behavior.
 *
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7119 #endif
7120 
/*
 * Dispatch a raw futex call to whichever host syscall matches the
 * host's time_t width: on 64-bit hosts plain futex(2) already takes a
 * 64-bit timespec; on 32-bit hosts prefer futex_time64 when both the
 * syscall and a 64-bit tv_sec are available, else fall back to the
 * legacy 32-bit futex.  Returns the raw syscall result (not
 * errno-converted).
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only if the host defines none of the futex syscalls. */
    g_assert_not_reached();
}
7145 
/*
 * Like do_sys_futex() but via the safe_* wrappers (restartable on
 * guest signals) and with the result converted to a -TARGET_* errno
 * by get_errno().  Returns -TARGET_ENOSYS if the host provides no
 * usable futex syscall at all.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7170 
7171 /* ??? Using host futex calls even when target atomic operations
7172    are not really atomic probably breaks things.  However implementing
7173    futexes locally would make futexes shared between multiple processes
7174    tricky.  However they're probably useless because guest atomic
7175    operations won't work either.  */
7176 #if defined(TARGET_NR_futex)
7177 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7178                     target_ulong uaddr2, int val3)
7179 {
7180     struct timespec ts, *pts;
7181     int base_op;
7182 
7183     /* ??? We assume FUTEX_* constants are the same on both host
7184        and target.  */
7185 #ifdef FUTEX_CMD_MASK
7186     base_op = op & FUTEX_CMD_MASK;
7187 #else
7188     base_op = op;
7189 #endif
7190     switch (base_op) {
7191     case FUTEX_WAIT:
7192     case FUTEX_WAIT_BITSET:
7193         if (timeout) {
7194             pts = &ts;
7195             target_to_host_timespec(pts, timeout);
7196         } else {
7197             pts = NULL;
7198         }
7199         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7200     case FUTEX_WAKE:
7201         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7202     case FUTEX_FD:
7203         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7204     case FUTEX_REQUEUE:
7205     case FUTEX_CMP_REQUEUE:
7206     case FUTEX_WAKE_OP:
7207         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7208            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7209            But the prototype takes a `struct timespec *'; insert casts
7210            to satisfy the compiler.  We do not need to tswap TIMEOUT
7211            since it's not compared to guest memory.  */
7212         pts = (struct timespec *)(uintptr_t) timeout;
7213         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7214                              (base_op == FUTEX_CMP_REQUEUE
7215                                       ? tswap32(val3)
7216                                       : val3));
7217     default:
7218         return -TARGET_ENOSYS;
7219     }
7220 }
7221 #endif
7222 
7223 #if defined(TARGET_NR_futex_time64)
7224 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7225                            target_ulong uaddr2, int val3)
7226 {
7227     struct timespec ts, *pts;
7228     int base_op;
7229 
7230     /* ??? We assume FUTEX_* constants are the same on both host
7231        and target.  */
7232 #ifdef FUTEX_CMD_MASK
7233     base_op = op & FUTEX_CMD_MASK;
7234 #else
7235     base_op = op;
7236 #endif
7237     switch (base_op) {
7238     case FUTEX_WAIT:
7239     case FUTEX_WAIT_BITSET:
7240         if (timeout) {
7241             pts = &ts;
7242             target_to_host_timespec64(pts, timeout);
7243         } else {
7244             pts = NULL;
7245         }
7246         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7247     case FUTEX_WAKE:
7248         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7249     case FUTEX_FD:
7250         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7251     case FUTEX_REQUEUE:
7252     case FUTEX_CMP_REQUEUE:
7253     case FUTEX_WAKE_OP:
7254         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7255            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7256            But the prototype takes a `struct timespec *'; insert casts
7257            to satisfy the compiler.  We do not need to tswap TIMEOUT
7258            since it's not compared to guest memory.  */
7259         pts = (struct timespec *)(uintptr_t) timeout;
7260         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7261                              (base_op == FUTEX_CMP_REQUEUE
7262                                       ? tswap32(val3)
7263                                       : val3));
7264     default:
7265         return -TARGET_ENOSYS;
7266     }
7267 }
7268 #endif
7269 
7270 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest's file_handle buffer (with
 * its caller-supplied handle_bytes field) is mirrored into a host
 * file_handle, the host syscall fills it, and the result plus the
 * mount id are written back to the guest.
 *
 * Returns the host syscall result as a -TARGET_* errno, or
 * -TARGET_EFAULT on any bad guest pointer.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled; a value near UINT_MAX
     * would wrap total_size.  The kernel rejects handle_bytes >
     * MAX_HANDLE_SZ with EINVAL before the handle is used — confirm
     * that is sufficient here. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Opaque payload is copied verbatim; only the two header fields
     * need byte swapping for the guest. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7322 #endif
7323 
7324 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest's file_handle (header
 * fields converted from guest byte order), invoke the host syscall
 * with the open flags translated via fcntl_flags_tbl.
 *
 * Returns the new fd or a -TARGET_* errno; -TARGET_EFAULT on a bad
 * guest handle pointer.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the opaque payload, then fix up the two header
     * fields to host byte order. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7356 #endif
7357 
7358 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7359 
/*
 * Emulate signalfd4(2) (and signalfd(2), which passes flags == 0).
 * The guest sigset is converted to the host's, the O_NONBLOCK /
 * O_CLOEXEC flags are translated, and the resulting fd is registered
 * with the fd translator so reads return guest-format
 * signalfd_siginfo structures.
 *
 * Returns the new fd, -TARGET_EINVAL for unsupported flag bits, or
 * -TARGET_EFAULT on a bad mask pointer.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only the two fcntl-style flags are valid for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Ensure reads of this fd are byteswapped for the guest. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7387 #endif
7388 
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; remap just those. */
        int sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; remap and reassemble. */
        int sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    /* Exited (or other) statuses carry no signal number. */
    return status;
}
7402 
7403 static int open_self_cmdline(void *cpu_env, int fd)
7404 {
7405     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7406     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7407     int i;
7408 
7409     for (i = 0; i < bprm->argc; i++) {
7410         size_t len = strlen(bprm->argv[i]) + 1;
7411 
7412         if (write(fd, bprm->argv[i], len) != len) {
7413             return -1;
7414         }
7415     }
7416 
7417     return 0;
7418 }
7419 
/*
 * Emulate /proc/self/maps: walk the host's own mappings and emit, in
 * the kernel's maps format, only those regions that correspond to
 * valid guest address space, with addresses translated back to guest
 * view (h2g).  Always returns 0.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end of the region to the guest address space
             * if the host mapping extends past it. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;

            /* Skip host mappings that the guest page tables don't
             * actually consider mapped with these permissions. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            e->is_read ? 'r' : '-',
                            e->is_write ? 'w' : '-',
                            e->is_exec ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad the pathname to column 73 like the kernel does. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
7481 
/*
 * Emulate /proc/self/stat: emit the 44 space-separated fields of the
 * kernel's stat line, but only pid (field 0), comm (field 1) and
 * startstack (field 27) carry real values — everything else is
 * reported as 0.  Returns 0 on success, -1 on a failed write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            /* comm is the basename of argv[0], truncated to 15 chars
             * and parenthesized, matching the kernel. */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
7513 
7514 static int open_self_auxv(void *cpu_env, int fd)
7515 {
7516     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7517     TaskState *ts = cpu->opaque;
7518     abi_ulong auxv = ts->info->saved_auxv;
7519     abi_ulong len = ts->info->auxv_len;
7520     char *ptr;
7521 
7522     /*
7523      * Auxiliary vector is stored in target process stack.
7524      * read in whole auxv vector and copy it to file
7525      */
7526     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7527     if (ptr != NULL) {
7528         while (len > 0) {
7529             ssize_t r;
7530             r = write(fd, ptr, len);
7531             if (r <= 0) {
7532                 break;
7533             }
7534             len -= r;
7535             ptr += r;
7536         }
7537         lseek(fd, 0, SEEK_SET);
7538         unlock_user(ptr, auxv, len);
7539     }
7540 
7541     return 0;
7542 }
7543 
/*
 * Return 1 if filename names /proc/<entry> for the current process,
 * i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>"; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Also accept an explicit "<pid>/" matching our own pid. */
        char pid_prefix[80];
        size_t pid_len;

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        pid_len = strlen(pid_prefix);
        if (strncmp(rest, pid_prefix, pid_len) != 0) {
            return 0;
        }
        rest += pid_len;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7567 
7568 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7569     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-path comparator used by the fake_open table for absolute
 * /proc entries; returns 1 on a match, 0 otherwise. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7574 #endif
7575 
7576 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Emulate /proc/net/route for guests whose endianness differs from
 * the host: the header line passes through verbatim, while the
 * address columns (dest, gateway, mask) of each route are byteswapped
 * so the guest sees them in its own byte order.
 *
 * Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    /* NOTE(review): the header getline() result is not checked; on an
     * empty file `line` stays NULL and dprintf("%s", NULL) relies on
     * glibc's "(null)" leniency — confirm acceptable. */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        /* Skip malformed lines rather than emitting garbage. */
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
7619 #endif
7620 
7621 #if defined(TARGET_SPARC)
/* Synthesize a minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char cpuinfo[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", cpuinfo);
    return 0;
}
7627 #endif
7628 
7629 #if defined(TARGET_HPPA)
/* Synthesize a fixed /proc/cpuinfo describing the emulated HPPA
 * machine (a 9000/778/B160L with a PA7300LC CPU). */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char *const cpuinfo_lines[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(cpuinfo_lines) / sizeof(cpuinfo_lines[0]); i++) {
        dprintf(fd, "%s", cpuinfo_lines[i]);
    }
    return 0;
}
7639 #endif
7640 
7641 #if defined(TARGET_M68K)
/* Synthesize /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model);
    return 0;
}
7647 #endif
7648 
/*
 * Core of the guest open()/openat() emulation.  Certain /proc paths
 * cannot be passed through to the host because their contents must be
 * presented in guest format; those are intercepted here and served
 * from a temporary file filled by a per-entry generator.  Everything
 * else is forwarded to the host via safe_openat().
 *
 * Returns an fd on success or a negative errno (host-style for the
 * fake-open temp-file path, guest-converted by the caller otherwise).
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;   /* entry (or full path) to intercept */
        int (*fill)(void *cpu_env, int fd);  /* writes fake contents */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the anonymous fd is all we need. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            /* Preserve the fill function's errno across close(). */
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7714 
7715 #define TIMER_MAGIC 0x0caf0000
7716 #define TIMER_MAGIC_MASK 0xffff0000
7717 
7718 /* Convert QEMU provided timer ID back to internal 16bit index format */
7719 static target_timer_t get_timer_id(abi_long arg)
7720 {
7721     target_timer_t timerid = arg;
7722 
7723     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7724         return -TARGET_EINVAL;
7725     }
7726 
7727     timerid &= 0xffff;
7728 
7729     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7730         return -TARGET_EINVAL;
7731     }
7732 
7733     return timerid;
7734 }
7735 
/*
 * Read a guest CPU-affinity bitmask (an array of abi_ulong words in
 * guest byte order) into a host 'unsigned long' bitmask, rebasing
 * each set bit since the word widths may differ between guest and
 * host.  host_size/target_size are in bytes; the host buffer must be
 * at least as large as the guest one.
 *
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Bits beyond target_size must read as clear on the host side. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        /* Re-scatter each guest bit into the host word layout. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
7769 
/*
 * Inverse of target_to_host_cpu_mask(): write a host 'unsigned long'
 * CPU bitmask out to the guest as abi_ulong words in guest byte
 * order, regathering bits across the (possibly different) word
 * widths.  Only target_size bytes are written; host bits beyond that
 * are silently dropped.
 *
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        /* Gather this guest word's bits from the host layout. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7802 
7803 /* This is an internal helper for do_syscall so that it is easier
7804  * to have a single return point, so that actions, such as logging
7805  * of syscall results, can be performed.
7806  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7807  */
7808 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7809                             abi_long arg2, abi_long arg3, abi_long arg4,
7810                             abi_long arg5, abi_long arg6, abi_long arg7,
7811                             abi_long arg8)
7812 {
7813     CPUState *cpu = env_cpu(cpu_env);
7814     abi_long ret;
7815 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7816     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7817     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7818     || defined(TARGET_NR_statx)
7819     struct stat st;
7820 #endif
7821 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7822     || defined(TARGET_NR_fstatfs)
7823     struct statfs stfs;
7824 #endif
7825     void *p;
7826 
7827     switch(num) {
7828     case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
7833 
7834         if (block_signals()) {
7835             return -TARGET_ERESTARTSYS;
7836         }
7837 
7838         pthread_mutex_lock(&clone_lock);
7839 
7840         if (CPU_NEXT(first_cpu)) {
7841             TaskState *ts = cpu->opaque;
7842 
7843             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7844             object_unref(OBJECT(cpu));
7845             /*
7846              * At this point the CPU should be unrealized and removed
7847              * from cpu lists. We can clean-up the rest of the thread
7848              * data without the lock held.
7849              */
7850 
7851             pthread_mutex_unlock(&clone_lock);
7852 
7853             if (ts->child_tidptr) {
7854                 put_user_u32(0, ts->child_tidptr);
7855                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7856                           NULL, NULL, 0);
7857             }
7858             thread_cpu = NULL;
7859             g_free(ts);
7860             rcu_unregister_thread();
7861             pthread_exit(NULL);
7862         }
7863 
7864         pthread_mutex_unlock(&clone_lock);
7865         preexit_cleanup(cpu_env, arg1);
7866         _exit(arg1);
7867         return 0; /* avoid warning */
7868     case TARGET_NR_read:
7869         if (arg2 == 0 && arg3 == 0) {
7870             return get_errno(safe_read(arg1, 0, 0));
7871         } else {
7872             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7873                 return -TARGET_EFAULT;
7874             ret = get_errno(safe_read(arg1, p, arg3));
7875             if (ret >= 0 &&
7876                 fd_trans_host_to_target_data(arg1)) {
7877                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7878             }
7879             unlock_user(p, arg2, ret);
7880         }
7881         return ret;
7882     case TARGET_NR_write:
7883         if (arg2 == 0 && arg3 == 0) {
7884             return get_errno(safe_write(arg1, 0, 0));
7885         }
7886         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7887             return -TARGET_EFAULT;
7888         if (fd_trans_target_to_host_data(arg1)) {
7889             void *copy = g_malloc(arg3);
7890             memcpy(copy, p, arg3);
7891             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7892             if (ret >= 0) {
7893                 ret = get_errno(safe_write(arg1, copy, ret));
7894             }
7895             g_free(copy);
7896         } else {
7897             ret = get_errno(safe_write(arg1, p, arg3));
7898         }
7899         unlock_user(p, arg2, 0);
7900         return ret;
7901 
7902 #ifdef TARGET_NR_open
7903     case TARGET_NR_open:
7904         if (!(p = lock_user_string(arg1)))
7905             return -TARGET_EFAULT;
7906         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7907                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7908                                   arg3));
7909         fd_trans_unregister(ret);
7910         unlock_user(p, arg1, 0);
7911         return ret;
7912 #endif
7913     case TARGET_NR_openat:
7914         if (!(p = lock_user_string(arg2)))
7915             return -TARGET_EFAULT;
7916         ret = get_errno(do_openat(cpu_env, arg1, p,
7917                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7918                                   arg4));
7919         fd_trans_unregister(ret);
7920         unlock_user(p, arg2, 0);
7921         return ret;
7922 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7923     case TARGET_NR_name_to_handle_at:
7924         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7925         return ret;
7926 #endif
7927 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7928     case TARGET_NR_open_by_handle_at:
7929         ret = do_open_by_handle_at(arg1, arg2, arg3);
7930         fd_trans_unregister(ret);
7931         return ret;
7932 #endif
7933     case TARGET_NR_close:
7934         fd_trans_unregister(arg1);
7935         return get_errno(close(arg1));
7936 
7937     case TARGET_NR_brk:
7938         return do_brk(arg1);
7939 #ifdef TARGET_NR_fork
7940     case TARGET_NR_fork:
7941         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7942 #endif
7943 #ifdef TARGET_NR_waitpid
7944     case TARGET_NR_waitpid:
7945         {
7946             int status;
7947             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7948             if (!is_error(ret) && arg2 && ret
7949                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7950                 return -TARGET_EFAULT;
7951         }
7952         return ret;
7953 #endif
7954 #ifdef TARGET_NR_waitid
7955     case TARGET_NR_waitid:
7956         {
7957             siginfo_t info;
7958             info.si_pid = 0;
7959             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7960             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7961                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7962                     return -TARGET_EFAULT;
7963                 host_to_target_siginfo(p, &info);
7964                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7965             }
7966         }
7967         return ret;
7968 #endif
7969 #ifdef TARGET_NR_creat /* not on alpha */
7970     case TARGET_NR_creat:
7971         if (!(p = lock_user_string(arg1)))
7972             return -TARGET_EFAULT;
7973         ret = get_errno(creat(p, arg2));
7974         fd_trans_unregister(ret);
7975         unlock_user(p, arg1, 0);
7976         return ret;
7977 #endif
7978 #ifdef TARGET_NR_link
7979     case TARGET_NR_link:
7980         {
7981             void * p2;
7982             p = lock_user_string(arg1);
7983             p2 = lock_user_string(arg2);
7984             if (!p || !p2)
7985                 ret = -TARGET_EFAULT;
7986             else
7987                 ret = get_errno(link(p, p2));
7988             unlock_user(p2, arg2, 0);
7989             unlock_user(p, arg1, 0);
7990         }
7991         return ret;
7992 #endif
7993 #if defined(TARGET_NR_linkat)
7994     case TARGET_NR_linkat:
7995         {
7996             void * p2 = NULL;
7997             if (!arg2 || !arg4)
7998                 return -TARGET_EFAULT;
7999             p  = lock_user_string(arg2);
8000             p2 = lock_user_string(arg4);
8001             if (!p || !p2)
8002                 ret = -TARGET_EFAULT;
8003             else
8004                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8005             unlock_user(p, arg2, 0);
8006             unlock_user(p2, arg4, 0);
8007         }
8008         return ret;
8009 #endif
8010 #ifdef TARGET_NR_unlink
8011     case TARGET_NR_unlink:
8012         if (!(p = lock_user_string(arg1)))
8013             return -TARGET_EFAULT;
8014         ret = get_errno(unlink(p));
8015         unlock_user(p, arg1, 0);
8016         return ret;
8017 #endif
8018 #if defined(TARGET_NR_unlinkat)
8019     case TARGET_NR_unlinkat:
8020         if (!(p = lock_user_string(arg2)))
8021             return -TARGET_EFAULT;
8022         ret = get_errno(unlinkat(arg1, p, arg3));
8023         unlock_user(p, arg2, 0);
8024         return ret;
8025 #endif
8026     case TARGET_NR_execve:
8027         {
8028             char **argp, **envp;
8029             int argc, envc;
8030             abi_ulong gp;
8031             abi_ulong guest_argp;
8032             abi_ulong guest_envp;
8033             abi_ulong addr;
8034             char **q;
8035             int total_size = 0;
8036 
8037             argc = 0;
8038             guest_argp = arg2;
8039             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8040                 if (get_user_ual(addr, gp))
8041                     return -TARGET_EFAULT;
8042                 if (!addr)
8043                     break;
8044                 argc++;
8045             }
8046             envc = 0;
8047             guest_envp = arg3;
8048             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8049                 if (get_user_ual(addr, gp))
8050                     return -TARGET_EFAULT;
8051                 if (!addr)
8052                     break;
8053                 envc++;
8054             }
8055 
8056             argp = g_new0(char *, argc + 1);
8057             envp = g_new0(char *, envc + 1);
8058 
8059             for (gp = guest_argp, q = argp; gp;
8060                   gp += sizeof(abi_ulong), q++) {
8061                 if (get_user_ual(addr, gp))
8062                     goto execve_efault;
8063                 if (!addr)
8064                     break;
8065                 if (!(*q = lock_user_string(addr)))
8066                     goto execve_efault;
8067                 total_size += strlen(*q) + 1;
8068             }
8069             *q = NULL;
8070 
8071             for (gp = guest_envp, q = envp; gp;
8072                   gp += sizeof(abi_ulong), q++) {
8073                 if (get_user_ual(addr, gp))
8074                     goto execve_efault;
8075                 if (!addr)
8076                     break;
8077                 if (!(*q = lock_user_string(addr)))
8078                     goto execve_efault;
8079                 total_size += strlen(*q) + 1;
8080             }
8081             *q = NULL;
8082 
8083             if (!(p = lock_user_string(arg1)))
8084                 goto execve_efault;
8085             /* Although execve() is not an interruptible syscall it is
8086              * a special case where we must use the safe_syscall wrapper:
8087              * if we allow a signal to happen before we make the host
8088              * syscall then we will 'lose' it, because at the point of
8089              * execve the process leaves QEMU's control. So we use the
8090              * safe syscall wrapper to ensure that we either take the
8091              * signal as a guest signal, or else it does not happen
8092              * before the execve completes and makes it the other
8093              * program's problem.
8094              */
8095             ret = get_errno(safe_execve(p, argp, envp));
8096             unlock_user(p, arg1, 0);
8097 
8098             goto execve_end;
8099 
8100         execve_efault:
8101             ret = -TARGET_EFAULT;
8102 
8103         execve_end:
8104             for (gp = guest_argp, q = argp; *q;
8105                   gp += sizeof(abi_ulong), q++) {
8106                 if (get_user_ual(addr, gp)
8107                     || !addr)
8108                     break;
8109                 unlock_user(*q, addr, 0);
8110             }
8111             for (gp = guest_envp, q = envp; *q;
8112                   gp += sizeof(abi_ulong), q++) {
8113                 if (get_user_ual(addr, gp)
8114                     || !addr)
8115                     break;
8116                 unlock_user(*q, addr, 0);
8117             }
8118 
8119             g_free(argp);
8120             g_free(envp);
8121         }
8122         return ret;
8123     case TARGET_NR_chdir:
8124         if (!(p = lock_user_string(arg1)))
8125             return -TARGET_EFAULT;
8126         ret = get_errno(chdir(p));
8127         unlock_user(p, arg1, 0);
8128         return ret;
8129 #ifdef TARGET_NR_time
8130     case TARGET_NR_time:
8131         {
8132             time_t host_time;
8133             ret = get_errno(time(&host_time));
8134             if (!is_error(ret)
8135                 && arg1
8136                 && put_user_sal(host_time, arg1))
8137                 return -TARGET_EFAULT;
8138         }
8139         return ret;
8140 #endif
8141 #ifdef TARGET_NR_mknod
8142     case TARGET_NR_mknod:
8143         if (!(p = lock_user_string(arg1)))
8144             return -TARGET_EFAULT;
8145         ret = get_errno(mknod(p, arg2, arg3));
8146         unlock_user(p, arg1, 0);
8147         return ret;
8148 #endif
8149 #if defined(TARGET_NR_mknodat)
8150     case TARGET_NR_mknodat:
8151         if (!(p = lock_user_string(arg2)))
8152             return -TARGET_EFAULT;
8153         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8154         unlock_user(p, arg2, 0);
8155         return ret;
8156 #endif
8157 #ifdef TARGET_NR_chmod
8158     case TARGET_NR_chmod:
8159         if (!(p = lock_user_string(arg1)))
8160             return -TARGET_EFAULT;
8161         ret = get_errno(chmod(p, arg2));
8162         unlock_user(p, arg1, 0);
8163         return ret;
8164 #endif
8165 #ifdef TARGET_NR_lseek
8166     case TARGET_NR_lseek:
8167         return get_errno(lseek(arg1, arg2, arg3));
8168 #endif
8169 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8170     /* Alpha specific */
8171     case TARGET_NR_getxpid:
8172         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8173         return get_errno(getpid());
8174 #endif
8175 #ifdef TARGET_NR_getpid
8176     case TARGET_NR_getpid:
8177         return get_errno(getpid());
8178 #endif
8179     case TARGET_NR_mount:
8180         {
8181             /* need to look at the data field */
8182             void *p2, *p3;
8183 
8184             if (arg1) {
8185                 p = lock_user_string(arg1);
8186                 if (!p) {
8187                     return -TARGET_EFAULT;
8188                 }
8189             } else {
8190                 p = NULL;
8191             }
8192 
8193             p2 = lock_user_string(arg2);
8194             if (!p2) {
8195                 if (arg1) {
8196                     unlock_user(p, arg1, 0);
8197                 }
8198                 return -TARGET_EFAULT;
8199             }
8200 
8201             if (arg3) {
8202                 p3 = lock_user_string(arg3);
8203                 if (!p3) {
8204                     if (arg1) {
8205                         unlock_user(p, arg1, 0);
8206                     }
8207                     unlock_user(p2, arg2, 0);
8208                     return -TARGET_EFAULT;
8209                 }
8210             } else {
8211                 p3 = NULL;
8212             }
8213 
8214             /* FIXME - arg5 should be locked, but it isn't clear how to
8215              * do that since it's not guaranteed to be a NULL-terminated
8216              * string.
8217              */
8218             if (!arg5) {
8219                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8220             } else {
8221                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8222             }
8223             ret = get_errno(ret);
8224 
8225             if (arg1) {
8226                 unlock_user(p, arg1, 0);
8227             }
8228             unlock_user(p2, arg2, 0);
8229             if (arg3) {
8230                 unlock_user(p3, arg3, 0);
8231             }
8232         }
8233         return ret;
8234 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8235 #if defined(TARGET_NR_umount)
8236     case TARGET_NR_umount:
8237 #endif
8238 #if defined(TARGET_NR_oldumount)
8239     case TARGET_NR_oldumount:
8240 #endif
8241         if (!(p = lock_user_string(arg1)))
8242             return -TARGET_EFAULT;
8243         ret = get_errno(umount(p));
8244         unlock_user(p, arg1, 0);
8245         return ret;
8246 #endif
8247 #ifdef TARGET_NR_stime /* not on alpha */
8248     case TARGET_NR_stime:
8249         {
8250             struct timespec ts;
8251             ts.tv_nsec = 0;
8252             if (get_user_sal(ts.tv_sec, arg1)) {
8253                 return -TARGET_EFAULT;
8254             }
8255             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8256         }
8257 #endif
8258 #ifdef TARGET_NR_alarm /* not on alpha */
8259     case TARGET_NR_alarm:
8260         return alarm(arg1);
8261 #endif
8262 #ifdef TARGET_NR_pause /* not on alpha */
8263     case TARGET_NR_pause:
8264         if (!block_signals()) {
8265             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8266         }
8267         return -TARGET_EINTR;
8268 #endif
8269 #ifdef TARGET_NR_utime
8270     case TARGET_NR_utime:
8271         {
8272             struct utimbuf tbuf, *host_tbuf;
8273             struct target_utimbuf *target_tbuf;
8274             if (arg2) {
8275                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8276                     return -TARGET_EFAULT;
8277                 tbuf.actime = tswapal(target_tbuf->actime);
8278                 tbuf.modtime = tswapal(target_tbuf->modtime);
8279                 unlock_user_struct(target_tbuf, arg2, 0);
8280                 host_tbuf = &tbuf;
8281             } else {
8282                 host_tbuf = NULL;
8283             }
8284             if (!(p = lock_user_string(arg1)))
8285                 return -TARGET_EFAULT;
8286             ret = get_errno(utime(p, host_tbuf));
8287             unlock_user(p, arg1, 0);
8288         }
8289         return ret;
8290 #endif
8291 #ifdef TARGET_NR_utimes
8292     case TARGET_NR_utimes:
8293         {
8294             struct timeval *tvp, tv[2];
8295             if (arg2) {
8296                 if (copy_from_user_timeval(&tv[0], arg2)
8297                     || copy_from_user_timeval(&tv[1],
8298                                               arg2 + sizeof(struct target_timeval)))
8299                     return -TARGET_EFAULT;
8300                 tvp = tv;
8301             } else {
8302                 tvp = NULL;
8303             }
8304             if (!(p = lock_user_string(arg1)))
8305                 return -TARGET_EFAULT;
8306             ret = get_errno(utimes(p, tvp));
8307             unlock_user(p, arg1, 0);
8308         }
8309         return ret;
8310 #endif
8311 #if defined(TARGET_NR_futimesat)
8312     case TARGET_NR_futimesat:
8313         {
8314             struct timeval *tvp, tv[2];
8315             if (arg3) {
8316                 if (copy_from_user_timeval(&tv[0], arg3)
8317                     || copy_from_user_timeval(&tv[1],
8318                                               arg3 + sizeof(struct target_timeval)))
8319                     return -TARGET_EFAULT;
8320                 tvp = tv;
8321             } else {
8322                 tvp = NULL;
8323             }
8324             if (!(p = lock_user_string(arg2))) {
8325                 return -TARGET_EFAULT;
8326             }
8327             ret = get_errno(futimesat(arg1, path(p), tvp));
8328             unlock_user(p, arg2, 0);
8329         }
8330         return ret;
8331 #endif
8332 #ifdef TARGET_NR_access
8333     case TARGET_NR_access:
8334         if (!(p = lock_user_string(arg1))) {
8335             return -TARGET_EFAULT;
8336         }
8337         ret = get_errno(access(path(p), arg2));
8338         unlock_user(p, arg1, 0);
8339         return ret;
8340 #endif
8341 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8342     case TARGET_NR_faccessat:
8343         if (!(p = lock_user_string(arg2))) {
8344             return -TARGET_EFAULT;
8345         }
8346         ret = get_errno(faccessat(arg1, p, arg3, 0));
8347         unlock_user(p, arg2, 0);
8348         return ret;
8349 #endif
8350 #ifdef TARGET_NR_nice /* not on alpha */
8351     case TARGET_NR_nice:
8352         return get_errno(nice(arg1));
8353 #endif
8354     case TARGET_NR_sync:
8355         sync();
8356         return 0;
8357 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8358     case TARGET_NR_syncfs:
8359         return get_errno(syncfs(arg1));
8360 #endif
8361     case TARGET_NR_kill:
8362         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8363 #ifdef TARGET_NR_rename
8364     case TARGET_NR_rename:
8365         {
8366             void *p2;
8367             p = lock_user_string(arg1);
8368             p2 = lock_user_string(arg2);
8369             if (!p || !p2)
8370                 ret = -TARGET_EFAULT;
8371             else
8372                 ret = get_errno(rename(p, p2));
8373             unlock_user(p2, arg2, 0);
8374             unlock_user(p, arg1, 0);
8375         }
8376         return ret;
8377 #endif
8378 #if defined(TARGET_NR_renameat)
8379     case TARGET_NR_renameat:
8380         {
8381             void *p2;
8382             p  = lock_user_string(arg2);
8383             p2 = lock_user_string(arg4);
8384             if (!p || !p2)
8385                 ret = -TARGET_EFAULT;
8386             else
8387                 ret = get_errno(renameat(arg1, p, arg3, p2));
8388             unlock_user(p2, arg4, 0);
8389             unlock_user(p, arg2, 0);
8390         }
8391         return ret;
8392 #endif
8393 #if defined(TARGET_NR_renameat2)
8394     case TARGET_NR_renameat2:
8395         {
8396             void *p2;
8397             p  = lock_user_string(arg2);
8398             p2 = lock_user_string(arg4);
8399             if (!p || !p2) {
8400                 ret = -TARGET_EFAULT;
8401             } else {
8402                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8403             }
8404             unlock_user(p2, arg4, 0);
8405             unlock_user(p, arg2, 0);
8406         }
8407         return ret;
8408 #endif
8409 #ifdef TARGET_NR_mkdir
8410     case TARGET_NR_mkdir:
8411         if (!(p = lock_user_string(arg1)))
8412             return -TARGET_EFAULT;
8413         ret = get_errno(mkdir(p, arg2));
8414         unlock_user(p, arg1, 0);
8415         return ret;
8416 #endif
8417 #if defined(TARGET_NR_mkdirat)
8418     case TARGET_NR_mkdirat:
8419         if (!(p = lock_user_string(arg2)))
8420             return -TARGET_EFAULT;
8421         ret = get_errno(mkdirat(arg1, p, arg3));
8422         unlock_user(p, arg2, 0);
8423         return ret;
8424 #endif
8425 #ifdef TARGET_NR_rmdir
8426     case TARGET_NR_rmdir:
8427         if (!(p = lock_user_string(arg1)))
8428             return -TARGET_EFAULT;
8429         ret = get_errno(rmdir(p));
8430         unlock_user(p, arg1, 0);
8431         return ret;
8432 #endif
8433     case TARGET_NR_dup:
8434         ret = get_errno(dup(arg1));
8435         if (ret >= 0) {
8436             fd_trans_dup(arg1, ret);
8437         }
8438         return ret;
8439 #ifdef TARGET_NR_pipe
8440     case TARGET_NR_pipe:
8441         return do_pipe(cpu_env, arg1, 0, 0);
8442 #endif
8443 #ifdef TARGET_NR_pipe2
8444     case TARGET_NR_pipe2:
8445         return do_pipe(cpu_env, arg1,
8446                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8447 #endif
8448     case TARGET_NR_times:
8449         {
8450             struct target_tms *tmsp;
8451             struct tms tms;
8452             ret = get_errno(times(&tms));
8453             if (arg1) {
8454                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8455                 if (!tmsp)
8456                     return -TARGET_EFAULT;
8457                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8458                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8459                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8460                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8461             }
8462             if (!is_error(ret))
8463                 ret = host_to_target_clock_t(ret);
8464         }
8465         return ret;
8466     case TARGET_NR_acct:
8467         if (arg1 == 0) {
8468             ret = get_errno(acct(NULL));
8469         } else {
8470             if (!(p = lock_user_string(arg1))) {
8471                 return -TARGET_EFAULT;
8472             }
8473             ret = get_errno(acct(path(p)));
8474             unlock_user(p, arg1, 0);
8475         }
8476         return ret;
8477 #ifdef TARGET_NR_umount2
8478     case TARGET_NR_umount2:
8479         if (!(p = lock_user_string(arg1)))
8480             return -TARGET_EFAULT;
8481         ret = get_errno(umount2(p, arg2));
8482         unlock_user(p, arg1, 0);
8483         return ret;
8484 #endif
8485     case TARGET_NR_ioctl:
8486         return do_ioctl(arg1, arg2, arg3);
8487 #ifdef TARGET_NR_fcntl
8488     case TARGET_NR_fcntl:
8489         return do_fcntl(arg1, arg2, arg3);
8490 #endif
8491     case TARGET_NR_setpgid:
8492         return get_errno(setpgid(arg1, arg2));
8493     case TARGET_NR_umask:
8494         return get_errno(umask(arg1));
8495     case TARGET_NR_chroot:
8496         if (!(p = lock_user_string(arg1)))
8497             return -TARGET_EFAULT;
8498         ret = get_errno(chroot(p));
8499         unlock_user(p, arg1, 0);
8500         return ret;
8501 #ifdef TARGET_NR_dup2
8502     case TARGET_NR_dup2:
8503         ret = get_errno(dup2(arg1, arg2));
8504         if (ret >= 0) {
8505             fd_trans_dup(arg1, arg2);
8506         }
8507         return ret;
8508 #endif
8509 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8510     case TARGET_NR_dup3:
8511     {
8512         int host_flags;
8513 
8514         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8515             return -EINVAL;
8516         }
8517         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8518         ret = get_errno(dup3(arg1, arg2, host_flags));
8519         if (ret >= 0) {
8520             fd_trans_dup(arg1, arg2);
8521         }
8522         return ret;
8523     }
8524 #endif
8525 #ifdef TARGET_NR_getppid /* not on alpha */
8526     case TARGET_NR_getppid:
8527         return get_errno(getppid());
8528 #endif
8529 #ifdef TARGET_NR_getpgrp
8530     case TARGET_NR_getpgrp:
8531         return get_errno(getpgrp());
8532 #endif
8533     case TARGET_NR_setsid:
8534         return get_errno(setsid());
8535 #ifdef TARGET_NR_sigaction
8536     case TARGET_NR_sigaction:
8537         {
8538 #if defined(TARGET_ALPHA)
8539             struct target_sigaction act, oact, *pact = 0;
8540             struct target_old_sigaction *old_act;
8541             if (arg2) {
8542                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8543                     return -TARGET_EFAULT;
8544                 act._sa_handler = old_act->_sa_handler;
8545                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8546                 act.sa_flags = old_act->sa_flags;
8547                 act.sa_restorer = 0;
8548                 unlock_user_struct(old_act, arg2, 0);
8549                 pact = &act;
8550             }
8551             ret = get_errno(do_sigaction(arg1, pact, &oact));
8552             if (!is_error(ret) && arg3) {
8553                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8554                     return -TARGET_EFAULT;
8555                 old_act->_sa_handler = oact._sa_handler;
8556                 old_act->sa_mask = oact.sa_mask.sig[0];
8557                 old_act->sa_flags = oact.sa_flags;
8558                 unlock_user_struct(old_act, arg3, 1);
8559             }
8560 #elif defined(TARGET_MIPS)
8561 	    struct target_sigaction act, oact, *pact, *old_act;
8562 
8563 	    if (arg2) {
8564                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8565                     return -TARGET_EFAULT;
8566 		act._sa_handler = old_act->_sa_handler;
8567 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8568 		act.sa_flags = old_act->sa_flags;
8569 		unlock_user_struct(old_act, arg2, 0);
8570 		pact = &act;
8571 	    } else {
8572 		pact = NULL;
8573 	    }
8574 
8575 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8576 
8577 	    if (!is_error(ret) && arg3) {
8578                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8579                     return -TARGET_EFAULT;
8580 		old_act->_sa_handler = oact._sa_handler;
8581 		old_act->sa_flags = oact.sa_flags;
8582 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8583 		old_act->sa_mask.sig[1] = 0;
8584 		old_act->sa_mask.sig[2] = 0;
8585 		old_act->sa_mask.sig[3] = 0;
8586 		unlock_user_struct(old_act, arg3, 1);
8587 	    }
8588 #else
8589             struct target_old_sigaction *old_act;
8590             struct target_sigaction act, oact, *pact;
8591             if (arg2) {
8592                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8593                     return -TARGET_EFAULT;
8594                 act._sa_handler = old_act->_sa_handler;
8595                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8596                 act.sa_flags = old_act->sa_flags;
8597                 act.sa_restorer = old_act->sa_restorer;
8598 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8599                 act.ka_restorer = 0;
8600 #endif
8601                 unlock_user_struct(old_act, arg2, 0);
8602                 pact = &act;
8603             } else {
8604                 pact = NULL;
8605             }
8606             ret = get_errno(do_sigaction(arg1, pact, &oact));
8607             if (!is_error(ret) && arg3) {
8608                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8609                     return -TARGET_EFAULT;
8610                 old_act->_sa_handler = oact._sa_handler;
8611                 old_act->sa_mask = oact.sa_mask.sig[0];
8612                 old_act->sa_flags = oact.sa_flags;
8613                 old_act->sa_restorer = oact.sa_restorer;
8614                 unlock_user_struct(old_act, arg3, 1);
8615             }
8616 #endif
8617         }
8618         return ret;
8619 #endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            /* Reject any sigsetsize other than the exact target sigset
             * size, as the kernel does. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                /* New action supplied: convert Alpha's rt_sigaction layout
                 * into the common target_sigaction, taking the restorer
                 * from arg5. */
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                /* Copy the previous action back out in Alpha's layout
                 * (no sa_restorer field there). */
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    return -TARGET_EFAULT;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    return -TARGET_EFAULT;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                /* SPARC passes the restorer as a separate syscall arg;
                 * stash it in the locked struct for do_sigaction. */
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    /* 'act' may still be locked; fall through to the
                     * shared unlock path below. */
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
	rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Old-style sgetmask: query the current blocked-signal mask
             * and return it directly as an old_sigset_t in the result. */
            sigset_t host_mask;
            abi_ulong old_style_mask;

            ret = do_sigprocmask(0, NULL, &host_mask);
            if (ret == 0) {
                host_to_target_old_sigset(&old_style_mask, &host_mask);
                ret = old_style_mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Old-style ssetmask: install arg1 as the new blocked-signal
             * mask and return the previous mask as an old_sigset_t. */
            sigset_t new_mask, prev_mask;
            abi_ulong old_style_mask = arg1;

            target_to_host_old_sigset(&new_mask, &old_style_mask);
            ret = do_sigprocmask(SIG_SETMASK, &new_mask, &prev_mask);
            if (ret == 0) {
                host_to_target_old_sigset(&old_style_mask, &prev_mask);
                ret = old_style_mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's sigprocmask takes the mask by value in arg2 and
             * returns the previous mask in the syscall result. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic form: old_sigset_t read from arg2 / written to arg3
             * in guest memory.  'how' is only validated when a new set is
             * actually supplied, matching kernel behaviour. */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
8790     case TARGET_NR_rt_sigprocmask:
8791         {
8792             int how = arg1;
8793             sigset_t set, oldset, *set_ptr;
8794 
8795             if (arg4 != sizeof(target_sigset_t)) {
8796                 return -TARGET_EINVAL;
8797             }
8798 
8799             if (arg2) {
8800                 switch(how) {
8801                 case TARGET_SIG_BLOCK:
8802                     how = SIG_BLOCK;
8803                     break;
8804                 case TARGET_SIG_UNBLOCK:
8805                     how = SIG_UNBLOCK;
8806                     break;
8807                 case TARGET_SIG_SETMASK:
8808                     how = SIG_SETMASK;
8809                     break;
8810                 default:
8811                     return -TARGET_EINVAL;
8812                 }
8813                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8814                     return -TARGET_EFAULT;
8815                 target_to_host_sigset(&set, p);
8816                 unlock_user(p, arg2, 0);
8817                 set_ptr = &set;
8818             } else {
8819                 how = 0;
8820                 set_ptr = NULL;
8821             }
8822             ret = do_sigprocmask(how, set_ptr, &oldset);
8823             if (!is_error(ret) && arg3) {
8824                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8825                     return -TARGET_EFAULT;
8826                 host_to_target_sigset(p, &oldset);
8827                 unlock_user(p, arg3, sizeof(target_sigset_t));
8828             }
8829         }
8830         return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Report the set of pending signals as an old-style sigset. */
            sigset_t pending;

            ret = get_errno(sigpending(&pending));
            if (!is_error(ret)) {
                p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                host_to_target_old_sigset(p, &pending);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
8845     case TARGET_NR_rt_sigpending:
8846         {
8847             sigset_t set;
8848 
8849             /* Yes, this check is >, not != like most. We follow the kernel's
8850              * logic and it does it like this because it implements
8851              * NR_sigpending through the same code path, and in that case
8852              * the old_sigset_t is smaller in size.
8853              */
8854             if (arg2 > sizeof(target_sigset_t)) {
8855                 return -TARGET_EINVAL;
8856             }
8857 
8858             ret = get_errno(sigpending(&set));
8859             if (!is_error(ret)) {
8860                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8861                     return -TARGET_EFAULT;
8862                 host_to_target_sigset(p, &set);
8863                 unlock_user(p, arg1, sizeof(target_sigset_t));
8864             }
8865         }
8866         return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* The suspend mask is stashed in the TaskState so the signal
             * delivery path can restore the original mask afterwards. */
#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                /* Not restarting: mark sigsuspend_mask as live so signal
                 * frame setup restores it on return. */
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
8888     case TARGET_NR_rt_sigsuspend:
8889         {
8890             TaskState *ts = cpu->opaque;
8891 
8892             if (arg2 != sizeof(target_sigset_t)) {
8893                 return -TARGET_EINVAL;
8894             }
8895             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8896                 return -TARGET_EFAULT;
8897             target_to_host_sigset(&ts->sigsuspend_mask, p);
8898             unlock_user(p, arg1, 0);
8899             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8900                                                SIGSET_T_SIZE));
8901             if (ret != -TARGET_ERESTARTSYS) {
8902                 ts->in_sigsuspend = 1;
8903             }
8904         }
8905         return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /* Wait for a signal from the given set with an optional
             * timeout, reporting the siginfo back to the guest. */
            sigset_t wait_set;
            struct timespec ts_buf, *timeout = NULL;
            siginfo_t info;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&wait_set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                if (target_to_host_timespec(&ts_buf, arg3)) {
                    return -TARGET_EFAULT;
                }
                timeout = &ts_buf;
            }
            ret = get_errno(safe_rt_sigtimedwait(&wait_set, &info, timeout,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &info);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* The result is a host signal number; translate it. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
8946     case TARGET_NR_rt_sigqueueinfo:
8947         {
8948             siginfo_t uinfo;
8949 
8950             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8951             if (!p) {
8952                 return -TARGET_EFAULT;
8953             }
8954             target_to_host_siginfo(&uinfo, p);
8955             unlock_user(p, arg3, 0);
8956             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8957         }
8958         return ret;
8959     case TARGET_NR_rt_tgsigqueueinfo:
8960         {
8961             siginfo_t uinfo;
8962 
8963             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8964             if (!p) {
8965                 return -TARGET_EFAULT;
8966             }
8967             target_to_host_siginfo(&uinfo, p);
8968             unlock_user(p, arg4, 0);
8969             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8970         }
8971         return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block signals while the signal frame is torn down so that a
         * newly delivered signal cannot observe half-restored state. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        /* Same signal-blocking requirement as sigreturn above. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
8984     case TARGET_NR_sethostname:
8985         if (!(p = lock_user_string(arg1)))
8986             return -TARGET_EFAULT;
8987         ret = get_errno(sethostname(p, arg2));
8988         unlock_user(p, arg1, 0);
8989         return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            /* Convert the target rlimit into host form, then apply it —
             * except for the memory limits, which are faked (see below). */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            switch (resource) {
            case RLIMIT_AS:
            case RLIMIT_DATA:
            case RLIMIT_STACK:
                /* Pretend success without touching QEMU's own limits. */
                return 0;
            default:
                return get_errno(setrlimit(resource, &rlim));
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* Fetch the host limit and convert it to the target layout. */
            int resource = target_to_host_resource(arg1);
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                struct target_rlimit *target_rlim;

                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
9036     case TARGET_NR_getrusage:
9037         {
9038             struct rusage rusage;
9039             ret = get_errno(getrusage(arg1, &rusage));
9040             if (!is_error(ret)) {
9041                 ret = host_to_target_rusage(arg2, &rusage);
9042             }
9043         }
9044         return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /* Both output pointers are optional; only copy out what the
             * guest asked for. */
            struct timeval now;
            struct timezone zone;

            ret = get_errno(gettimeofday(&now, &zone));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &now)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &zone)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /* Either argument may be NULL; convert only what is present. */
            struct timeval tv_buf, *new_time = NULL;
            struct timezone tz_buf, *new_zone = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv_buf, arg1)) {
                    return -TARGET_EFAULT;
                }
                new_time = &tv_buf;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz_buf, arg2)) {
                    return -TARGET_EFAULT;
                }
                new_zone = &tz_buf;
            }
            return get_errno(settimeofday(new_time, new_zone));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select: arg1 points to a block holding all args. */
        ret = do_old_select(arg1);
#else
        /* Modern select: five separate arguments. */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            /* A NULL guest fd_set address yields a NULL host pointer. */
            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            /* On success, copy back any fd_sets the kernel may have
             * modified, plus the remaining timeout. */
            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            /* Both pathnames must be readable guest strings. */
            void *link_path;

            p = lock_user_string(arg1);
            link_path = lock_user_string(arg2);
            if (p && link_path) {
                ret = get_errno(symlink(p, link_path));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(link_path, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            /* symlinkat(target, newdirfd, linkpath) — both path strings
             * must be readable guest memory. */
            void *link_path;

            p = lock_user_string(arg1);
            link_path = lock_user_string(arg3);
            if (p && link_path) {
                ret = get_errno(symlinkat(p, arg2, link_path));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(link_path, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Guest is reading /proc/self/exe: report the emulated
                 * executable's path rather than the QEMU binary. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Guest is reading /proc/self/exe: report the emulated
                 * executable's path rather than the QEMU binary.
                 * Match the TARGET_NR_readlink handling (and readlink(2)
                 * semantics): return the number of bytes copied, truncate
                 * silently without NUL termination, and never touch the
                 * buffer when realpath() fails.  The old snprintf-based
                 * code NUL-terminated, could read an uninitialized buffer
                 * on realpath() failure, and could return a length larger
                 * than the guest buffer. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* Enable swapping on the guest-named device or file. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
9294     case TARGET_NR_reboot:
9295         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9296            /* arg4 must be ignored in all other cases */
9297            p = lock_user_string(arg4);
9298            if (!p) {
9299                return -TARGET_EFAULT;
9300            }
9301            ret = get_errno(reboot(arg1, arg2, arg3, p));
9302            unlock_user(p, arg4, 0);
9303         } else {
9304            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9305         }
9306         return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* On these targets old-style mmap passes a single pointer
             * (arg1) to a six-element argument block in guest memory. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            /* The flags word must be converted from target to host bits. */
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* Everyone else passes the six mmap arguments directly. */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2's offset argument is in units of 2^MMAP_SHIFT (normally
         * 4096-byte pages); scale it up to a byte offset here. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Expand the affected range down to the stack limit
                 * ourselves, then drop the PROT_GROWSDOWN flag which
                 * target_mprotect does not understand. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() maps the guest address to the host address of the
         * backing memory before invoking the host syscall. */
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* mlockall flag values differ per target; convert them first. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
9387 #ifdef TARGET_NR_truncate
9388     case TARGET_NR_truncate:
9389         if (!(p = lock_user_string(arg1)))
9390             return -TARGET_EFAULT;
9391         ret = get_errno(truncate(p, arg2));
9392         unlock_user(p, arg1, 0);
9393         return ret;
9394 #endif
9395 #ifdef TARGET_NR_ftruncate
9396     case TARGET_NR_ftruncate:
9397         return get_errno(ftruncate(arg1, arg2));
9398 #endif
9399     case TARGET_NR_fchmod:
9400         return get_errno(fchmod(arg1, arg2));
9401 #if defined(TARGET_NR_fchmodat)
9402     case TARGET_NR_fchmodat:
9403         if (!(p = lock_user_string(arg2)))
9404             return -TARGET_EFAULT;
9405         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9406         unlock_user(p, arg2, 0);
9407         return ret;
9408 #endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        /* NOTE(review): this re-applies the 20-nice bias that the glibc
         * wrapper strips, matching the raw syscall ABI — confirm against
         * getpriority(2). */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared tail: TARGET_NR_fstatfs jumps here with 'ret' and the
         * function-scope 'stfs' already filled in, so nothing below may
         * depend on 'p'. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            /* Host libc without statfs f_flags support: report zero. */
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        /* Reuse the statfs conversion tail above. */
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
9466 #ifdef TARGET_NR_statfs64
9467     case TARGET_NR_statfs64:
9468         if (!(p = lock_user_string(arg1))) {
9469             return -TARGET_EFAULT;
9470         }
9471         ret = get_errno(statfs(path(p), &stfs));
9472         unlock_user(p, arg1, 0);
9473     convert_statfs64:
9474         if (!is_error(ret)) {
9475             struct target_statfs64 *target_stfs;
9476 
9477             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9478                 return -TARGET_EFAULT;
9479             __put_user(stfs.f_type, &target_stfs->f_type);
9480             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9481             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9482             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9483             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9484             __put_user(stfs.f_files, &target_stfs->f_files);
9485             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9486             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9487             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9488             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9489             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9490             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9491             unlock_user_struct(target_stfs, arg3, 1);
9492         }
9493         return ret;
9494     case TARGET_NR_fstatfs64:
9495         ret = get_errno(fstatfs(arg1, &stfs));
9496         goto convert_statfs64;
9497 #endif
#ifdef TARGET_NR_socketcall
    /*
     * Socket family: each case delegates to a do_*() helper elsewhere in
     * this file that performs the guest<->host argument conversions.
     */
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* Plain accept() is accept4() with no flags.  */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No pointer arguments, so the host syscall can be used directly.  */
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address.  */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag selects receive (0) vs send (1) in the shared helper.  */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address.  */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /*
         * NOTE(review): on failure 'ret' is a negative target errno but is
         * still passed to unlock_user() as the dirtied length -- confirm
         * unlock_user() tolerates this (other call sites do the same).
         */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        /*
         * syslog(type, bufp, len): arg1 = action, arg2 = guest buffer,
         * arg3 = buffer length in bytes.
         */
        {
            /*
             * The length is arg3, not arg2 (arg2 is the buffer pointer).
             * The original code checked 'len = arg2' but then used arg3 as
             * the real size for lock_user() and sys_syslog(), so the
             * len < 0 / len == 0 checks tested the wrong argument.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions ignore the buffer argument.  */
                return get_errno(sys_syslog((int)arg1, NULL, len));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                /*
                 * A guest itimerval is two consecutive target_timevals
                 * (it_interval then it_value); convert each separately.
                 */
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                /* NULL new-value pointer is passed straight through.  */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Copy the old value back only if the guest asked for it.  */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                /* Same two-timeval guest layout as setitimer above.  */
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        /* Conversion of 'st' to the guest layout is shared via do_stat.  */
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Convert host struct stat in 'st' to guest layout at arg2.  */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding/unset fields don't leak host data.  */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when both host and target have them.  */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter do_syscall() with shifted arguments.  */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only collect host rusage if the guest supplied a buffer.  */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (e.g. WNOHANG with no state change) means
                   'status' was not filled in; skip the copy-out then.  */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            /* Copy out field by field so each value is byte-swapped for
               the guest; skipped if the guest passed a NULL pointer.  */
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    /*
     * SysV IPC family: either the multiplexed ipc() entry point or the
     * individual syscalls, depending on the target architecture.  Calls
     * with pointer arguments go through do_*() conversion helpers; calls
     * with only scalar arguments use the host syscall directly.
     */
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() with no timeout.  */
        return do_semtimedop(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run QEMU-side teardown (e.g. gdbstub notification) before the
           whole thread group exits.  */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* 32-bit x86 only: virtual-8086 mode support.  */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is read-modify-write: convert in, call, then
               copy the (possibly updated) structure back out.  */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* NOTE(review): 'phtx' always points at 'htx' here, so the
               '&& phtx' test is redundant (kept for byte-identity).  */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host lacks llseek (64-bit host): build the 64-bit offset from
               the guest's high (arg2) and low (arg3) halves and use lseek.  */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the resulting 64-bit offset at *arg4.  */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        /*
         * Three build variants follow:
         *  1) 32-bit guest on 64-bit host using host getdents: entries are
         *     converted via a bounce buffer (layouts differ in field size).
         *  2) matching ABI widths: byte-swap the entries in place.
         *  3) host only has getdents64: emulate getdents on top of it,
         *     converting dirent64 records to target dirent in place.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            /* Bounce buffer for the host-format records.  */
            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
		struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
		int count1, tnamelen;

		count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
		tde = target_dirp;
                /* Repack each host record into the (smaller) target layout.  */
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
		    count1 += treclen;
                }
		ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                /* Same layout as the guest: just byte-swap in place.  */
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    /* Read all source fields before any overwrite.  */
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                /* Report the size of the converted records to the guest.  */
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                /* dirent64 has the same layout on host and guest; walk the
                   records and byte-swap the multi-byte fields in place.  */
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
    /*
     * poll() and ppoll() share the pollfd conversion; the inner switch on
     * 'num' (the syscall number being dispatched) selects the variant.
     */
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                /* Bound nfds before it is used to size allocations.  */
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    return -TARGET_EINVAL;
                }

                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    return -TARGET_EFAULT;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    /* The guest must pass a full-sized sigset.  */
                    if (arg5 != sizeof(target_sigset_t)) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EINVAL;
                    }

                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                /* ppoll writes back the remaining timeout on success.  */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            /* Copy revents back to the guest pollfd array.  */
            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        return ret;
#endif
10242     case TARGET_NR_flock:
10243         /* NOTE: the flock constant seems to be the same for every
10244            Linux platform */
10245         return get_errno(safe_flock(arg1, arg2));
10246     case TARGET_NR_readv:
10247         {
                  /* Scatter-read: lock_iovec(…, 0) maps the guest iovec without
                   * copying data in; unlock_iovec(…, 1) copies the read bytes
                   * back out to the guest. */
10248             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10249             if (vec != NULL) {
10250                 ret = get_errno(safe_readv(arg1, vec, arg3));
10251                 unlock_iovec(vec, arg2, arg3, 1);
10252             } else {
                      /* lock_iovec() failed and left the cause in errno */
10253                 ret = -host_to_target_errno(errno);
10254             }
10255         }
10256         return ret;
10257     case TARGET_NR_writev:
10258         {
                  /* Gather-write: mirror image of readv — data copied in on
                   * lock (copy=1), nothing copied back on unlock (copy=0). */
10259             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10260             if (vec != NULL) {
10261                 ret = get_errno(safe_writev(arg1, vec, arg3));
10262                 unlock_iovec(vec, arg2, arg3, 0);
10263             } else {
10264                 ret = -host_to_target_errno(errno);
10265             }
10266         }
10267         return ret;
10268 #if defined(TARGET_NR_preadv)
10269     case TARGET_NR_preadv:
10270         {
10271             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10272             if (vec != NULL) {
10273                 unsigned long low, high;
10274 
                      /* arg4/arg5 hold the two halves of the 64-bit offset;
                       * the helper reassembles them per the target ABI. */
10275                 target_to_host_low_high(arg4, arg5, &low, &high);
10276                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10277                 unlock_iovec(vec, arg2, arg3, 1);
10278             } else {
10279                 ret = -host_to_target_errno(errno);
10280            }
10281         }
10282         return ret;
10283 #endif
10284 #if defined(TARGET_NR_pwritev)
10285     case TARGET_NR_pwritev:
10286         {
10287             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10288             if (vec != NULL) {
10289                 unsigned long low, high;
10290 
10291                 target_to_host_low_high(arg4, arg5, &low, &high);
10292                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10293                 unlock_iovec(vec, arg2, arg3, 0);
10294             } else {
10295                 ret = -host_to_target_errno(errno);
10296            }
10297         }
10298         return ret;
10299 #endif
10300     case TARGET_NR_getsid:
10301         return get_errno(getsid(arg1));
10302 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10303     case TARGET_NR_fdatasync:
10304         return get_errno(fdatasync(arg1));
10305 #endif
10306 #ifdef TARGET_NR__sysctl
10307     case TARGET_NR__sysctl:
10308         /* We don't implement this, but ENOTDIR is always a safe
10309            return value. */
10310         return -TARGET_ENOTDIR;
10311 #endif
10312     case TARGET_NR_sched_getaffinity:
10313         {
10314             unsigned int mask_size;
10315             unsigned long *mask;
10316 
10317             /*
10318              * sched_getaffinity needs multiples of ulong, so need to take
10319              * care of mismatches between target ulong and host ulong sizes.
10320              */
10321             if (arg2 & (sizeof(abi_ulong) - 1)) {
10322                 return -TARGET_EINVAL;
10323             }
                  /* round the guest-supplied length up to whole host longs */
10324             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10325 
10326             mask = alloca(mask_size);
10327             memset(mask, 0, mask_size);
10328             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10329 
10330             if (!is_error(ret)) {
10331                 if (ret > arg2) {
10332                     /* More data returned than the caller's buffer will fit.
10333                      * This only happens if sizeof(abi_long) < sizeof(long)
10334                      * and the caller passed us a buffer holding an odd number
10335                      * of abi_longs. If the host kernel is actually using the
10336                      * extra 4 bytes then fail EINVAL; otherwise we can just
10337                      * ignore them and only copy the interesting part.
10338                      */
10339                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10340                     if (numcpus > arg2 * 8) {
10341                         return -TARGET_EINVAL;
10342                     }
10343                     ret = arg2;
10344                 }
10345 
10346                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10347                     return -TARGET_EFAULT;
10348                 }
10349             }
10350         }
10351         return ret;
10352     case TARGET_NR_sched_setaffinity:
10353         {
10354             unsigned int mask_size;
10355             unsigned long *mask;
10356 
10357             /*
10358              * sched_setaffinity needs multiples of ulong, so need to take
10359              * care of mismatches between target ulong and host ulong sizes.
10360              */
10361             if (arg2 & (sizeof(abi_ulong) - 1)) {
10362                 return -TARGET_EINVAL;
10363             }
10364             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10365             mask = alloca(mask_size);
10366 
                  /* converts the guest mask in place; returns a target errno
                   * (e.g. EFAULT) on failure */
10367             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10368             if (ret) {
10369                 return ret;
10370             }
10371 
10372             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10373         }
10374     case TARGET_NR_getcpu:
10375         {
10376             unsigned cpu, node;
                  /* only query the fields the guest passed pointers for */
10377             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10378                                        arg2 ? &node : NULL,
10379                                        NULL));
10380             if (is_error(ret)) {
10381                 return ret;
10382             }
10383             if (arg1 && put_user_u32(cpu, arg1)) {
10384                 return -TARGET_EFAULT;
10385             }
10386             if (arg2 && put_user_u32(node, arg2)) {
10387                 return -TARGET_EFAULT;
10388             }
10389         }
10390         return ret;
10391     case TARGET_NR_sched_setparam:
10392         {
10393             struct sched_param *target_schp;
10394             struct sched_param schp;
10395 
10396             if (arg2 == 0) {
10397                 return -TARGET_EINVAL;
10398             }
10399             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10400                 return -TARGET_EFAULT;
                  /* sched_priority is the only field translated here */
10401             schp.sched_priority = tswap32(target_schp->sched_priority);
10402             unlock_user_struct(target_schp, arg2, 0);
10403             return get_errno(sched_setparam(arg1, &schp));
10404         }
10405     case TARGET_NR_sched_getparam:
10406         {
10407             struct sched_param *target_schp;
10408             struct sched_param schp;
10409 
10410             if (arg2 == 0) {
10411                 return -TARGET_EINVAL;
10412             }
10413             ret = get_errno(sched_getparam(arg1, &schp));
10414             if (!is_error(ret)) {
10415                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10416                     return -TARGET_EFAULT;
10417                 target_schp->sched_priority = tswap32(schp.sched_priority);
10418                 unlock_user_struct(target_schp, arg2, 1);
10419             }
10420         }
10421         return ret;
10422     case TARGET_NR_sched_setscheduler:
10423         {
10424             struct sched_param *target_schp;
10425             struct sched_param schp;
10426             if (arg3 == 0) {
10427                 return -TARGET_EINVAL;
10428             }
10429             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10430                 return -TARGET_EFAULT;
10431             schp.sched_priority = tswap32(target_schp->sched_priority);
10432             unlock_user_struct(target_schp, arg3, 0);
10433             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10434         }
10435     case TARGET_NR_sched_getscheduler:
10436         return get_errno(sched_getscheduler(arg1));
10437     case TARGET_NR_sched_yield:
10438         return get_errno(sched_yield());
10439     case TARGET_NR_sched_get_priority_max:
10440         return get_errno(sched_get_priority_max(arg1));
10441     case TARGET_NR_sched_get_priority_min:
10442         return get_errno(sched_get_priority_min(arg1));
10443 #ifdef TARGET_NR_sched_rr_get_interval
10444     case TARGET_NR_sched_rr_get_interval:
10445         {
10446             struct timespec ts;
10447             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10448             if (!is_error(ret)) {
                      /* copy-out failure (EFAULT) replaces the success code */
10449                 ret = host_to_target_timespec(arg2, &ts);
10450             }
10451         }
10452         return ret;
10453 #endif
10454 #if defined(TARGET_NR_nanosleep)
10455     case TARGET_NR_nanosleep:
10456         {
10457             struct timespec req, rem;
                  /* NOTE(review): the return values of target_to_host_timespec
                   * and host_to_target_timespec are ignored here, so a bad
                   * guest pointer is not reported as EFAULT — TODO confirm
                   * against newer QEMU, which checks both. */
10458             target_to_host_timespec(&req, arg1);
10459             ret = get_errno(safe_nanosleep(&req, &rem));
                  /* remaining time is only meaningful on error (EINTR) */
10460             if (is_error(ret) && arg2) {
10461                 host_to_target_timespec(arg2, &rem);
10462             }
10463         }
10464         return ret;
10465 #endif
10466     case TARGET_NR_prctl:
              /* Options whose arguments are pointers (or emulated CPU state)
               * need explicit translation; everything else falls through to
               * the host prctl() in the default case below. */
10467         switch (arg1) {
10468         case PR_GET_PDEATHSIG:
10469         {
                  /* bounce the result through a host int, then copy to guest */
10470             int deathsig;
10471             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10472             if (!is_error(ret) && arg2
10473                 && put_user_ual(deathsig, arg2)) {
10474                 return -TARGET_EFAULT;
10475             }
10476             return ret;
10477         }
10478 #ifdef PR_GET_NAME
10479         case PR_GET_NAME:
10480         {
                  /* the thread name is a fixed 16-byte buffer (see prctl(2)) */
10481             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10482             if (!name) {
10483                 return -TARGET_EFAULT;
10484             }
10485             ret = get_errno(prctl(arg1, (unsigned long)name,
10486                                   arg3, arg4, arg5));
10487             unlock_user(name, arg2, 16);
10488             return ret;
10489         }
10490         case PR_SET_NAME:
10491         {
10492             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10493             if (!name) {
10494                 return -TARGET_EFAULT;
10495             }
10496             ret = get_errno(prctl(arg1, (unsigned long)name,
10497                                   arg3, arg4, arg5));
10498             unlock_user(name, arg2, 0);
10499             return ret;
10500         }
10501 #endif
10502 #ifdef TARGET_MIPS
10503         case TARGET_PR_GET_FP_MODE:
10504         {
                  /* report the emulated CPU's FR/FRE state as mode bits */
10505             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10506             ret = 0;
10507             if (env->CP0_Status & (1 << CP0St_FR)) {
10508                 ret |= TARGET_PR_FP_MODE_FR;
10509             }
10510             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10511                 ret |= TARGET_PR_FP_MODE_FRE;
10512             }
10513             return ret;
10514         }
10515         case TARGET_PR_SET_FP_MODE:
10516         {
10517             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10518             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10519             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10520             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10521             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10522 
10523             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10524                                             TARGET_PR_FP_MODE_FRE;
10525 
10526             /* If nothing to change, return right away, successfully.  */
10527             if (old_fr == new_fr && old_fre == new_fre) {
10528                 return 0;
10529             }
10530             /* Check the value is valid */
10531             if (arg2 & ~known_bits) {
10532                 return -TARGET_EOPNOTSUPP;
10533             }
10534             /* Setting FRE without FR is not supported.  */
10535             if (new_fre && !new_fr) {
10536                 return -TARGET_EOPNOTSUPP;
10537             }
10538             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10539                 /* FR1 is not supported */
10540                 return -TARGET_EOPNOTSUPP;
10541             }
10542             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10543                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10544                 /* cannot set FR=0 */
10545                 return -TARGET_EOPNOTSUPP;
10546             }
10547             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10548                 /* Cannot set FRE=1 */
10549                 return -TARGET_EOPNOTSUPP;
10550             }
10551 
                  /* when the register model changes, move the odd singles
                   * between the halves of the even-numbered double registers */
10552             int i;
10553             fpr_t *fpr = env->active_fpu.fpr;
10554             for (i = 0; i < 32 ; i += 2) {
10555                 if (!old_fr && new_fr) {
10556                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10557                 } else if (old_fr && !new_fr) {
10558                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10559                 }
10560             }
10561 
10562             if (new_fr) {
10563                 env->CP0_Status |= (1 << CP0St_FR);
10564                 env->hflags |= MIPS_HFLAG_F64;
10565             } else {
10566                 env->CP0_Status &= ~(1 << CP0St_FR);
10567                 env->hflags &= ~MIPS_HFLAG_F64;
10568             }
10569             if (new_fre) {
10570                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10571                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10572                     env->hflags |= MIPS_HFLAG_FRE;
10573                 }
10574             } else {
10575                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10576                 env->hflags &= ~MIPS_HFLAG_FRE;
10577             }
10578 
10579             return 0;
10580         }
10581 #endif /* MIPS */
10582 #ifdef TARGET_AARCH64
10583         case TARGET_PR_SVE_SET_VL:
10584             /*
10585              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10586              * PR_SVE_VL_INHERIT.  Note the kernel definition
10587              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10588              * even though the current architectural maximum is VQ=16.
10589              */
10590             ret = -TARGET_EINVAL;
10591             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10592                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10593                 CPUARMState *env = cpu_env;
10594                 ARMCPU *cpu = env_archcpu(env);
10595                 uint32_t vq, old_vq;
10596 
10597                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                      /* clamp requested VQ to [1, sve_max_vq] */
10598                 vq = MAX(arg2 / 16, 1);
10599                 vq = MIN(vq, cpu->sve_max_vq);
10600 
                      /* shrinking discards the high parts of Z/P registers */
10601                 if (vq < old_vq) {
10602                     aarch64_sve_narrow_vq(env, vq);
10603                 }
10604                 env->vfp.zcr_el[1] = vq - 1;
10605                 arm_rebuild_hflags(env);
                      /* on success return the new vector length in bytes */
10606                 ret = vq * 16;
10607             }
10608             return ret;
10609         case TARGET_PR_SVE_GET_VL:
10610             ret = -TARGET_EINVAL;
10611             {
10612                 ARMCPU *cpu = env_archcpu(cpu_env);
10613                 if (cpu_isar_feature(aa64_sve, cpu)) {
10614                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10615                 }
10616             }
10617             return ret;
10618         case TARGET_PR_PAC_RESET_KEYS:
10619             {
10620                 CPUARMState *env = cpu_env;
10621                 ARMCPU *cpu = env_archcpu(env);
10622 
10623                 if (arg3 || arg4 || arg5) {
10624                     return -TARGET_EINVAL;
10625                 }
10626                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10627                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10628                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10629                                TARGET_PR_PAC_APGAKEY);
                          /* shadows the outer 'ret' deliberately: accumulates
                           * the getrandom status across the selected keys */
10630                     int ret = 0;
10631                     Error *err = NULL;
10632 
                          /* arg2 == 0 means "reset all keys" */
10633                     if (arg2 == 0) {
10634                         arg2 = all;
10635                     } else if (arg2 & ~all) {
10636                         return -TARGET_EINVAL;
10637                     }
10638                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10639                         ret |= qemu_guest_getrandom(&env->keys.apia,
10640                                                     sizeof(ARMPACKey), &err);
10641                     }
10642                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10643                         ret |= qemu_guest_getrandom(&env->keys.apib,
10644                                                     sizeof(ARMPACKey), &err);
10645                     }
10646                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10647                         ret |= qemu_guest_getrandom(&env->keys.apda,
10648                                                     sizeof(ARMPACKey), &err);
10649                     }
10650                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10651                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10652                                                     sizeof(ARMPACKey), &err);
10653                     }
10654                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10655                         ret |= qemu_guest_getrandom(&env->keys.apga,
10656                                                     sizeof(ARMPACKey), &err);
10657                     }
10658                     if (ret != 0) {
10659                         /*
10660                          * Some unknown failure in the crypto.  The best
10661                          * we can do is log it and fail the syscall.
10662                          * The real syscall cannot fail this way.
10663                          */
10664                         qemu_log_mask(LOG_UNIMP,
10665                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10666                                       error_get_pretty(err));
10667                         error_free(err);
10668                         return -TARGET_EIO;
10669                     }
10670                     return 0;
10671                 }
10672             }
10673             return -TARGET_EINVAL;
10674 #endif /* AARCH64 */
10675         case PR_GET_SECCOMP:
10676         case PR_SET_SECCOMP:
10677             /* Disable seccomp to prevent the target disabling syscalls we
10678              * need. */
10679             return -TARGET_EINVAL;
10680         default:
10681             /* Most prctl options have no pointer arguments */
10682             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10683         }
10684         break;
10685 #ifdef TARGET_NR_arch_prctl
10686     case TARGET_NR_arch_prctl:
10687         return do_arch_prctl(cpu_env, arg1, arg2);
10688 #endif
10689 #ifdef TARGET_NR_pread64
10690     case TARGET_NR_pread64:
          /* On ABIs that align 64-bit values to even register pairs a pad
           * register is inserted, so the offset halves arrive one slot later. */
10691         if (regpairs_aligned(cpu_env, num)) {
10692             arg4 = arg5;
10693             arg5 = arg6;
10694         }
10695         if (arg2 == 0 && arg3 == 0) {
10696             /* Special-case NULL buffer and zero length, which should succeed */
10697             p = 0;
10698         } else {
10699             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10700             if (!p) {
10701                 return -TARGET_EFAULT;
10702             }
10703         }
10704         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
              /* copy back only the bytes actually read */
10705         unlock_user(p, arg2, ret);
10706         return ret;
10707     case TARGET_NR_pwrite64:
10708         if (regpairs_aligned(cpu_env, num)) {
10709             arg4 = arg5;
10710             arg5 = arg6;
10711         }
10712         if (arg2 == 0 && arg3 == 0) {
10713             /* Special-case NULL buffer and zero length, which should succeed */
10714             p = 0;
10715         } else {
10716             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10717             if (!p) {
10718                 return -TARGET_EFAULT;
10719             }
10720         }
10721         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10722         unlock_user(p, arg2, 0);
10723         return ret;
10724 #endif
10725     case TARGET_NR_getcwd:
10726         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10727             return -TARGET_EFAULT;
10728         ret = get_errno(sys_getcwd1(p, arg2));
              /* on success ret is the string length incl. NUL; copy that much */
10729         unlock_user(p, arg1, ret);
10730         return ret;
10731     case TARGET_NR_capget:
10732     case TARGET_NR_capset:
10733     {
10734         struct target_user_cap_header *target_header;
10735         struct target_user_cap_data *target_data = NULL;
10736         struct __user_cap_header_struct header;
10737         struct __user_cap_data_struct data[2];
10738         struct __user_cap_data_struct *dataptr = NULL;
10739         int i, target_datalen;
10740         int data_items = 1;
10741 
              /* header is VERIFY_WRITE even for capset: the kernel writes the
               * (possibly corrected) version back into it in all cases */
10742         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10743             return -TARGET_EFAULT;
10744         }
10745         header.version = tswap32(target_header->version);
10746         header.pid = tswap32(target_header->pid);
10747 
10748         if (header.version != _LINUX_CAPABILITY_VERSION) {
10749             /* Version 2 and up takes pointer to two user_data structs */
10750             data_items = 2;
10751         }
10752 
10753         target_datalen = sizeof(*target_data) * data_items;
10754 
              /* arg2 (data pointer) may legitimately be NULL, e.g. to probe
               * the preferred capability version */
10755         if (arg2) {
10756             if (num == TARGET_NR_capget) {
10757                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10758             } else {
10759                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10760             }
10761             if (!target_data) {
10762                 unlock_user_struct(target_header, arg1, 0);
10763                 return -TARGET_EFAULT;
10764             }
10765 
10766             if (num == TARGET_NR_capset) {
10767                 for (i = 0; i < data_items; i++) {
10768                     data[i].effective = tswap32(target_data[i].effective);
10769                     data[i].permitted = tswap32(target_data[i].permitted);
10770                     data[i].inheritable = tswap32(target_data[i].inheritable);
10771                 }
10772             }
10773 
10774             dataptr = data;
10775         }
10776 
10777         if (num == TARGET_NR_capget) {
10778             ret = get_errno(capget(&header, dataptr));
10779         } else {
10780             ret = get_errno(capset(&header, dataptr));
10781         }
10782 
10783         /* The kernel always updates version for both capget and capset */
10784         target_header->version = tswap32(header.version);
10785         unlock_user_struct(target_header, arg1, 1);
10786 
10787         if (arg2) {
10788             if (num == TARGET_NR_capget) {
                      /* copy the retrieved capability sets back to the guest */
10789                 for (i = 0; i < data_items; i++) {
10790                     target_data[i].effective = tswap32(data[i].effective);
10791                     target_data[i].permitted = tswap32(data[i].permitted);
10792                     target_data[i].inheritable = tswap32(data[i].inheritable);
10793                 }
10794                 unlock_user(target_data, ar2, target_datalen);
10795             } else {
10796                 unlock_user(target_data, arg2, 0);
10797             }
10798         }
10799         return ret;
10800     }
10801     case TARGET_NR_sigaltstack:
10802         return do_sigaltstack(arg1, arg2,
10803                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10804 
10805 #ifdef CONFIG_SENDFILE
10806 #ifdef TARGET_NR_sendfile
10807     case TARGET_NR_sendfile:
10808     {
              /* the offset argument is in-out: read it before the call and
               * write the updated value back afterwards (NULL means "use the
               * file position") */
10809         off_t *offp = NULL;
10810         off_t off;
10811         if (arg3) {
10812             ret = get_user_sal(off, arg3);
10813             if (is_error(ret)) {
10814                 return ret;
10815             }
10816             offp = &off;
10817         }
10818         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10819         if (!is_error(ret) && arg3) {
10820             abi_long ret2 = put_user_sal(off, arg3);
10821             if (is_error(ret2)) {
10822                 ret = ret2;
10823             }
10824         }
10825         return ret;
10826     }
10827 #endif
10828 #ifdef TARGET_NR_sendfile64
10829     case TARGET_NR_sendfile64:
10830     {
              /* identical to sendfile above, but the guest offset is always
               * a 64-bit value (get/put_user_s64 vs the abi-long accessors) */
10831         off_t *offp = NULL;
10832         off_t off;
10833         if (arg3) {
10834             ret = get_user_s64(off, arg3);
10835             if (is_error(ret)) {
10836                 return ret;
10837             }
10838             offp = &off;
10839         }
10840         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10841         if (!is_error(ret) && arg3) {
10842             abi_long ret2 = put_user_s64(off, arg3);
10843             if (is_error(ret2)) {
10844                 ret = ret2;
10845             }
10846         }
10847         return ret;
10848     }
10849 #endif
10850 #endif
10851 #ifdef TARGET_NR_vfork
10852     case TARGET_NR_vfork:
          /* emulated as a CLONE_VFORK|CLONE_VM fork of the guest CPU state */
10853         return get_errno(do_fork(cpu_env,
10854                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10855                          0, 0, 0, 0));
10856 #endif
10857 #ifdef TARGET_NR_ugetrlimit
10858     case TARGET_NR_ugetrlimit:
10859     {
10860 	struct rlimit rlim;
10861 	int resource = target_to_host_resource(arg1);
10862 	ret = get_errno(getrlimit(resource, &rlim));
10863 	if (!is_error(ret)) {
10864 	    struct target_rlimit *target_rlim;
10865             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10866                 return -TARGET_EFAULT;
10867 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10868 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10869             unlock_user_struct(target_rlim, arg2, 1);
10870 	}
10871         return ret;
10872     }
10873 #endif
10874 #ifdef TARGET_NR_truncate64
10875     case TARGET_NR_truncate64:
10876         if (!(p = lock_user_string(arg1)))
10877             return -TARGET_EFAULT;
          /* helper reassembles the 64-bit length from the arg registers,
           * which varies per target ABI */
10878 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10879         unlock_user(p, arg1, 0);
10880         return ret;
10881 #endif
10882 #ifdef TARGET_NR_ftruncate64
10883     case TARGET_NR_ftruncate64:
10884         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10885 #endif
10886 #ifdef TARGET_NR_stat64
10887     case TARGET_NR_stat64:
10888         if (!(p = lock_user_string(arg1))) {
10889             return -TARGET_EFAULT;
10890         }
              /* path() applies the QEMU_LD_PREFIX/sysroot remapping */
10891         ret = get_errno(stat(path(p), &st));
10892         unlock_user(p, arg1, 0);
10893         if (!is_error(ret))
10894             ret = host_to_target_stat64(cpu_env, arg2, &st);
10895         return ret;
10896 #endif
10897 #ifdef TARGET_NR_lstat64
10898     case TARGET_NR_lstat64:
10899         if (!(p = lock_user_string(arg1))) {
10900             return -TARGET_EFAULT;
10901         }
10902         ret = get_errno(lstat(path(p), &st));
10903         unlock_user(p, arg1, 0);
10904         if (!is_error(ret))
10905             ret = host_to_target_stat64(cpu_env, arg2, &st);
10906         return ret;
10907 #endif
10908 #ifdef TARGET_NR_fstat64
10909     case TARGET_NR_fstat64:
10910         ret = get_errno(fstat(arg1, &st));
10911         if (!is_error(ret))
10912             ret = host_to_target_stat64(cpu_env, arg2, &st);
10913         return ret;
10914 #endif
10915 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10916 #ifdef TARGET_NR_fstatat64
10917     case TARGET_NR_fstatat64:
10918 #endif
10919 #ifdef TARGET_NR_newfstatat
10920     case TARGET_NR_newfstatat:
10921 #endif
              /* same syscall under two historical names; args: dirfd, path,
               * statbuf, flags */
10922         if (!(p = lock_user_string(arg2))) {
10923             return -TARGET_EFAULT;
10924         }
10925         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10926         unlock_user(p, arg2, 0);
10927         if (!is_error(ret))
10928             ret = host_to_target_stat64(cpu_env, arg3, &st);
10929         return ret;
10930 #endif
10931 #if defined(TARGET_NR_statx)
10932     case TARGET_NR_statx:
10933         {
10934             struct target_statx *target_stx;
10935             int dirfd = arg1;
10936             int flags = arg3;
10937 
10938             p = lock_user_string(arg2);
10939             if (p == NULL) {
10940                 return -TARGET_EFAULT;
10941             }
              /* First choice: the host's own statx syscall, if built against
               * a libc/kernel that defines it. */
10942 #if defined(__NR_statx)
10943             {
10944                 /*
10945                  * It is assumed that struct statx is architecture independent.
10946                  */
10947                 struct target_statx host_stx;
10948                 int mask = arg4;
10949 
10950                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10951                 if (!is_error(ret)) {
10952                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10953                         unlock_user(p, arg2, 0);
10954                         return -TARGET_EFAULT;
10955                     }
10956                 }
10957 
                      /* only ENOSYS falls through to the fstatat emulation */
10958                 if (ret != -TARGET_ENOSYS) {
10959                     unlock_user(p, arg2, 0);
10960                     return ret;
10961                 }
10962             }
10963 #endif
              /* Fallback: emulate statx via fstatat and synthesize the statx
               * fields the plain struct stat can provide (no btime, and only
               * tv_sec of the timestamps — tv_nsec stays zero from memset). */
10964             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10965             unlock_user(p, arg2, 0);
10966 
10967             if (!is_error(ret)) {
10968                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10969                     return -TARGET_EFAULT;
10970                 }
10971                 memset(target_stx, 0, sizeof(*target_stx));
10972                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10973                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10974                 __put_user(st.st_ino, &target_stx->stx_ino);
10975                 __put_user(st.st_mode, &target_stx->stx_mode);
10976                 __put_user(st.st_uid, &target_stx->stx_uid);
10977                 __put_user(st.st_gid, &target_stx->stx_gid);
10978                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10979                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10980                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10981                 __put_user(st.st_size, &target_stx->stx_size);
10982                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10983                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10984                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10985                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10986                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10987                 unlock_user_struct(target_stx, arg5, 1);
10988             }
10989         }
10990         return ret;
10991 #endif
10992 #ifdef TARGET_NR_lchown
10993     case TARGET_NR_lchown:
10994         if (!(p = lock_user_string(arg1)))
10995             return -TARGET_EFAULT;
              /* low2high*() widen legacy 16-bit guest ids to host uid_t/gid_t */
10996         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10997         unlock_user(p, arg1, 0);
10998         return ret;
10999 #endif
11000 #ifdef TARGET_NR_getuid
11001     case TARGET_NR_getuid:
11002         return get_errno(high2lowuid(getuid()));
11003 #endif
11004 #ifdef TARGET_NR_getgid
11005     case TARGET_NR_getgid:
11006         return get_errno(high2lowgid(getgid()));
11007 #endif
11008 #ifdef TARGET_NR_geteuid
11009     case TARGET_NR_geteuid:
11010         return get_errno(high2lowuid(geteuid()));
11011 #endif
11012 #ifdef TARGET_NR_getegid
11013     case TARGET_NR_getegid:
11014         return get_errno(high2lowgid(getegid()));
11015 #endif
11016     case TARGET_NR_setreuid:
11017         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11018     case TARGET_NR_setregid:
11019         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11020     case TARGET_NR_getgroups:
11021         {
11022             int gidsetsize = arg1;
11023             target_id *target_grouplist;
11024             gid_t *grouplist;
11025             int i;
11026 
                  /* NOTE(review): gidsetsize comes straight from the guest and
                   * is used in alloca() before the == 0 early-out and with no
                   * upper/negative bound visible here — verify upstream added
                   * range checking. */
11027             grouplist = alloca(gidsetsize * sizeof(gid_t));
11028             ret = get_errno(getgroups(gidsetsize, grouplist));
                  /* size 0 is a pure query for the group count; no copy-out */
11029             if (gidsetsize == 0)
11030                 return ret;
11031             if (!is_error(ret)) {
11032                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11033                 if (!target_grouplist)
11034                     return -TARGET_EFAULT;
                      /* ret is the number of groups actually returned */
11035                 for(i = 0;i < ret; i++)
11036                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11037                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11038             }
11039         }
11040         return ret;
11041     case TARGET_NR_setgroups:
11042         {
11043             int gidsetsize = arg1;
11044             target_id *target_grouplist;
11045             gid_t *grouplist = NULL;
11046             int i;
11047             if (gidsetsize) {
11048                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11049                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11050                 if (!target_grouplist) {
11051                     return -TARGET_EFAULT;
11052                 }
11053                 for (i = 0; i < gidsetsize; i++) {
11054                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11055                 }
11056                 unlock_user(target_grouplist, arg2, 0);
11057             }
11058             return get_errno(setgroups(gidsetsize, grouplist));
11059         }
11060     case TARGET_NR_fchown:
11061         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11062 #if defined(TARGET_NR_fchownat)
11063     case TARGET_NR_fchownat:
11064         if (!(p = lock_user_string(arg2)))
11065             return -TARGET_EFAULT;
11066         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11067                                  low2highgid(arg4), arg5));
11068         unlock_user(p, arg2, 0);
11069         return ret;
11070 #endif
11071 #ifdef TARGET_NR_setresuid
11072     case TARGET_NR_setresuid:
11073         return get_errno(sys_setresuid(low2highuid(arg1),
11074                                        low2highuid(arg2),
11075                                        low2highuid(arg3)));
11076 #endif
11077 #ifdef TARGET_NR_getresuid
11078     case TARGET_NR_getresuid:
11079         {
11080             uid_t ruid, euid, suid;
11081             ret = get_errno(getresuid(&ruid, &euid, &suid));
11082             if (!is_error(ret)) {
11083                 if (put_user_id(high2lowuid(ruid), arg1)
11084                     || put_user_id(high2lowuid(euid), arg2)
11085                     || put_user_id(high2lowuid(suid), arg3))
11086                     return -TARGET_EFAULT;
11087             }
11088         }
11089         return ret;
11090 #endif
#ifdef TARGET_NR_setresgid
    /* The guard must test TARGET_NR_setresgid: it previously tested
       TARGET_NR_getresgid, which breaks targets where the two syscalls
       are not both defined. */
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
11097 #ifdef TARGET_NR_getresgid
11098     case TARGET_NR_getresgid:
11099         {
11100             gid_t rgid, egid, sgid;
11101             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11102             if (!is_error(ret)) {
11103                 if (put_user_id(high2lowgid(rgid), arg1)
11104                     || put_user_id(high2lowgid(egid), arg2)
11105                     || put_user_id(high2lowgid(sgid), arg3))
11106                     return -TARGET_EFAULT;
11107             }
11108         }
11109         return ret;
11110 #endif
11111 #ifdef TARGET_NR_chown
11112     case TARGET_NR_chown:
11113         if (!(p = lock_user_string(arg1)))
11114             return -TARGET_EFAULT;
11115         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11116         unlock_user(p, arg1, 0);
11117         return ret;
11118 #endif
11119     case TARGET_NR_setuid:
11120         return get_errno(sys_setuid(low2highuid(arg1)));
11121     case TARGET_NR_setgid:
11122         return get_errno(sys_setgid(low2highgid(arg1)));
11123     case TARGET_NR_setfsuid:
11124         return get_errno(setfsuid(arg1));
11125     case TARGET_NR_setfsgid:
11126         return get_errno(setfsgid(arg1));
11127 
11128 #ifdef TARGET_NR_lchown32
11129     case TARGET_NR_lchown32:
11130         if (!(p = lock_user_string(arg1)))
11131             return -TARGET_EFAULT;
11132         ret = get_errno(lchown(p, arg2, arg3));
11133         unlock_user(p, arg1, 0);
11134         return ret;
11135 #endif
11136 #ifdef TARGET_NR_getuid32
11137     case TARGET_NR_getuid32:
11138         return get_errno(getuid());
11139 #endif
11140 
11141 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11142    /* Alpha specific */
11143     case TARGET_NR_getxuid:
11144          {
11145             uid_t euid;
11146             euid=geteuid();
11147             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11148          }
11149         return get_errno(getuid());
11150 #endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: the real gid is the syscall result and the
       effective gid is delivered in register a4. */
    case TARGET_NR_getxgid:
         {
            gid_t egid;   /* was mistyped as uid_t */

            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
         }
        return get_errno(getgid());
#endif
11161 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11162     /* Alpha specific */
11163     case TARGET_NR_osf_getsysinfo:
11164         ret = -TARGET_EOPNOTSUPP;
11165         switch (arg1) {
11166           case TARGET_GSI_IEEE_FP_CONTROL:
11167             {
11168                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11169                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11170 
11171                 swcr &= ~SWCR_STATUS_MASK;
11172                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11173 
11174                 if (put_user_u64 (swcr, arg2))
11175                         return -TARGET_EFAULT;
11176                 ret = 0;
11177             }
11178             break;
11179 
11180           /* case GSI_IEEE_STATE_AT_SIGNAL:
11181              -- Not implemented in linux kernel.
11182              case GSI_UACPROC:
11183              -- Retrieves current unaligned access state; not much used.
11184              case GSI_PROC_TYPE:
11185              -- Retrieves implver information; surely not used.
11186              case GSI_GET_HWRPB:
11187              -- Grabs a copy of the HWRPB; surely not used.
11188           */
11189         }
11190         return ret;
11191 #endif
11192 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11193     /* Alpha specific */
11194     case TARGET_NR_osf_setsysinfo:
11195         ret = -TARGET_EOPNOTSUPP;
11196         switch (arg1) {
11197           case TARGET_SSI_IEEE_FP_CONTROL:
11198             {
11199                 uint64_t swcr, fpcr;
11200 
11201                 if (get_user_u64 (swcr, arg2)) {
11202                     return -TARGET_EFAULT;
11203                 }
11204 
11205                 /*
11206                  * The kernel calls swcr_update_status to update the
11207                  * status bits from the fpcr at every point that it
11208                  * could be queried.  Therefore, we store the status
11209                  * bits only in FPCR.
11210                  */
11211                 ((CPUAlphaState *)cpu_env)->swcr
11212                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11213 
11214                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11215                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11216                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11217                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11218                 ret = 0;
11219             }
11220             break;
11221 
11222           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11223             {
11224                 uint64_t exc, fpcr, fex;
11225 
11226                 if (get_user_u64(exc, arg2)) {
11227                     return -TARGET_EFAULT;
11228                 }
11229                 exc &= SWCR_STATUS_MASK;
11230                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11231 
11232                 /* Old exceptions are not signaled.  */
11233                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11234                 fex = exc & ~fex;
11235                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11236                 fex &= ((CPUArchState *)cpu_env)->swcr;
11237 
11238                 /* Update the hardware fpcr.  */
11239                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11240                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11241 
11242                 if (fex) {
11243                     int si_code = TARGET_FPE_FLTUNK;
11244                     target_siginfo_t info;
11245 
11246                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11247                         si_code = TARGET_FPE_FLTUND;
11248                     }
11249                     if (fex & SWCR_TRAP_ENABLE_INE) {
11250                         si_code = TARGET_FPE_FLTRES;
11251                     }
11252                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11253                         si_code = TARGET_FPE_FLTUND;
11254                     }
11255                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11256                         si_code = TARGET_FPE_FLTOVF;
11257                     }
11258                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11259                         si_code = TARGET_FPE_FLTDIV;
11260                     }
11261                     if (fex & SWCR_TRAP_ENABLE_INV) {
11262                         si_code = TARGET_FPE_FLTINV;
11263                     }
11264 
11265                     info.si_signo = SIGFPE;
11266                     info.si_errno = 0;
11267                     info.si_code = si_code;
11268                     info._sifields._sigfault._addr
11269                         = ((CPUArchState *)cpu_env)->pc;
11270                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11271                                  QEMU_SI_FAULT, &info);
11272                 }
11273                 ret = 0;
11274             }
11275             break;
11276 
11277           /* case SSI_NVPAIRS:
11278              -- Used with SSIN_UACPROC to enable unaligned accesses.
11279              case SSI_IEEE_STATE_AT_SIGNAL:
11280              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11281              -- Not implemented in linux kernel
11282           */
11283         }
11284         return ret;
11285 #endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  OSF/1-style sigprocmask: the previous signal mask
       is handed back as the syscall return value rather than being stored
       through a user-space pointer.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the target "how" constant to the host value. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* arg2 carries the new mask in old-style (single word) format. */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* Success: return the previous mask as the result value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11317 
11318 #ifdef TARGET_NR_getgid32
11319     case TARGET_NR_getgid32:
11320         return get_errno(getgid());
11321 #endif
11322 #ifdef TARGET_NR_geteuid32
11323     case TARGET_NR_geteuid32:
11324         return get_errno(geteuid());
11325 #endif
11326 #ifdef TARGET_NR_getegid32
11327     case TARGET_NR_getegid32:
11328         return get_errno(getegid());
11329 #endif
11330 #ifdef TARGET_NR_setreuid32
11331     case TARGET_NR_setreuid32:
11332         return get_errno(setreuid(arg1, arg2));
11333 #endif
11334 #ifdef TARGET_NR_setregid32
11335     case TARGET_NR_setregid32:
11336         return get_errno(setregid(arg1, arg2));
11337 #endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* 32-bit-id variant of getgroups (no 16-bit narrowing). */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the count before the alloca to avoid a stack-size
               overflow on negative or enormous values. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Query-only call: ret is just the number of groups. */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* 32-bit-id variant of setgroups. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /* Bound the count before the alloca to avoid a stack-size
               overflow on negative or enormous values. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            /* Only touch guest memory when there is a list to read;
               mirrors the TARGET_NR_setgroups handling. */
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
11381 #ifdef TARGET_NR_fchown32
11382     case TARGET_NR_fchown32:
11383         return get_errno(fchown(arg1, arg2, arg3));
11384 #endif
11385 #ifdef TARGET_NR_setresuid32
11386     case TARGET_NR_setresuid32:
11387         return get_errno(sys_setresuid(arg1, arg2, arg3));
11388 #endif
11389 #ifdef TARGET_NR_getresuid32
11390     case TARGET_NR_getresuid32:
11391         {
11392             uid_t ruid, euid, suid;
11393             ret = get_errno(getresuid(&ruid, &euid, &suid));
11394             if (!is_error(ret)) {
11395                 if (put_user_u32(ruid, arg1)
11396                     || put_user_u32(euid, arg2)
11397                     || put_user_u32(suid, arg3))
11398                     return -TARGET_EFAULT;
11399             }
11400         }
11401         return ret;
11402 #endif
11403 #ifdef TARGET_NR_setresgid32
11404     case TARGET_NR_setresgid32:
11405         return get_errno(sys_setresgid(arg1, arg2, arg3));
11406 #endif
11407 #ifdef TARGET_NR_getresgid32
11408     case TARGET_NR_getresgid32:
11409         {
11410             gid_t rgid, egid, sgid;
11411             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11412             if (!is_error(ret)) {
11413                 if (put_user_u32(rgid, arg1)
11414                     || put_user_u32(egid, arg2)
11415                     || put_user_u32(sgid, arg3))
11416                     return -TARGET_EFAULT;
11417             }
11418         }
11419         return ret;
11420 #endif
11421 #ifdef TARGET_NR_chown32
11422     case TARGET_NR_chown32:
11423         if (!(p = lock_user_string(arg1)))
11424             return -TARGET_EFAULT;
11425         ret = get_errno(chown(p, arg2, arg3));
11426         unlock_user(p, arg1, 0);
11427         return ret;
11428 #endif
11429 #ifdef TARGET_NR_setuid32
11430     case TARGET_NR_setuid32:
11431         return get_errno(sys_setuid(arg1));
11432 #endif
11433 #ifdef TARGET_NR_setgid32
11434     case TARGET_NR_setgid32:
11435         return get_errno(sys_setgid(arg1));
11436 #endif
11437 #ifdef TARGET_NR_setfsuid32
11438     case TARGET_NR_setfsuid32:
11439         return get_errno(setfsuid(arg1));
11440 #endif
11441 #ifdef TARGET_NR_setfsgid32
11442     case TARGET_NR_setfsgid32:
11443         return get_errno(setfsgid(arg1));
11444 #endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            abi_ulong vec_len;

            if (!a) {
                return -TARGET_ENOMEM;
            }
            /*
             * The vector argument is an OUTPUT buffer holding one status
             * byte per page of the queried range, not a NUL-terminated
             * string: lock it for writing with the correct length instead
             * of using lock_user_string().
             */
            vec_len = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);
            p = lock_user(VERIFY_WRITE, arg3, vec_len, 0);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                /* Copy the whole vector back to the guest on success. */
                unlock_user(p, arg3, is_error(ret) ? 0 : vec_len);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
11463 #ifdef TARGET_NR_arm_fadvise64_64
11464     case TARGET_NR_arm_fadvise64_64:
11465         /* arm_fadvise64_64 looks like fadvise64_64 but
11466          * with different argument order: fd, advice, offset, len
11467          * rather than the usual fd, offset, len, advice.
11468          * Note that offset and len are both 64-bit so appear as
11469          * pairs of 32-bit registers.
11470          */
11471         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11472                             target_offset64(arg5, arg6), arg2);
11473         return -host_to_target_errno(ret);
11474 #endif
11475 
11476 #if TARGET_ABI_BITS == 32
11477 
11478 #ifdef TARGET_NR_fadvise64_64
11479     case TARGET_NR_fadvise64_64:
11480 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11481         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11482         ret = arg2;
11483         arg2 = arg3;
11484         arg3 = arg4;
11485         arg4 = arg5;
11486         arg5 = arg6;
11487         arg6 = ret;
11488 #else
11489         /* 6 args: fd, offset (high, low), len (high, low), advice */
11490         if (regpairs_aligned(cpu_env, num)) {
11491             /* offset is in (3,4), len in (5,6) and advice in 7 */
11492             arg2 = arg3;
11493             arg3 = arg4;
11494             arg4 = arg5;
11495             arg5 = arg6;
11496             arg6 = arg7;
11497         }
11498 #endif
11499         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11500                             target_offset64(arg4, arg5), arg6);
11501         return -host_to_target_errno(ret);
11502 #endif
11503 
11504 #ifdef TARGET_NR_fadvise64
11505     case TARGET_NR_fadvise64:
11506         /* 5 args: fd, offset (high, low), len, advice */
11507         if (regpairs_aligned(cpu_env, num)) {
11508             /* offset is in (3,4), len in 5 and advice in 6 */
11509             arg2 = arg3;
11510             arg3 = arg4;
11511             arg4 = arg5;
11512             arg5 = arg6;
11513         }
11514         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11515         return -host_to_target_errno(ret);
11516 #endif
11517 
11518 #else /* not a 32-bit ABI */
11519 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11520 #ifdef TARGET_NR_fadvise64_64
11521     case TARGET_NR_fadvise64_64:
11522 #endif
11523 #ifdef TARGET_NR_fadvise64
11524     case TARGET_NR_fadvise64:
11525 #endif
11526 #ifdef TARGET_S390X
11527         switch (arg4) {
11528         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11529         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11530         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11531         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11532         default: break;
11533         }
11534 #endif
11535         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11536 #endif
11537 #endif /* end of 64-bit ABI fadvise handling */
11538 
11539 #ifdef TARGET_NR_madvise
11540     case TARGET_NR_madvise:
11541         /* A straight passthrough may not be safe because qemu sometimes
11542            turns private file-backed mappings into anonymous mappings.
11543            This will break MADV_DONTNEED.
11544            This is a hint, so ignoring and returning success is ok.  */
11545         return 0;
11546 #endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * fcntl with 64-bit file locks.  The flock64 layout differs
         * between target and host, so GETLK64/SETLK64/SETLKW64 convert
         * the structure in each direction; every other command is
         * forwarded to the generic do_fcntl() helper.
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM packs flock64 differently from EABI. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* Write the (possibly updated) lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
11594 #ifdef TARGET_NR_cacheflush
11595     case TARGET_NR_cacheflush:
11596         /* self-modifying code is handled automatically, so nothing needed */
11597         return 0;
11598 #endif
11599 #ifdef TARGET_NR_getpagesize
11600     case TARGET_NR_getpagesize:
11601         return TARGET_PAGE_SIZE;
11602 #endif
11603     case TARGET_NR_gettid:
11604         return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* On 32-bit ABIs the 64-bit offset arrives as a register pair;
           some ABIs align such pairs, inserting a padding register, in
           which case the real arguments start one slot later. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        /* Reassemble the 64-bit offset from its two halves. */
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
11619 #ifdef CONFIG_ATTR
11620 #ifdef TARGET_NR_setxattr
11621     case TARGET_NR_listxattr:
11622     case TARGET_NR_llistxattr:
11623     {
11624         void *p, *b = 0;
11625         if (arg2) {
11626             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11627             if (!b) {
11628                 return -TARGET_EFAULT;
11629             }
11630         }
11631         p = lock_user_string(arg1);
11632         if (p) {
11633             if (num == TARGET_NR_listxattr) {
11634                 ret = get_errno(listxattr(p, b, arg3));
11635             } else {
11636                 ret = get_errno(llistxattr(p, b, arg3));
11637             }
11638         } else {
11639             ret = -TARGET_EFAULT;
11640         }
11641         unlock_user(p, arg1, 0);
11642         unlock_user(b, arg2, arg3);
11643         return ret;
11644     }
11645     case TARGET_NR_flistxattr:
11646     {
11647         void *b = 0;
11648         if (arg2) {
11649             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11650             if (!b) {
11651                 return -TARGET_EFAULT;
11652             }
11653         }
11654         ret = get_errno(flistxattr(arg1, b, arg3));
11655         unlock_user(b, arg2, arg3);
11656         return ret;
11657     }
11658     case TARGET_NR_setxattr:
11659     case TARGET_NR_lsetxattr:
11660         {
11661             void *p, *n, *v = 0;
11662             if (arg3) {
11663                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11664                 if (!v) {
11665                     return -TARGET_EFAULT;
11666                 }
11667             }
11668             p = lock_user_string(arg1);
11669             n = lock_user_string(arg2);
11670             if (p && n) {
11671                 if (num == TARGET_NR_setxattr) {
11672                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11673                 } else {
11674                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11675                 }
11676             } else {
11677                 ret = -TARGET_EFAULT;
11678             }
11679             unlock_user(p, arg1, 0);
11680             unlock_user(n, arg2, 0);
11681             unlock_user(v, arg3, 0);
11682         }
11683         return ret;
11684     case TARGET_NR_fsetxattr:
11685         {
11686             void *n, *v = 0;
11687             if (arg3) {
11688                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11689                 if (!v) {
11690                     return -TARGET_EFAULT;
11691                 }
11692             }
11693             n = lock_user_string(arg2);
11694             if (n) {
11695                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11696             } else {
11697                 ret = -TARGET_EFAULT;
11698             }
11699             unlock_user(n, arg2, 0);
11700             unlock_user(v, arg3, 0);
11701         }
11702         return ret;
11703     case TARGET_NR_getxattr:
11704     case TARGET_NR_lgetxattr:
11705         {
11706             void *p, *n, *v = 0;
11707             if (arg3) {
11708                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11709                 if (!v) {
11710                     return -TARGET_EFAULT;
11711                 }
11712             }
11713             p = lock_user_string(arg1);
11714             n = lock_user_string(arg2);
11715             if (p && n) {
11716                 if (num == TARGET_NR_getxattr) {
11717                     ret = get_errno(getxattr(p, n, v, arg4));
11718                 } else {
11719                     ret = get_errno(lgetxattr(p, n, v, arg4));
11720                 }
11721             } else {
11722                 ret = -TARGET_EFAULT;
11723             }
11724             unlock_user(p, arg1, 0);
11725             unlock_user(n, arg2, 0);
11726             unlock_user(v, arg3, arg4);
11727         }
11728         return ret;
11729     case TARGET_NR_fgetxattr:
11730         {
11731             void *n, *v = 0;
11732             if (arg3) {
11733                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11734                 if (!v) {
11735                     return -TARGET_EFAULT;
11736                 }
11737             }
11738             n = lock_user_string(arg2);
11739             if (n) {
11740                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11741             } else {
11742                 ret = -TARGET_EFAULT;
11743             }
11744             unlock_user(n, arg2, 0);
11745             unlock_user(v, arg3, arg4);
11746         }
11747         return ret;
11748     case TARGET_NR_removexattr:
11749     case TARGET_NR_lremovexattr:
11750         {
11751             void *p, *n;
11752             p = lock_user_string(arg1);
11753             n = lock_user_string(arg2);
11754             if (p && n) {
11755                 if (num == TARGET_NR_removexattr) {
11756                     ret = get_errno(removexattr(p, n));
11757                 } else {
11758                     ret = get_errno(lremovexattr(p, n));
11759                 }
11760             } else {
11761                 ret = -TARGET_EFAULT;
11762             }
11763             unlock_user(p, arg1, 0);
11764             unlock_user(n, arg2, 0);
11765         }
11766         return ret;
11767     case TARGET_NR_fremovexattr:
11768         {
11769             void *n;
11770             n = lock_user_string(arg2);
11771             if (n) {
11772                 ret = get_errno(fremovexattr(arg1, n));
11773             } else {
11774                 ret = -TARGET_EFAULT;
11775             }
11776             unlock_user(n, arg2, 0);
11777         }
11778         return ret;
11779 #endif
11780 #endif /* CONFIG_ATTR */
11781 #ifdef TARGET_NR_set_thread_area
11782     case TARGET_NR_set_thread_area:
11783 #if defined(TARGET_MIPS)
11784       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11785       return 0;
11786 #elif defined(TARGET_CRIS)
11787       if (arg1 & 0xff)
11788           ret = -TARGET_EINVAL;
11789       else {
11790           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11791           ret = 0;
11792       }
11793       return ret;
11794 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11795       return do_set_thread_area(cpu_env, arg1);
11796 #elif defined(TARGET_M68K)
11797       {
11798           TaskState *ts = cpu->opaque;
11799           ts->tp_value = arg1;
11800           return 0;
11801       }
11802 #else
11803       return -TARGET_ENOSYS;
11804 #endif
11805 #endif
11806 #ifdef TARGET_NR_get_thread_area
11807     case TARGET_NR_get_thread_area:
11808 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11809         return do_get_thread_area(cpu_env, arg1);
11810 #elif defined(TARGET_M68K)
11811         {
11812             TaskState *ts = cpu->opaque;
11813             return ts->tp_value;
11814         }
11815 #else
11816         return -TARGET_ENOSYS;
11817 #endif
11818 #endif
11819 #ifdef TARGET_NR_getdomainname
11820     case TARGET_NR_getdomainname:
11821         return -TARGET_ENOSYS;
11822 #endif
11823 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        /* Convert the guest timespec, then set the host clock directly. */
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        /* Same as clock_settime but the guest passes a 64-bit timespec. */
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        /* Read the host clock and copy the result out to the guest;
         * the conversion's -TARGET_EFAULT propagates through ret. */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        /* 64-bit timespec variant of clock_gettime. */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        /* Query the host clock resolution and copy it to the guest.
         * Per clock_getres(2) a NULL res pointer is valid (the resolution
         * is simply not returned), so only report EFAULT for a non-NULL
         * guest pointer that fails to convert; the original code ignored
         * the conversion result entirely, hiding genuine faults. */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        /* 64-bit timespec variant of clock_getres.  As with the 32-bit
         * case, NULL res is legal per clock_getres(2), but a non-NULL
         * pointer that cannot be written must yield EFAULT rather than
         * silently succeeding (the conversion result was ignored before). */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec64(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* Sleep on the given clock.  arg2 carries the flags (possibly
         * TIMER_ABSTIME), arg3 the requested time, arg4 an optional
         * out-pointer for the remaining time; &ts is reused for both
         * request and remainder since the host call may update it. */
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
11914 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Pass the guest's clear-child-tid pointer straight to the host
         * kernel; g2h() translates the guest address to a host pointer. */
        return get_errno(set_tid_address((int *)g2h(arg1)));
#endif

    case TARGET_NR_tkill:
        /* Signal numbers differ between guest and host ABIs, so translate
         * before delivery; the tid itself needs no translation. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
11926 
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
11944 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* Change file timestamps relative to dirfd arg1.  arg3 is an
             * optional pair of timespecs ([0]=atime, [1]=mtime); NULL
             * means "set both to the current time".  arg2 (the pathname)
             * may also be NULL, in which case dirfd itself is operated on. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                /* The second timespec sits immediately after the first
                 * in guest memory. */
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* Full futex emulation lives in do_futex(); it handles the
         * per-op argument interpretation and timespec conversion. */
        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this fd
             * are byte-swapped/laid out for the guest ABI. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* Flags (O_NONBLOCK/O_CLOEXEC-style) are remapped via the shared
         * fcntl flag table before reaching the host. */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Add a watch for the guest pathname arg2 with event mask arg3.
         * Check the lock_user_string() result before using it, matching
         * every other pathname-taking syscall in this file; the original
         * passed a potentially-NULL pointer on to path(). */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Watch descriptors are plain ints; no translation needed. */
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
12011 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* Open a POSIX message queue via the host libc.  Flags are
             * remapped through the fcntl table; the optional mq_attr at
             * arg4 is converted from the guest layout. */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the name is locked at arg1 - 1 but unlocked at
             * arg1 — presumably the guest libc stripped the leading '/'
             * before the raw syscall and this backs up to recover it for
             * the host mq_open(3), which requires it.  Confirm against
             * lock_user_string() semantics for the preceding byte. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Same one-byte back-up for the leading '/' as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
12044 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /* Send arg3 bytes from guest buffer arg2 to queue arg1 with
             * priority arg4 and optional absolute timeout arg5. */
            struct timespec ts;

            /* NOTE(review): the lock_user() result is not checked here;
             * a NULL p is passed on to the host syscall.  Presumably the
             * host kernel then reports EFAULT itself — confirm. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* 64-bit timespec variant of mq_timedsend. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12087 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /* Receive up to arg3 bytes from queue arg1 into guest buffer
             * arg2; the message priority is stored through arg4 if set. */
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): VERIFY_READ looks wrong for a receive buffer
             * the host writes into (VERIFY_WRITE would be expected), and
             * the lock_user() result is unchecked — confirm upstream. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* 64-bit timespec variant of mq_timedreceive. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
12141 
12142     /* Not implemented for now... */
12143 /*     case TARGET_NR_mq_notify: */
12144 /*         break; */
12145 
12146     case TARGET_NR_mq_getsetattr:
12147         {
12148             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12149             ret = 0;
12150             if (arg2 != 0) {
12151                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12152                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12153                                            &posix_mq_attr_out));
12154             } else if (arg3 != 0) {
12155                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12156             }
12157             if (ret == 0 && arg3 != 0) {
12158                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12159             }
12160         }
12161         return ret;
12162 #endif
12163 
12164 #ifdef CONFIG_SPLICE
12165 #ifdef TARGET_NR_tee
12166     case TARGET_NR_tee:
12167         {
12168             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12169         }
12170         return ret;
12171 #endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* splice(2): the optional 64-bit offsets at arg2/arg4 are read
             * from guest memory, passed by host pointer, and written back
             * afterwards since the kernel updates them. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* NOTE(review): the offsets are written back even when splice
             * failed — harmless since they are then unchanged, but confirm
             * this matches kernel behavior for partial errors. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* Map the guest iovec array into host memory; on failure
             * lock_iovec() leaves the reason in errno. */
            struct iovec *iov = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (iov == NULL) {
                ret = -host_to_target_errno(errno);
            } else {
                ret = get_errno(vmsplice(arg1, iov, arg3, arg4));
                unlock_iovec(iov, arg2, arg3, 0);
            }
        }
        return ret;
#endif
12216 #endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Legacy eventfd(2) with no flags argument. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Values read/written through this fd need byte-swapping for
             * cross-endian guests. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* eventfd2: translate the two known flag bits by hand and pass
         * any remaining (EFD_SEMAPHORE etc.) bits through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs the two 64-bit offset/length arguments arrive
         * split across register pairs and must be reassembled. */
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS O32 shifts the 64-bit arguments one slot later — presumably
         * for even-register alignment of 64-bit pairs; hence arg3..arg7. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* Signal-set translation and fd registration happen inside
         * do_signalfd4(); arg3 (the sigset size) is validated there. */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd is signalfd4 with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is a historical hint; pass it through. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* Convert the optional guest epoll_event (arg4) to host layout;
         * EPOLL_CTL_DEL passes no event, hence the NULL default. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12327 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation for epoll_wait and epoll_pwait: events are
         * collected into a host-side array, then converted into the guest's
         * locked buffer.  `num` (the syscall number) selects the variant. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the host allocation below stays sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            /* epoll_pwait additionally takes a signal mask (arg5) whose
             * size (arg6) must match the guest sigset size exactly. */
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* Plain epoll_wait is epoll_pwait with no sigmask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; convert each back to the
             * guest layout and mark only the written bytes dirty. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* New limits for AS/DATA/STACK are deliberately not forwarded —
         * presumably because the guest's address-space layout under QEMU
         * differs from the host's, so enforcing them would be wrong.
         * The old limits are still reported for those resources. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the host's hostname into the guest buffer at arg1,
         * bounded by arg2 bytes. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Compare-and-exchange on a guest word: if *arg6 == arg2, store
         * arg1 there; the old value is always returned.
         * should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad guest address: deliver SIGSEGV and return the sentinel.
             * The original fell through here and compared the
             * uninitialized mem_value, which is undefined behavior; the
             * 0xdeadbeef assignment was a dead store clearly meant to be
             * the return value on this path. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
12485 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timer handles are kept in the g_posix_timers table; the
         * guest sees an index tagged with TIMER_MAGIC instead. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the tagged slot index back to the guest. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12521 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Old value is only copied out when the guest asked for it. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* 64-bit itimerspec variant of timer_settime. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12575 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is converted even when timer_gettime()
             * failed, copying an uninitialized struct to the guest —
             * confirm whether this should be guarded by !is_error(ret). */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12621 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
12654 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Flags (TFD_NONBLOCK/TFD_CLOEXEC) share values with the fcntl
         * open flags, so the common bitmask table translates them. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* NOTE(review): its_curr is copied out even when the host call
             * failed (uninitialized) — confirm whether this should be
             * gated on !is_error(ret). */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit itimerspec variant of timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12688 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            /* Arm/disarm the timerfd: arg3 is the new value (NULL allowed
             * by the host call? here NULL is passed through as p_new),
             * arg4 optionally receives the previous setting. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit itimerspec variant of timerfd_settime. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12734 
12735 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12736     case TARGET_NR_ioprio_get:
12737         return get_errno(ioprio_get(arg1, arg2));
12738 #endif
12739 
12740 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12741     case TARGET_NR_ioprio_set:
12742         return get_errno(ioprio_set(arg1, arg2, arg3));
12743 #endif
12744 
12745 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12746     case TARGET_NR_setns:
12747         return get_errno(setns(arg1, arg2));
12748 #endif
12749 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12750     case TARGET_NR_unshare:
12751         return get_errno(unshare(arg1));
12752 #endif
12753 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12754     case TARGET_NR_kcmp:
12755         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12756 #endif
12757 #ifdef TARGET_NR_swapcontext
12758     case TARGET_NR_swapcontext:
12759         /* PowerPC specific.  */
12760         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12761 #endif
12762 #ifdef TARGET_NR_memfd_create
12763     case TARGET_NR_memfd_create:
12764         p = lock_user_string(arg1);
12765         if (!p) {
12766             return -TARGET_EFAULT;
12767         }
12768         ret = get_errno(memfd_create(p, arg2));
12769         fd_trans_unregister(ret);
12770         unlock_user(p, arg1, 0);
12771         return ret;
12772 #endif
12773 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12774     case TARGET_NR_membarrier:
12775         return get_errno(membarrier(arg1, arg2));
12776 #endif
12777 
12778     default:
12779         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12780         return -TARGET_ENOSYS;
12781     }
12782     return ret;
12783 }
12784 
12785 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12786                     abi_long arg2, abi_long arg3, abi_long arg4,
12787                     abi_long arg5, abi_long arg6, abi_long arg7,
12788                     abi_long arg8)
12789 {
12790     CPUState *cpu = env_cpu(cpu_env);
12791     abi_long ret;
12792 
12793 #ifdef DEBUG_ERESTARTSYS
12794     /* Debug-only code for exercising the syscall-restart code paths
12795      * in the per-architecture cpu main loops: restart every syscall
12796      * the guest makes once before letting it through.
12797      */
12798     {
12799         static bool flag;
12800         flag = !flag;
12801         if (flag) {
12802             return -TARGET_ERESTARTSYS;
12803         }
12804     }
12805 #endif
12806 
12807     record_syscall_start(cpu, num, arg1,
12808                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12809 
12810     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12811         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
12812     }
12813 
12814     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12815                       arg5, arg6, arg7, arg8);
12816 
12817     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12818         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
12819                           arg3, arg4, arg5, arg6);
12820     }
12821 
12822     record_syscall_return(cpu, num, ret);
12823     return ret;
12824 }
12825