xref: /openbmc/qemu/linux-user/syscall.c (revision 2bb963ff)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/if_tun.h>
60 #include <linux/errqueue.h>
61 #include <linux/random.h>
62 #ifdef CONFIG_TIMERFD
63 #include <sys/timerfd.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 #ifdef HAVE_SYS_KCOV_H
78 #include <sys/kcov.h>
79 #endif
80 
81 #define termios host_termios
82 #define winsize host_winsize
83 #define termio host_termio
84 #define sgttyb host_sgttyb /* same as target */
85 #define tchars host_tchars /* same as target */
86 #define ltchars host_ltchars /* same as target */
87 
88 #include <linux/termios.h>
89 #include <linux/unistd.h>
90 #include <linux/cdrom.h>
91 #include <linux/hdreg.h>
92 #include <linux/soundcard.h>
93 #include <linux/kd.h>
94 #include <linux/mtio.h>
95 #include <linux/fs.h>
96 #include <linux/fd.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
99 #endif
100 #include <linux/fb.h>
101 #if defined(CONFIG_USBFS)
102 #include <linux/usbdevice_fs.h>
103 #include <linux/usb/ch9.h>
104 #endif
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include <linux/filter.h>
110 #include <linux/blkpg.h>
111 #include <netpacket/packet.h>
112 #include <linux/netlink.h>
113 #include <linux/if_alg.h>
114 #include <linux/rtc.h>
115 #include <sound/asound.h>
116 #ifdef HAVE_BTRFS_H
117 #include <linux/btrfs.h>
118 #endif
119 #ifdef HAVE_DRM_H
120 #include <libdrm/drm.h>
121 #include <libdrm/i915_drm.h>
122 #endif
123 #include "linux_loop.h"
124 #include "uname.h"
125 
126 #include "qemu.h"
127 #include "qemu/guest-random.h"
128 #include "qemu/selfmap.h"
129 #include "user/syscall-trace.h"
130 #include "qapi/error.h"
131 #include "fd-trans.h"
132 #include "tcg/tcg.h"
133 
134 #ifndef CLONE_IO
135 #define CLONE_IO                0x80000000      /* Clone io context */
136 #endif
137 
138 /* We can't directly call the host clone syscall, because this will
139  * badly confuse libc (breaking mutexes, for example). So we must
140  * divide clone flags into:
141  *  * flag combinations that look like pthread_create()
142  *  * flag combinations that look like fork()
143  *  * flags we can implement within QEMU itself
144  *  * flags we can't support and will return an error for
145  */
146 /* For thread creation, all these flags must be present; for
147  * fork, none must be present.
148  */
149 #define CLONE_THREAD_FLAGS                              \
150     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
151      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
152 
153 /* These flags are ignored:
154  * CLONE_DETACHED is now ignored by the kernel;
155  * CLONE_IO is just an optimisation hint to the I/O scheduler
156  */
157 #define CLONE_IGNORED_FLAGS                     \
158     (CLONE_DETACHED | CLONE_IO)
159 
160 /* Flags for fork which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_FORK_FLAGS               \
162     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
163      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
164 
165 /* Flags for thread creation which we can implement within QEMU itself */
166 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
167     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
168      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
169 
170 #define CLONE_INVALID_FORK_FLAGS                                        \
171     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
172 
173 #define CLONE_INVALID_THREAD_FLAGS                                      \
174     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
175        CLONE_IGNORED_FLAGS))
176 
177 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
178  * have almost all been allocated. We cannot support any of
179  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
180  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
181  * The checks against the invalid thread masks above will catch these.
182  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
183  */
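
/*
 * Illustrative sketch only: the helper names below are hypothetical and are
 * not used by the real do_fork() further down in this file; they just show
 * how the masks above classify an incoming clone flags value.
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* All of the thread flags must be present, and nothing unsupported. */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           !(flags & CLONE_INVALID_THREAD_FLAGS);
}

static inline bool clone_flags_look_like_fork(unsigned int flags)
{
    /* None of the thread flags may be present, and nothing unsupported. */
    return !(flags & CLONE_THREAD_FLAGS) &&
           !(flags & CLONE_INVALID_FORK_FLAGS);
}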
184 
185 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
186  * once. This exercises the codepaths for restart.
187  */
188 //#define DEBUG_ERESTARTSYS
189 
190 //#include <linux/msdos_fs.h>
191 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
192 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
193 
194 #undef _syscall0
195 #undef _syscall1
196 #undef _syscall2
197 #undef _syscall3
198 #undef _syscall4
199 #undef _syscall5
200 #undef _syscall6
201 
202 #define _syscall0(type,name)		\
203 static type name (void)			\
204 {					\
205 	return syscall(__NR_##name);	\
206 }
207 
208 #define _syscall1(type,name,type1,arg1)		\
209 static type name (type1 arg1)			\
210 {						\
211 	return syscall(__NR_##name, arg1);	\
212 }
213 
214 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
215 static type name (type1 arg1,type2 arg2)		\
216 {							\
217 	return syscall(__NR_##name, arg1, arg2);	\
218 }
219 
220 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
221 static type name (type1 arg1,type2 arg2,type3 arg3)		\
222 {								\
223 	return syscall(__NR_##name, arg1, arg2, arg3);		\
224 }
225 
226 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
227 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
228 {										\
229 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
230 }
231 
232 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
233 		  type5,arg5)							\
234 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
237 }
238 
239 
240 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
241 		  type5,arg5,type6,arg6)					\
242 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
243                   type6 arg6)							\
244 {										\
245 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
246 }
247 
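/*
 * For reference, a sketch of what one of these wrappers expands to:
 * _syscall2(int, sys_getcwd1, char *, buf, size_t, size) (used below)
 * becomes roughly
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * i.e. a thin static wrapper that forwards its arguments to the raw host
 * syscall number, bypassing any caching or emulation in the libc wrapper.
 */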
248 
249 #define __NR_sys_uname __NR_uname
250 #define __NR_sys_getcwd1 __NR_getcwd
251 #define __NR_sys_getdents __NR_getdents
252 #define __NR_sys_getdents64 __NR_getdents64
253 #define __NR_sys_getpriority __NR_getpriority
254 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
255 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
256 #define __NR_sys_syslog __NR_syslog
257 #if defined(__NR_futex)
258 # define __NR_sys_futex __NR_futex
259 #endif
260 #if defined(__NR_futex_time64)
261 # define __NR_sys_futex_time64 __NR_futex_time64
262 #endif
263 #define __NR_sys_inotify_init __NR_inotify_init
264 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
265 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
266 #define __NR_sys_statx __NR_statx
267 
268 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
269 #define __NR__llseek __NR_lseek
270 #endif
271 
272 /* Newer kernel ports have llseek() instead of _llseek() */
273 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
274 #define TARGET_NR__llseek TARGET_NR_llseek
275 #endif
276 
277 #define __NR_sys_gettid __NR_gettid
278 _syscall0(int, sys_gettid)
279 
280 /* For the 64-bit guest on 32-bit host case we must emulate
281  * getdents using getdents64, because otherwise the host
282  * might hand us back more dirent records than we can fit
283  * into the guest buffer after structure format conversion.
284  * Otherwise we implement getdents in terms of the host getdents, if available.
285  */
286 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
287 #define EMULATE_GETDENTS_WITH_GETDENTS
288 #endif
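
/*
 * Concretely: for a 32-bit guest on a 64-bit host (e.g. qemu-arm on an
 * x86_64 host) HOST_LONG_BITS (64) >= TARGET_ABI_BITS (32), so guest
 * getdents is serviced by host getdents where the host provides it.  For a
 * 64-bit guest on a 32-bit host the macro stays undefined and getdents64 is
 * used instead; its records have the same layout on host and guest, so the
 * converted output cannot outgrow the guest's buffer.
 */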
289 
290 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
291 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
292 #endif
293 #if (defined(TARGET_NR_getdents) && \
294       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
295     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
296 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
297 #endif
298 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
299 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
300           loff_t *, res, uint, wh);
301 #endif
302 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
303 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
304           siginfo_t *, uinfo)
305 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
306 #ifdef __NR_exit_group
307 _syscall1(int,exit_group,int,error_code)
308 #endif
309 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
310 _syscall1(int,set_tid_address,int *,tidptr)
311 #endif
312 #if defined(__NR_futex)
313 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
314           const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #if defined(__NR_futex_time64)
317 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
318           const struct timespec *,timeout,int *,uaddr2,int,val3)
319 #endif
320 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
321 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
322           unsigned long *, user_mask_ptr);
323 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
324 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
325           unsigned long *, user_mask_ptr);
326 #define __NR_sys_getcpu __NR_getcpu
327 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
328 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
329           void *, arg);
330 _syscall2(int, capget, struct __user_cap_header_struct *, header,
331           struct __user_cap_data_struct *, data);
332 _syscall2(int, capset, struct __user_cap_header_struct *, header,
333           struct __user_cap_data_struct *, data);
334 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
335 _syscall2(int, ioprio_get, int, which, int, who)
336 #endif
337 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
338 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
339 #endif
340 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
341 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
342 #endif
343 
344 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
345 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
346           unsigned long, idx1, unsigned long, idx2)
347 #endif
348 
349 /*
350  * It is assumed that struct statx is architecture independent.
351  */
352 #if defined(TARGET_NR_statx) && defined(__NR_statx)
353 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
354           unsigned int, mask, struct target_statx *, statxbuf)
355 #endif
356 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
357 _syscall2(int, membarrier, int, cmd, int, flags)
358 #endif
359 
360 static bitmask_transtbl fcntl_flags_tbl[] = {
361   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
362   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
363   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
364   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
365   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
366   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
367   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
368   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
369   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
370   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
371   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
372   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
373   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
374 #if defined(O_DIRECT)
375   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
376 #endif
377 #if defined(O_NOATIME)
378   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
379 #endif
380 #if defined(O_CLOEXEC)
381   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
382 #endif
383 #if defined(O_PATH)
384   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
385 #endif
386 #if defined(O_TMPFILE)
387   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
388 #endif
389   /* Don't terminate the list prematurely on 64-bit host+guest.  */
390 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
391   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
392 #endif
393   { 0, 0, 0, 0 }
394 };
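
/*
 * Each row above is { target_mask, target_bits, host_mask, host_bits }:
 * when (target_flags & target_mask) == target_bits, host_bits is OR-ed into
 * the translated value, and symmetrically in the other direction.  So a
 * guest passing TARGET_O_WRONLY | TARGET_O_CREAT to openat() ends up with
 * host O_WRONLY | O_CREAT once the table has been applied by
 * target_to_host_bitmask() and friends later in this file.
 */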
395 
396 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
397 
398 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
399 #if defined(__NR_utimensat)
400 #define __NR_sys_utimensat __NR_utimensat
401 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
402           const struct timespec *,tsp,int,flags)
403 #else
404 static int sys_utimensat(int dirfd, const char *pathname,
405                          const struct timespec times[2], int flags)
406 {
407     errno = ENOSYS;
408     return -1;
409 }
410 #endif
411 #endif /* TARGET_NR_utimensat */
412 
413 #ifdef TARGET_NR_renameat2
414 #if defined(__NR_renameat2)
415 #define __NR_sys_renameat2 __NR_renameat2
416 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
417           const char *, new, unsigned int, flags)
418 #else
419 static int sys_renameat2(int oldfd, const char *old,
420                          int newfd, const char *new, int flags)
421 {
422     if (flags == 0) {
423         return renameat(oldfd, old, newfd, new);
424     }
425     errno = ENOSYS;
426     return -1;
427 }
428 #endif
429 #endif /* TARGET_NR_renameat2 */
430 
431 #ifdef CONFIG_INOTIFY
432 #include <sys/inotify.h>
433 
434 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
435 static int sys_inotify_init(void)
436 {
437   return inotify_init();
438 }
439 #endif
440 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
441 static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
442 {
443   return inotify_add_watch(fd, pathname, mask);
444 }
445 #endif
446 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
447 static int sys_inotify_rm_watch(int fd, int32_t wd)
448 {
449   return inotify_rm_watch(fd, wd);
450 }
451 #endif
452 #ifdef CONFIG_INOTIFY1
453 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
454 static int sys_inotify_init1(int flags)
455 {
456   return inotify_init1(flags);
457 }
458 #endif
459 #endif
460 #else
461 /* Userspace can usually survive at runtime without inotify support */
462 #undef TARGET_NR_inotify_init
463 #undef TARGET_NR_inotify_init1
464 #undef TARGET_NR_inotify_add_watch
465 #undef TARGET_NR_inotify_rm_watch
466 #endif /* CONFIG_INOTIFY  */
467 
468 #if defined(TARGET_NR_prlimit64)
469 #ifndef __NR_prlimit64
470 # define __NR_prlimit64 -1
471 #endif
472 #define __NR_sys_prlimit64 __NR_prlimit64
473 /* The glibc rlimit structure may not match the one used by the underlying syscall */
474 struct host_rlimit64 {
475     uint64_t rlim_cur;
476     uint64_t rlim_max;
477 };
478 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
479           const struct host_rlimit64 *, new_limit,
480           struct host_rlimit64 *, old_limit)
481 #endif
482 
483 
484 #if defined(TARGET_NR_timer_create)
485 /* Maximum of 32 active POSIX timers allowed at any one time. */
486 static timer_t g_posix_timers[32] = { 0, };
487 
488 static inline int next_free_host_timer(void)
489 {
490     int k;
491     /* FIXME: Does finding the next free slot require a lock? */
492     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
493         if (g_posix_timers[k] == 0) {
494             g_posix_timers[k] = (timer_t) 1;
495             return k;
496         }
497     }
498     return -1;
499 }
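
/*
 * A possible answer to the FIXME above, sketched here but not wired up
 * anywhere: claim each slot atomically instead of taking a lock.  This
 * assumes the compiler provides the __sync builtins (GCC and Clang do).
 */
static inline int next_free_host_timer_atomic(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        /* Atomically replace an unused (0) slot with the placeholder 1. */
        if (__sync_bool_compare_and_swap(&g_posix_timers[k],
                                         (timer_t)0, (timer_t)1)) {
            return k;
        }
    }
    return -1;
}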
500 #endif
501 
502 #define ERRNO_TABLE_SIZE 1200
503 
504 /* target_to_host_errno_table[] is initialized from
505  * host_to_target_errno_table[] in syscall_init(). */
506 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
507 };
508 
509 /*
510  * This list is the union of errno values overridden in asm-<arch>/errno.h
511  * minus the errnos that are not actually generic to all archs.
512  */
513 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
514     [EAGAIN]		= TARGET_EAGAIN,
515     [EIDRM]		= TARGET_EIDRM,
516     [ECHRNG]		= TARGET_ECHRNG,
517     [EL2NSYNC]		= TARGET_EL2NSYNC,
518     [EL3HLT]		= TARGET_EL3HLT,
519     [EL3RST]		= TARGET_EL3RST,
520     [ELNRNG]		= TARGET_ELNRNG,
521     [EUNATCH]		= TARGET_EUNATCH,
522     [ENOCSI]		= TARGET_ENOCSI,
523     [EL2HLT]		= TARGET_EL2HLT,
524     [EDEADLK]		= TARGET_EDEADLK,
525     [ENOLCK]		= TARGET_ENOLCK,
526     [EBADE]		= TARGET_EBADE,
527     [EBADR]		= TARGET_EBADR,
528     [EXFULL]		= TARGET_EXFULL,
529     [ENOANO]		= TARGET_ENOANO,
530     [EBADRQC]		= TARGET_EBADRQC,
531     [EBADSLT]		= TARGET_EBADSLT,
532     [EBFONT]		= TARGET_EBFONT,
533     [ENOSTR]		= TARGET_ENOSTR,
534     [ENODATA]		= TARGET_ENODATA,
535     [ETIME]		= TARGET_ETIME,
536     [ENOSR]		= TARGET_ENOSR,
537     [ENONET]		= TARGET_ENONET,
538     [ENOPKG]		= TARGET_ENOPKG,
539     [EREMOTE]		= TARGET_EREMOTE,
540     [ENOLINK]		= TARGET_ENOLINK,
541     [EADV]		= TARGET_EADV,
542     [ESRMNT]		= TARGET_ESRMNT,
543     [ECOMM]		= TARGET_ECOMM,
544     [EPROTO]		= TARGET_EPROTO,
545     [EDOTDOT]		= TARGET_EDOTDOT,
546     [EMULTIHOP]		= TARGET_EMULTIHOP,
547     [EBADMSG]		= TARGET_EBADMSG,
548     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
549     [EOVERFLOW]		= TARGET_EOVERFLOW,
550     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
551     [EBADFD]		= TARGET_EBADFD,
552     [EREMCHG]		= TARGET_EREMCHG,
553     [ELIBACC]		= TARGET_ELIBACC,
554     [ELIBBAD]		= TARGET_ELIBBAD,
555     [ELIBSCN]		= TARGET_ELIBSCN,
556     [ELIBMAX]		= TARGET_ELIBMAX,
557     [ELIBEXEC]		= TARGET_ELIBEXEC,
558     [EILSEQ]		= TARGET_EILSEQ,
559     [ENOSYS]		= TARGET_ENOSYS,
560     [ELOOP]		= TARGET_ELOOP,
561     [ERESTART]		= TARGET_ERESTART,
562     [ESTRPIPE]		= TARGET_ESTRPIPE,
563     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
564     [EUSERS]		= TARGET_EUSERS,
565     [ENOTSOCK]		= TARGET_ENOTSOCK,
566     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
567     [EMSGSIZE]		= TARGET_EMSGSIZE,
568     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
569     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
570     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
571     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
572     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
573     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
574     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
575     [EADDRINUSE]	= TARGET_EADDRINUSE,
576     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
577     [ENETDOWN]		= TARGET_ENETDOWN,
578     [ENETUNREACH]	= TARGET_ENETUNREACH,
579     [ENETRESET]		= TARGET_ENETRESET,
580     [ECONNABORTED]	= TARGET_ECONNABORTED,
581     [ECONNRESET]	= TARGET_ECONNRESET,
582     [ENOBUFS]		= TARGET_ENOBUFS,
583     [EISCONN]		= TARGET_EISCONN,
584     [ENOTCONN]		= TARGET_ENOTCONN,
585     [EUCLEAN]		= TARGET_EUCLEAN,
586     [ENOTNAM]		= TARGET_ENOTNAM,
587     [ENAVAIL]		= TARGET_ENAVAIL,
588     [EISNAM]		= TARGET_EISNAM,
589     [EREMOTEIO]		= TARGET_EREMOTEIO,
590     [EDQUOT]            = TARGET_EDQUOT,
591     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
592     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
593     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
594     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
595     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
596     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
597     [EALREADY]		= TARGET_EALREADY,
598     [EINPROGRESS]	= TARGET_EINPROGRESS,
599     [ESTALE]		= TARGET_ESTALE,
600     [ECANCELED]		= TARGET_ECANCELED,
601     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
602     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
603 #ifdef ENOKEY
604     [ENOKEY]		= TARGET_ENOKEY,
605 #endif
606 #ifdef EKEYEXPIRED
607     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
608 #endif
609 #ifdef EKEYREVOKED
610     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
611 #endif
612 #ifdef EKEYREJECTED
613     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
614 #endif
615 #ifdef EOWNERDEAD
616     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
617 #endif
618 #ifdef ENOTRECOVERABLE
619     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
620 #endif
621 #ifdef ENOMSG
622     [ENOMSG]            = TARGET_ENOMSG,
623 #endif
624 #ifdef ERFKILL
625     [ERFKILL]           = TARGET_ERFKILL,
626 #endif
627 #ifdef EHWPOISON
628     [EHWPOISON]         = TARGET_EHWPOISON,
629 #endif
630 };
631 
632 static inline int host_to_target_errno(int err)
633 {
634     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
635         host_to_target_errno_table[err]) {
636         return host_to_target_errno_table[err];
637     }
638     return err;
639 }
640 
641 static inline int target_to_host_errno(int err)
642 {
643     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
644         target_to_host_errno_table[err]) {
645         return target_to_host_errno_table[err];
646     }
647     return err;
648 }
649 
650 static inline abi_long get_errno(abi_long ret)
651 {
652     if (ret == -1)
653         return -host_to_target_errno(errno);
654     else
655         return ret;
656 }
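
/*
 * Worked example: if a host read() fails with EAGAIN (11 on an x86_64
 * host) while emulating an Alpha guest, get_errno(-1) looks up
 * host_to_target_errno_table[EAGAIN] and returns -TARGET_EAGAIN (35 on
 * Alpha).  Errnos with no table entry (EPERM, ENOENT, ...) are numerically
 * identical on all Linux architectures and pass through unchanged.
 */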
657 
658 const char *target_strerror(int err)
659 {
660     if (err == TARGET_ERESTARTSYS) {
661         return "To be restarted";
662     }
663     if (err == TARGET_QEMU_ESIGRETURN) {
664         return "Successful exit from sigreturn";
665     }
666 
667     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
668         return NULL;
669     }
670     return strerror(target_to_host_errno(err));
671 }
672 
673 #define safe_syscall0(type, name) \
674 static type safe_##name(void) \
675 { \
676     return safe_syscall(__NR_##name); \
677 }
678 
679 #define safe_syscall1(type, name, type1, arg1) \
680 static type safe_##name(type1 arg1) \
681 { \
682     return safe_syscall(__NR_##name, arg1); \
683 }
684 
685 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
686 static type safe_##name(type1 arg1, type2 arg2) \
687 { \
688     return safe_syscall(__NR_##name, arg1, arg2); \
689 }
690 
691 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
693 { \
694     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
695 }
696 
697 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
698     type4, arg4) \
699 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
700 { \
701     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
702 }
703 
704 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
705     type4, arg4, type5, arg5) \
706 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
707     type5 arg5) \
708 { \
709     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
710 }
711 
712 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
713     type4, arg4, type5, arg5, type6, arg6) \
714 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715     type5 arg5, type6 arg6) \
716 { \
717     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
718 }
719 
720 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
721 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
722 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
723               int, flags, mode_t, mode)
724 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
725 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
726               struct rusage *, rusage)
727 #endif
728 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
729               int, options, struct rusage *, rusage)
730 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
731 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
732     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
733 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
734               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
735 #endif
736 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738               struct timespec *, tsp, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 #endif
741 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
742               int, maxevents, int, timeout, const sigset_t *, sigmask,
743               size_t, sigsetsize)
744 #if defined(__NR_futex)
745 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 #if defined(__NR_futex_time64)
749 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
750               const struct timespec *,timeout,int *,uaddr2,int,val3)
751 #endif
752 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
753 safe_syscall2(int, kill, pid_t, pid, int, sig)
754 safe_syscall2(int, tkill, int, tid, int, sig)
755 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
756 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
758 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
759               unsigned long, pos_l, unsigned long, pos_h)
760 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
761               unsigned long, pos_l, unsigned long, pos_h)
762 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
763               socklen_t, addrlen)
764 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
765               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
766 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
767               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
768 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
769 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
770 safe_syscall2(int, flock, int, fd, int, operation)
771 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
772 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
773               const struct timespec *, uts, size_t, sigsetsize)
774 #endif
775 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
776               int, flags)
777 #if defined(TARGET_NR_nanosleep)
778 safe_syscall2(int, nanosleep, const struct timespec *, req,
779               struct timespec *, rem)
780 #endif
781 #if defined(TARGET_NR_clock_nanosleep) || \
782     defined(TARGET_NR_clock_nanosleep_time64)
783 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
784               const struct timespec *, req, struct timespec *, rem)
785 #endif
786 #ifdef __NR_ipc
787 #ifdef __s390x__
788 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
789               void *, ptr)
790 #else
791 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
792               void *, ptr, long, fifth)
793 #endif
794 #endif
795 #ifdef __NR_msgsnd
796 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
797               int, flags)
798 #endif
799 #ifdef __NR_msgrcv
800 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
801               long, msgtype, int, flags)
802 #endif
803 #ifdef __NR_semtimedop
804 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
805               unsigned, nsops, const struct timespec *, timeout)
806 #endif
807 #if defined(TARGET_NR_mq_timedsend) || \
808     defined(TARGET_NR_mq_timedsend_time64)
809 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
810               size_t, len, unsigned, prio, const struct timespec *, timeout)
811 #endif
812 #if defined(TARGET_NR_mq_timedreceive) || \
813     defined(TARGET_NR_mq_timedreceive_time64)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815               size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
818 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
819               int, outfd, loff_t *, poutoff, size_t, length,
820               unsigned int, flags)
821 #endif
822 
823 /* We do ioctl like this rather than via safe_syscall3 to preserve the
824  * "third argument might be integer or pointer or not present" behaviour of
825  * the libc function.
826  */
827 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
828 /* Similarly for fcntl. Note that callers must always:
829  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
830  *  - use the flock64 struct rather than the unsuffixed flock
831  * This then works and uses a 64-bit offset for both 32-bit and 64-bit hosts.
832  */
833 #ifdef __NR_fcntl64
834 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
835 #else
836 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
837 #endif
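
/*
 * A minimal illustration of the convention above; the helper is
 * hypothetical and not used elsewhere in this file.  Using the 64-suffixed
 * command and struct keeps the code correct for large offsets on both
 * 32-bit and 64-bit hosts.
 */
static inline int example_probe_whole_file_lock(int fd)
{
    struct flock64 fl = {
        .l_type = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,            /* 0 length means "to end of file" */
    };
    return safe_fcntl(fd, F_GETLK64, &fl);
}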
838 
839 static inline int host_to_target_sock_type(int host_type)
840 {
841     int target_type;
842 
843     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
844     case SOCK_DGRAM:
845         target_type = TARGET_SOCK_DGRAM;
846         break;
847     case SOCK_STREAM:
848         target_type = TARGET_SOCK_STREAM;
849         break;
850     default:
851         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
852         break;
853     }
854 
855 #if defined(SOCK_CLOEXEC)
856     if (host_type & SOCK_CLOEXEC) {
857         target_type |= TARGET_SOCK_CLOEXEC;
858     }
859 #endif
860 
861 #if defined(SOCK_NONBLOCK)
862     if (host_type & SOCK_NONBLOCK) {
863         target_type |= TARGET_SOCK_NONBLOCK;
864     }
865 #endif
866 
867     return target_type;
868 }
869 
870 static abi_ulong target_brk;
871 static abi_ulong target_original_brk;
872 static abi_ulong brk_page;
873 
874 void target_set_brk(abi_ulong new_brk)
875 {
876     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
877     brk_page = HOST_PAGE_ALIGN(target_brk);
878 }
879 
880 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
881 #define DEBUGF_BRK(message, args...)
882 
883 /* do_brk() must return target values and target errnos. */
884 abi_long do_brk(abi_ulong new_brk)
885 {
886     abi_long mapped_addr;
887     abi_ulong new_alloc_size;
888 
889     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
890 
891     if (!new_brk) {
892         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
893         return target_brk;
894     }
895     if (new_brk < target_original_brk) {
896         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
897                    target_brk);
898         return target_brk;
899     }
900 
901     /* If the new brk is less than the highest page reserved to the
902      * target heap allocation, set it and we're almost done...  */
903     if (new_brk <= brk_page) {
904         /* Heap contents are initialized to zero, as for anonymous
905          * mapped pages.  */
906         if (new_brk > target_brk) {
907             memset(g2h(target_brk), 0, new_brk - target_brk);
908         }
909 	target_brk = new_brk;
910         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
911 	return target_brk;
912     }
913 
914     /* We need to allocate more memory after the brk... Note that
915      * we don't use MAP_FIXED because that will map over the top of
916      * any existing mapping (like the one with the host libc or qemu
917      * itself); instead we treat "mapped but at wrong address" as
918      * a failure and unmap again.
919      */
920     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
921     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
922                                         PROT_READ|PROT_WRITE,
923                                         MAP_ANON|MAP_PRIVATE, 0, 0));
924 
925     if (mapped_addr == brk_page) {
926         /* Heap contents are initialized to zero, as for anonymous
927          * mapped pages.  Technically the new pages are already
928          * initialized to zero since they *are* anonymous mapped
929          * pages, however we have to take care with the contents that
930          * come from the remaining part of the previous page: it may
931          * contain garbage data due to previous heap use (grown
932          * then shrunk).  */
933         memset(g2h(target_brk), 0, brk_page - target_brk);
934 
935         target_brk = new_brk;
936         brk_page = HOST_PAGE_ALIGN(target_brk);
937         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
938             target_brk);
939         return target_brk;
940     } else if (mapped_addr != -1) {
941         /* Mapped but at wrong address, meaning there wasn't actually
942          * enough space for this brk.
943          */
944         target_munmap(mapped_addr, new_alloc_size);
945         mapped_addr = -1;
946         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
947     } else {
949         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
950     }
951 
952 #if defined(TARGET_ALPHA)
953     /* We (partially) emulate OSF/1 on Alpha, which requires we
954        return a proper errno, not an unchanged brk value.  */
955     return -TARGET_ENOMEM;
956 #endif
957     /* For everything else, return the previous break. */
958     return target_brk;
959 }
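
/*
 * Example of the grow path above, assuming 4 KiB host pages: with
 * target_brk == brk_page == 0x500000, a request for 0x502001 maps
 * HOST_PAGE_ALIGN(0x502001 - 0x500000) = 0x3000 bytes at brk_page.  If the
 * kernel places the mapping exactly at 0x500000 the break is extended;
 * a mapping at any other address is unmapped again and the old break is
 * returned (or -TARGET_ENOMEM on Alpha).
 */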
960 
961 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
962     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
963 static inline abi_long copy_from_user_fdset(fd_set *fds,
964                                             abi_ulong target_fds_addr,
965                                             int n)
966 {
967     int i, nw, j, k;
968     abi_ulong b, *target_fds;
969 
970     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
971     if (!(target_fds = lock_user(VERIFY_READ,
972                                  target_fds_addr,
973                                  sizeof(abi_ulong) * nw,
974                                  1)))
975         return -TARGET_EFAULT;
976 
977     FD_ZERO(fds);
978     k = 0;
979     for (i = 0; i < nw; i++) {
980         /* grab the abi_ulong */
981         __get_user(b, &target_fds[i]);
982         for (j = 0; j < TARGET_ABI_BITS; j++) {
983             /* check the bit inside the abi_ulong */
984             if ((b >> j) & 1)
985                 FD_SET(k, fds);
986             k++;
987         }
988     }
989 
990     unlock_user(target_fds, target_fds_addr, 0);
991 
992     return 0;
993 }
994 
995 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
996                                                  abi_ulong target_fds_addr,
997                                                  int n)
998 {
999     if (target_fds_addr) {
1000         if (copy_from_user_fdset(fds, target_fds_addr, n))
1001             return -TARGET_EFAULT;
1002         *fds_ptr = fds;
1003     } else {
1004         *fds_ptr = NULL;
1005     }
1006     return 0;
1007 }
1008 
1009 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1010                                           const fd_set *fds,
1011                                           int n)
1012 {
1013     int i, nw, j, k;
1014     abi_long v;
1015     abi_ulong *target_fds;
1016 
1017     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1018     if (!(target_fds = lock_user(VERIFY_WRITE,
1019                                  target_fds_addr,
1020                                  sizeof(abi_ulong) * nw,
1021                                  0)))
1022         return -TARGET_EFAULT;
1023 
1024     k = 0;
1025     for (i = 0; i < nw; i++) {
1026         v = 0;
1027         for (j = 0; j < TARGET_ABI_BITS; j++) {
1028             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1029             k++;
1030         }
1031         __put_user(v, &target_fds[i]);
1032     }
1033 
1034     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1035 
1036     return 0;
1037 }
1038 #endif
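
/*
 * Worked example for the fd_set conversion above: with TARGET_ABI_BITS ==
 * 32 and n == 70, nw = DIV_ROUND_UP(70, 32) = 3 guest words are read; bit j
 * of word i describes guest descriptor i * 32 + j and is replayed into the
 * host fd_set with FD_SET(), then mirrored back with FD_ISSET() on the way
 * out.
 */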
1039 
1040 #if defined(__alpha__)
1041 #define HOST_HZ 1024
1042 #else
1043 #define HOST_HZ 100
1044 #endif
1045 
1046 static inline abi_long host_to_target_clock_t(long ticks)
1047 {
1048 #if HOST_HZ == TARGET_HZ
1049     return ticks;
1050 #else
1051     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1052 #endif
1053 }
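
/*
 * Example: an Alpha host (HOST_HZ 1024) reporting 2048 ticks to a target
 * built with TARGET_HZ 100 yields (2048 * 100) / 1024 = 200 target clock
 * ticks; when the two rates match, the value passes through unchanged.
 */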
1054 
1055 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1056                                              const struct rusage *rusage)
1057 {
1058     struct target_rusage *target_rusage;
1059 
1060     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1061         return -TARGET_EFAULT;
1062     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1063     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1064     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1065     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1066     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1067     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1068     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1069     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1070     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1071     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1072     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1073     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1074     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1075     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1076     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1077     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1078     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1079     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1080     unlock_user_struct(target_rusage, target_addr, 1);
1081 
1082     return 0;
1083 }
1084 
1085 #ifdef TARGET_NR_setrlimit
1086 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1087 {
1088     abi_ulong target_rlim_swap;
1089     rlim_t result;
1090 
1091     target_rlim_swap = tswapal(target_rlim);
1092     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1093         return RLIM_INFINITY;
1094 
1095     result = target_rlim_swap;
1096     if (target_rlim_swap != (rlim_t)result)
1097         return RLIM_INFINITY;
1098 
1099     return result;
1100 }
1101 #endif
1102 
1103 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1104 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1105 {
1106     abi_ulong target_rlim_swap;
1107     abi_ulong result;
1108 
1109     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1110         target_rlim_swap = TARGET_RLIM_INFINITY;
1111     else
1112         target_rlim_swap = rlim;
1113     result = tswapal(target_rlim_swap);
1114 
1115     return result;
1116 }
1117 #endif
1118 
1119 static inline int target_to_host_resource(int code)
1120 {
1121     switch (code) {
1122     case TARGET_RLIMIT_AS:
1123         return RLIMIT_AS;
1124     case TARGET_RLIMIT_CORE:
1125         return RLIMIT_CORE;
1126     case TARGET_RLIMIT_CPU:
1127         return RLIMIT_CPU;
1128     case TARGET_RLIMIT_DATA:
1129         return RLIMIT_DATA;
1130     case TARGET_RLIMIT_FSIZE:
1131         return RLIMIT_FSIZE;
1132     case TARGET_RLIMIT_LOCKS:
1133         return RLIMIT_LOCKS;
1134     case TARGET_RLIMIT_MEMLOCK:
1135         return RLIMIT_MEMLOCK;
1136     case TARGET_RLIMIT_MSGQUEUE:
1137         return RLIMIT_MSGQUEUE;
1138     case TARGET_RLIMIT_NICE:
1139         return RLIMIT_NICE;
1140     case TARGET_RLIMIT_NOFILE:
1141         return RLIMIT_NOFILE;
1142     case TARGET_RLIMIT_NPROC:
1143         return RLIMIT_NPROC;
1144     case TARGET_RLIMIT_RSS:
1145         return RLIMIT_RSS;
1146     case TARGET_RLIMIT_RTPRIO:
1147         return RLIMIT_RTPRIO;
1148     case TARGET_RLIMIT_SIGPENDING:
1149         return RLIMIT_SIGPENDING;
1150     case TARGET_RLIMIT_STACK:
1151         return RLIMIT_STACK;
1152     default:
1153         return code;
1154     }
1155 }
1156 
1157 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1158                                               abi_ulong target_tv_addr)
1159 {
1160     struct target_timeval *target_tv;
1161 
1162     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1163         return -TARGET_EFAULT;
1164     }
1165 
1166     __get_user(tv->tv_sec, &target_tv->tv_sec);
1167     __get_user(tv->tv_usec, &target_tv->tv_usec);
1168 
1169     unlock_user_struct(target_tv, target_tv_addr, 0);
1170 
1171     return 0;
1172 }
1173 
1174 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1175                                             const struct timeval *tv)
1176 {
1177     struct target_timeval *target_tv;
1178 
1179     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1180         return -TARGET_EFAULT;
1181     }
1182 
1183     __put_user(tv->tv_sec, &target_tv->tv_sec);
1184     __put_user(tv->tv_usec, &target_tv->tv_usec);
1185 
1186     unlock_user_struct(target_tv, target_tv_addr, 1);
1187 
1188     return 0;
1189 }
1190 
1191 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1192 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1193                                                 abi_ulong target_tv_addr)
1194 {
1195     struct target__kernel_sock_timeval *target_tv;
1196 
1197     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1198         return -TARGET_EFAULT;
1199     }
1200 
1201     __get_user(tv->tv_sec, &target_tv->tv_sec);
1202     __get_user(tv->tv_usec, &target_tv->tv_usec);
1203 
1204     unlock_user_struct(target_tv, target_tv_addr, 0);
1205 
1206     return 0;
1207 }
1208 #endif
1209 
1210 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1211                                               const struct timeval *tv)
1212 {
1213     struct target__kernel_sock_timeval *target_tv;
1214 
1215     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1216         return -TARGET_EFAULT;
1217     }
1218 
1219     __put_user(tv->tv_sec, &target_tv->tv_sec);
1220     __put_user(tv->tv_usec, &target_tv->tv_usec);
1221 
1222     unlock_user_struct(target_tv, target_tv_addr, 1);
1223 
1224     return 0;
1225 }
1226 
1227 #if defined(TARGET_NR_futex) || \
1228     defined(TARGET_NR_rt_sigtimedwait) || \
1229     defined(TARGET_NR_pselect6) || \
1230     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1231     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1232     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1233     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1234     defined(TARGET_NR_timer_settime) || \
1235     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1236 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1237                                                abi_ulong target_addr)
1238 {
1239     struct target_timespec *target_ts;
1240 
1241     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1242         return -TARGET_EFAULT;
1243     }
1244     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1245     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1246     unlock_user_struct(target_ts, target_addr, 0);
1247     return 0;
1248 }
1249 #endif
1250 
1251 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1252     defined(TARGET_NR_timer_settime64) || \
1253     defined(TARGET_NR_mq_timedsend_time64) || \
1254     defined(TARGET_NR_mq_timedreceive_time64) || \
1255     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1256     defined(TARGET_NR_clock_nanosleep_time64) || \
1257     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1258     defined(TARGET_NR_utimensat) || \
1259     defined(TARGET_NR_utimensat_time64) || \
1260     defined(TARGET_NR_semtimedop_time64) || \
1261     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1262 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1263                                                  abi_ulong target_addr)
1264 {
1265     struct target__kernel_timespec *target_ts;
1266 
1267     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1268         return -TARGET_EFAULT;
1269     }
1270     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1271     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1272     /* in 32bit mode, this drops the padding */
1273     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1274     unlock_user_struct(target_ts, target_addr, 0);
1275     return 0;
1276 }
1277 #endif
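
/*
 * About the padding handling above: on a 32-bit guest ABI the
 * target__kernel_timespec tv_nsec field occupies 64 bits of which only the
 * low 32 are significant, so the value is cast through abi_long to truncate
 * it to the guest's long size and sign-extend it back, discarding whatever
 * the guest left in the padding half.  On 64-bit guests the cast is a
 * no-op.
 */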
1278 
1279 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1280                                                struct timespec *host_ts)
1281 {
1282     struct target_timespec *target_ts;
1283 
1284     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1285         return -TARGET_EFAULT;
1286     }
1287     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1288     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1289     unlock_user_struct(target_ts, target_addr, 1);
1290     return 0;
1291 }
1292 
1293 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1294                                                  struct timespec *host_ts)
1295 {
1296     struct target__kernel_timespec *target_ts;
1297 
1298     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1299         return -TARGET_EFAULT;
1300     }
1301     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1302     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1303     unlock_user_struct(target_ts, target_addr, 1);
1304     return 0;
1305 }
1306 
1307 #if defined(TARGET_NR_gettimeofday)
1308 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1309                                              struct timezone *tz)
1310 {
1311     struct target_timezone *target_tz;
1312 
1313     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1314         return -TARGET_EFAULT;
1315     }
1316 
1317     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1318     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1319 
1320     unlock_user_struct(target_tz, target_tz_addr, 1);
1321 
1322     return 0;
1323 }
1324 #endif
1325 
1326 #if defined(TARGET_NR_settimeofday)
1327 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1328                                                abi_ulong target_tz_addr)
1329 {
1330     struct target_timezone *target_tz;
1331 
1332     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1333         return -TARGET_EFAULT;
1334     }
1335 
1336     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1337     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1338 
1339     unlock_user_struct(target_tz, target_tz_addr, 0);
1340 
1341     return 0;
1342 }
1343 #endif
1344 
1345 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1346 #include <mqueue.h>
1347 
1348 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1349                                               abi_ulong target_mq_attr_addr)
1350 {
1351     struct target_mq_attr *target_mq_attr;
1352 
1353     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1354                           target_mq_attr_addr, 1))
1355         return -TARGET_EFAULT;
1356 
1357     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1358     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1359     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1360     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1361 
1362     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1363 
1364     return 0;
1365 }
1366 
1367 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1368                                             const struct mq_attr *attr)
1369 {
1370     struct target_mq_attr *target_mq_attr;
1371 
1372     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1373                           target_mq_attr_addr, 0))
1374         return -TARGET_EFAULT;
1375 
1376     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1377     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1378     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1379     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1380 
1381     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1382 
1383     return 0;
1384 }
1385 #endif
1386 
1387 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1388 /* do_select() must return target values and target errnos. */
1389 static abi_long do_select(int n,
1390                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1391                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1392 {
1393     fd_set rfds, wfds, efds;
1394     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1395     struct timeval tv;
1396     struct timespec ts, *ts_ptr;
1397     abi_long ret;
1398 
1399     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1400     if (ret) {
1401         return ret;
1402     }
1403     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1404     if (ret) {
1405         return ret;
1406     }
1407     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411 
1412     if (target_tv_addr) {
1413         if (copy_from_user_timeval(&tv, target_tv_addr))
1414             return -TARGET_EFAULT;
1415         ts.tv_sec = tv.tv_sec;
1416         ts.tv_nsec = tv.tv_usec * 1000;
1417         ts_ptr = &ts;
1418     } else {
1419         ts_ptr = NULL;
1420     }
1421 
1422     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1423                                   ts_ptr, NULL));
1424 
1425     if (!is_error(ret)) {
1426         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1427             return -TARGET_EFAULT;
1428         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1429             return -TARGET_EFAULT;
1430         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1431             return -TARGET_EFAULT;
1432 
1433         if (target_tv_addr) {
1434             tv.tv_sec = ts.tv_sec;
1435             tv.tv_usec = ts.tv_nsec / 1000;
1436             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1437                 return -TARGET_EFAULT;
1438             }
1439         }
1440     }
1441 
1442     return ret;
1443 }
1444 
1445 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1446 static abi_long do_old_select(abi_ulong arg1)
1447 {
1448     struct target_sel_arg_struct *sel;
1449     abi_ulong inp, outp, exp, tvp;
1450     long nsel;
1451 
1452     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1453         return -TARGET_EFAULT;
1454     }
1455 
1456     nsel = tswapal(sel->n);
1457     inp = tswapal(sel->inp);
1458     outp = tswapal(sel->outp);
1459     exp = tswapal(sel->exp);
1460     tvp = tswapal(sel->tvp);
1461 
1462     unlock_user_struct(sel, arg1, 0);
1463 
1464     return do_select(nsel, inp, outp, exp, tvp);
1465 }
1466 #endif
1467 #endif
1468 
1469 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1470 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1471                             abi_long arg4, abi_long arg5, abi_long arg6,
1472                             bool time64)
1473 {
1474     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1475     fd_set rfds, wfds, efds;
1476     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1477     struct timespec ts, *ts_ptr;
1478     abi_long ret;
1479 
1480     /*
1481      * The 6th arg is actually two args smashed together,
1482      * so we cannot use the C library.
1483      */
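    /*
     * Guest layout of that 6th argument: two abi_ulongs, { address of the
     * guest sigset, size of the guest sigset }.  Both words are fetched and
     * byte-swapped below before the sigset itself is converted with
     * target_to_host_sigset().
     */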
1484     sigset_t set;
1485     struct {
1486         sigset_t *set;
1487         size_t size;
1488     } sig, *sig_ptr;
1489 
1490     abi_ulong arg_sigset, arg_sigsize, *arg7;
1491     target_sigset_t *target_sigset;
1492 
1493     n = arg1;
1494     rfd_addr = arg2;
1495     wfd_addr = arg3;
1496     efd_addr = arg4;
1497     ts_addr = arg5;
1498 
1499     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1500     if (ret) {
1501         return ret;
1502     }
1503     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1504     if (ret) {
1505         return ret;
1506     }
1507     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1508     if (ret) {
1509         return ret;
1510     }
1511 
1512     /*
1513      * This takes a timespec, and not a timeval, so we cannot
1514      * use the do_select() helper ...
1515      */
1516     if (ts_addr) {
1517         if (time64) {
1518             if (target_to_host_timespec64(&ts, ts_addr)) {
1519                 return -TARGET_EFAULT;
1520             }
1521         } else {
1522             if (target_to_host_timespec(&ts, ts_addr)) {
1523                 return -TARGET_EFAULT;
1524             }
1525         }
1526         ts_ptr = &ts;
1527     } else {
1528         ts_ptr = NULL;
1529     }
1530 
1531     /* Extract the two packed args for the sigset */
1532     if (arg6) {
1533         sig_ptr = &sig;
1534         sig.size = SIGSET_T_SIZE;
1535 
1536         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1537         if (!arg7) {
1538             return -TARGET_EFAULT;
1539         }
1540         arg_sigset = tswapal(arg7[0]);
1541         arg_sigsize = tswapal(arg7[1]);
1542         unlock_user(arg7, arg6, 0);
1543 
1544         if (arg_sigset) {
1545             sig.set = &set;
1546             if (arg_sigsize != sizeof(*target_sigset)) {
1547                 /* Like the kernel, we enforce correct size sigsets */
1548                 return -TARGET_EINVAL;
1549             }
1550             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1551                                       sizeof(*target_sigset), 1);
1552             if (!target_sigset) {
1553                 return -TARGET_EFAULT;
1554             }
1555             target_to_host_sigset(&set, target_sigset);
1556             unlock_user(target_sigset, arg_sigset, 0);
1557         } else {
1558             sig.set = NULL;
1559         }
1560     } else {
1561         sig_ptr = NULL;
1562     }
1563 
1564     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1565                                   ts_ptr, sig_ptr));
1566 
1567     if (!is_error(ret)) {
1568         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1569             return -TARGET_EFAULT;
1570         }
1571         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1572             return -TARGET_EFAULT;
1573         }
1574         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1575             return -TARGET_EFAULT;
1576         }
1577         if (time64) {
1578             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1579                 return -TARGET_EFAULT;
1580             }
1581         } else {
1582             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1583                 return -TARGET_EFAULT;
1584             }
1585         }
1586     }
1587     return ret;
1588 }
1589 #endif
1590 
1591 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1592     defined(TARGET_NR_ppoll_time64)
1593 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1594                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1595 {
1596     struct target_pollfd *target_pfd;
1597     unsigned int nfds = arg2;
1598     struct pollfd *pfd;
1599     unsigned int i;
1600     abi_long ret;
1601 
1602     pfd = NULL;
1603     target_pfd = NULL;
1604     if (nfds) {
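        /*
         * Sanity-check nfds before it is used to size the lock_user()
         * and alloca() buffers below, so the multiplications cannot
         * overflow; the host kernel applies its own limits as well.
         */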
1605         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1606             return -TARGET_EINVAL;
1607         }
1608         target_pfd = lock_user(VERIFY_WRITE, arg1,
1609                                sizeof(struct target_pollfd) * nfds, 1);
1610         if (!target_pfd) {
1611             return -TARGET_EFAULT;
1612         }
1613 
1614         pfd = alloca(sizeof(struct pollfd) * nfds);
1615         for (i = 0; i < nfds; i++) {
1616             pfd[i].fd = tswap32(target_pfd[i].fd);
1617             pfd[i].events = tswap16(target_pfd[i].events);
1618         }
1619     }
1620     if (ppoll) {
1621         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1622         target_sigset_t *target_set;
1623         sigset_t _set, *set = &_set;
1624 
1625         if (arg3) {
1626             if (time64) {
1627                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1628                     unlock_user(target_pfd, arg1, 0);
1629                     return -TARGET_EFAULT;
1630                 }
1631             } else {
1632                 if (target_to_host_timespec(timeout_ts, arg3)) {
1633                     unlock_user(target_pfd, arg1, 0);
1634                     return -TARGET_EFAULT;
1635                 }
1636             }
1637         } else {
1638             timeout_ts = NULL;
1639         }
1640 
1641         if (arg4) {
1642             if (arg5 != sizeof(target_sigset_t)) {
1643                 unlock_user(target_pfd, arg1, 0);
1644                 return -TARGET_EINVAL;
1645             }
1646 
1647             target_set = lock_user(VERIFY_READ, arg4,
1648                                    sizeof(target_sigset_t), 1);
1649             if (!target_set) {
1650                 unlock_user(target_pfd, arg1, 0);
1651                 return -TARGET_EFAULT;
1652             }
1653             target_to_host_sigset(set, target_set);
1654         } else {
1655             set = NULL;
1656         }
1657 
1658         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1659                                    set, SIGSET_T_SIZE));
1660 
1661         if (!is_error(ret) && arg3) {
1662             if (time64) {
1663                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1664                     return -TARGET_EFAULT;
1665                 }
1666             } else {
1667                 if (host_to_target_timespec(arg3, timeout_ts)) {
1668                     return -TARGET_EFAULT;
1669                 }
1670             }
1671         }
1672         if (arg4) {
1673             unlock_user(target_set, arg4, 0);
1674         }
1675     } else {
1676         struct timespec ts, *pts;
1677 
1678         if (arg3 >= 0) {
1679             /* Convert ms to secs, ns */
1680             ts.tv_sec = arg3 / 1000;
1681             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1682             pts = &ts;
1683         } else {
1684             /* A negative poll() timeout means "infinite" */
1685             pts = NULL;
1686         }
1687         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1688     }
1689 
1690     if (!is_error(ret)) {
1691         for (i = 0; i < nfds; i++) {
1692             target_pfd[i].revents = tswap16(pfd[i].revents);
1693         }
1694     }
1695     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1696     return ret;
1697 }
1698 #endif
1699 
1700 static abi_long do_pipe2(int host_pipe[], int flags)
1701 {
1702 #ifdef CONFIG_PIPE2
1703     return pipe2(host_pipe, flags);
1704 #else
1705     return -ENOSYS;
1706 #endif
1707 }
1708 
1709 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1710                         int flags, int is_pipe2)
1711 {
1712     int host_pipe[2];
1713     abi_long ret;
1714     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1715 
1716     if (is_error(ret))
1717         return get_errno(ret);
1718 
1719     /* Several targets have special calling conventions for the original
1720        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
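    /*
     * For example, on the targets handled below the kernel returns the
     * read end of the pipe as the syscall result and the write end in a
     * second CPU register, instead of storing both descriptors through
     * the user-supplied pointer; we mirror that convention here.
     */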
1721     if (!is_pipe2) {
1722 #if defined(TARGET_ALPHA)
1723         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1724         return host_pipe[0];
1725 #elif defined(TARGET_MIPS)
1726         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1727         ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1728         return host_pipe[0];
1729 #elif defined(TARGET_SH4)
1730         ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1731         return host_pipe[0];
1732 #elif defined(TARGET_SPARC)
1733         ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1734 #endif
1735     }
1736 
1737     if (put_user_s32(host_pipe[0], pipedes)
1738         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1739         return -TARGET_EFAULT;
1740     return get_errno(ret);
1741 }
1742 
1743 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1744                                               abi_ulong target_addr,
1745                                               socklen_t len)
1746 {
1747     struct target_ip_mreqn *target_smreqn;
1748 
1749     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1750     if (!target_smreqn)
1751         return -TARGET_EFAULT;
1752     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1753     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1754     if (len == sizeof(struct target_ip_mreqn))
1755         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1756     unlock_user(target_smreqn, target_addr, 0);
1757 
1758     return 0;
1759 }
1760 
1761 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1762                                                abi_ulong target_addr,
1763                                                socklen_t len)
1764 {
1765     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1766     sa_family_t sa_family;
1767     struct target_sockaddr *target_saddr;
1768 
1769     if (fd_trans_target_to_host_addr(fd)) {
1770         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1771     }
1772 
1773     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1774     if (!target_saddr)
1775         return -TARGET_EFAULT;
1776 
1777     sa_family = tswap16(target_saddr->sa_family);
1778 
1779     /* Oops. The caller might send an incomplete sun_path; sun_path
1780      * must be terminated by \0 (see the manual page), but
1781      * unfortunately it is quite common to specify the sockaddr_un
1782      * length as "strlen(x->sun_path)" when it should be
1783      * "strlen(...) + 1". We'll fix that here if needed.
1784      * The Linux kernel applies a similar fix-up.
1786 
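    /*
     * Worked example (illustrative): a guest that passes
     * len == offsetof(struct sockaddr_un, sun_path) + strlen(sun_path)
     * has left out the trailing NUL; the AF_UNIX branch below sees that
     * the last byte inside len is non-zero while the byte just past it is
     * zero, and extends len by one so the host receives a terminated path.
     */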
1787     if (sa_family == AF_UNIX) {
1788         if (len < unix_maxlen && len > 0) {
1789             char *cp = (char *)target_saddr;
1790 
1791             if (cp[len - 1] && !cp[len])
1792                 len++;
1793         }
1794         if (len > unix_maxlen)
1795             len = unix_maxlen;
1796     }
1797 
1798     memcpy(addr, target_saddr, len);
1799     addr->sa_family = sa_family;
1800     if (sa_family == AF_NETLINK) {
1801         struct sockaddr_nl *nladdr;
1802 
1803         nladdr = (struct sockaddr_nl *)addr;
1804         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1805         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1806     } else if (sa_family == AF_PACKET) {
1807         struct target_sockaddr_ll *lladdr;
1808 
1809         lladdr = (struct target_sockaddr_ll *)addr;
1810         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1811         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1812     }
1813     unlock_user(target_saddr, target_addr, 0);
1814 
1815     return 0;
1816 }
1817 
1818 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1819                                                struct sockaddr *addr,
1820                                                socklen_t len)
1821 {
1822     struct target_sockaddr *target_saddr;
1823 
1824     if (len == 0) {
1825         return 0;
1826     }
1827     assert(addr);
1828 
1829     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1830     if (!target_saddr)
1831         return -TARGET_EFAULT;
1832     memcpy(target_saddr, addr, len);
1833     if (len >= offsetof(struct target_sockaddr, sa_family) +
1834         sizeof(target_saddr->sa_family)) {
1835         target_saddr->sa_family = tswap16(addr->sa_family);
1836     }
1837     if (addr->sa_family == AF_NETLINK &&
1838         len >= sizeof(struct target_sockaddr_nl)) {
1839         struct target_sockaddr_nl *target_nl =
1840                (struct target_sockaddr_nl *)target_saddr;
1841         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1842         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1843     } else if (addr->sa_family == AF_PACKET) {
1844         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1845         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1846         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1847     } else if (addr->sa_family == AF_INET6 &&
1848                len >= sizeof(struct target_sockaddr_in6)) {
1849         struct target_sockaddr_in6 *target_in6 =
1850                (struct target_sockaddr_in6 *)target_saddr;
1851         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1852     }
1853     unlock_user(target_saddr, target_addr, len);
1854 
1855     return 0;
1856 }
1857 
1858 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1859                                            struct target_msghdr *target_msgh)
1860 {
1861     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1862     abi_long msg_controllen;
1863     abi_ulong target_cmsg_addr;
1864     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1865     socklen_t space = 0;
1866 
1867     msg_controllen = tswapal(target_msgh->msg_controllen);
1868     if (msg_controllen < sizeof (struct target_cmsghdr))
1869         goto the_end;
1870     target_cmsg_addr = tswapal(target_msgh->msg_control);
1871     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1872     target_cmsg_start = target_cmsg;
1873     if (!target_cmsg)
1874         return -TARGET_EFAULT;
1875 
1876     while (cmsg && target_cmsg) {
1877         void *data = CMSG_DATA(cmsg);
1878         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1879 
1880         int len = tswapal(target_cmsg->cmsg_len)
1881             - sizeof(struct target_cmsghdr);
1882 
1883         space += CMSG_SPACE(len);
1884         if (space > msgh->msg_controllen) {
1885             space -= CMSG_SPACE(len);
1886             /* This is a QEMU bug, since we allocated the payload
1887              * area ourselves (unlike overflow in host-to-target
1888              * conversion, which is just the guest giving us a buffer
1889              * that's too small). It can't happen for the payload types
1890              * we currently support; if it becomes an issue in future
1891              * we would need to improve our allocation strategy to
1892              * something more intelligent than "twice the size of the
1893              * target buffer we're reading from".
1894              */
1895             qemu_log_mask(LOG_UNIMP,
1896                           ("Unsupported ancillary data %d/%d: "
1897                            "unhandled msg size\n"),
1898                           tswap32(target_cmsg->cmsg_level),
1899                           tswap32(target_cmsg->cmsg_type));
1900             break;
1901         }
1902 
1903         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1904             cmsg->cmsg_level = SOL_SOCKET;
1905         } else {
1906             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1907         }
1908         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1909         cmsg->cmsg_len = CMSG_LEN(len);
1910 
1911         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1912             int *fd = (int *)data;
1913             int *target_fd = (int *)target_data;
1914             int i, numfds = len / sizeof(int);
1915 
1916             for (i = 0; i < numfds; i++) {
1917                 __get_user(fd[i], target_fd + i);
1918             }
1919         } else if (cmsg->cmsg_level == SOL_SOCKET
1920                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1921             struct ucred *cred = (struct ucred *)data;
1922             struct target_ucred *target_cred =
1923                 (struct target_ucred *)target_data;
1924 
1925             __get_user(cred->pid, &target_cred->pid);
1926             __get_user(cred->uid, &target_cred->uid);
1927             __get_user(cred->gid, &target_cred->gid);
1928         } else {
1929             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1930                           cmsg->cmsg_level, cmsg->cmsg_type);
1931             memcpy(data, target_data, len);
1932         }
1933 
1934         cmsg = CMSG_NXTHDR(msgh, cmsg);
1935         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1936                                          target_cmsg_start);
1937     }
1938     unlock_user(target_cmsg, target_cmsg_addr, 0);
1939  the_end:
1940     msgh->msg_controllen = space;
1941     return 0;
1942 }
1943 
1944 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1945                                            struct msghdr *msgh)
1946 {
1947     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1948     abi_long msg_controllen;
1949     abi_ulong target_cmsg_addr;
1950     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1951     socklen_t space = 0;
1952 
1953     msg_controllen = tswapal(target_msgh->msg_controllen);
1954     if (msg_controllen < sizeof (struct target_cmsghdr))
1955         goto the_end;
1956     target_cmsg_addr = tswapal(target_msgh->msg_control);
1957     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1958     target_cmsg_start = target_cmsg;
1959     if (!target_cmsg)
1960         return -TARGET_EFAULT;
1961 
1962     while (cmsg && target_cmsg) {
1963         void *data = CMSG_DATA(cmsg);
1964         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1965 
1966         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1967         int tgt_len, tgt_space;
1968 
1969         /* We never copy a half-header but may copy half-data;
1970          * this is Linux's behaviour in put_cmsg(). Note that
1971          * truncation here is a guest problem (which we report
1972          * to the guest via the CTRUNC bit), unlike truncation
1973          * in target_to_host_cmsg, which is a QEMU bug.
1974          */
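        /*
         * Illustrative example: if the guest supplies control-buffer space
         * for a cmsg header plus only two of three SCM_RIGHTS descriptors,
         * the first two are copied, tgt_len is clamped below, and the guest
         * is told about the loss via MSG_CTRUNC.
         */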
1975         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1976             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1977             break;
1978         }
1979 
1980         if (cmsg->cmsg_level == SOL_SOCKET) {
1981             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1982         } else {
1983             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1984         }
1985         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1986 
1987         /* Payload types which need a different size of payload on
1988          * the target must adjust tgt_len here.
1989          */
1990         tgt_len = len;
1991         switch (cmsg->cmsg_level) {
1992         case SOL_SOCKET:
1993             switch (cmsg->cmsg_type) {
1994             case SO_TIMESTAMP:
1995                 tgt_len = sizeof(struct target_timeval);
1996                 break;
1997             default:
1998                 break;
1999             }
2000             break;
2001         default:
2002             break;
2003         }
2004 
2005         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2006             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2007             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2008         }
2009 
2010         /* We must now copy-and-convert len bytes of payload
2011          * into tgt_len bytes of destination space. Bear in mind
2012          * that in both source and destination we may be dealing
2013          * with a truncated value!
2014          */
2015         switch (cmsg->cmsg_level) {
2016         case SOL_SOCKET:
2017             switch (cmsg->cmsg_type) {
2018             case SCM_RIGHTS:
2019             {
2020                 int *fd = (int *)data;
2021                 int *target_fd = (int *)target_data;
2022                 int i, numfds = tgt_len / sizeof(int);
2023 
2024                 for (i = 0; i < numfds; i++) {
2025                     __put_user(fd[i], target_fd + i);
2026                 }
2027                 break;
2028             }
2029             case SO_TIMESTAMP:
2030             {
2031                 struct timeval *tv = (struct timeval *)data;
2032                 struct target_timeval *target_tv =
2033                     (struct target_timeval *)target_data;
2034 
2035                 if (len != sizeof(struct timeval) ||
2036                     tgt_len != sizeof(struct target_timeval)) {
2037                     goto unimplemented;
2038                 }
2039 
2040                 /* copy struct timeval to target */
2041                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2042                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2043                 break;
2044             }
2045             case SCM_CREDENTIALS:
2046             {
2047                 struct ucred *cred = (struct ucred *)data;
2048                 struct target_ucred *target_cred =
2049                     (struct target_ucred *)target_data;
2050 
2051                 __put_user(cred->pid, &target_cred->pid);
2052                 __put_user(cred->uid, &target_cred->uid);
2053                 __put_user(cred->gid, &target_cred->gid);
2054                 break;
2055             }
2056             default:
2057                 goto unimplemented;
2058             }
2059             break;
2060 
2061         case SOL_IP:
2062             switch (cmsg->cmsg_type) {
2063             case IP_TTL:
2064             {
2065                 uint32_t *v = (uint32_t *)data;
2066                 uint32_t *t_int = (uint32_t *)target_data;
2067 
2068                 if (len != sizeof(uint32_t) ||
2069                     tgt_len != sizeof(uint32_t)) {
2070                     goto unimplemented;
2071                 }
2072                 __put_user(*v, t_int);
2073                 break;
2074             }
2075             case IP_RECVERR:
2076             {
2077                 struct errhdr_t {
2078                    struct sock_extended_err ee;
2079                    struct sockaddr_in offender;
2080                 };
2081                 struct errhdr_t *errh = (struct errhdr_t *)data;
2082                 struct errhdr_t *target_errh =
2083                     (struct errhdr_t *)target_data;
2084 
2085                 if (len != sizeof(struct errhdr_t) ||
2086                     tgt_len != sizeof(struct errhdr_t)) {
2087                     goto unimplemented;
2088                 }
2089                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2090                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2091                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2092                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2093                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2094                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2095                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2096                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2097                     (void *) &errh->offender, sizeof(errh->offender));
2098                 break;
2099             }
2100             default:
2101                 goto unimplemented;
2102             }
2103             break;
2104 
2105         case SOL_IPV6:
2106             switch (cmsg->cmsg_type) {
2107             case IPV6_HOPLIMIT:
2108             {
2109                 uint32_t *v = (uint32_t *)data;
2110                 uint32_t *t_int = (uint32_t *)target_data;
2111 
2112                 if (len != sizeof(uint32_t) ||
2113                     tgt_len != sizeof(uint32_t)) {
2114                     goto unimplemented;
2115                 }
2116                 __put_user(*v, t_int);
2117                 break;
2118             }
2119             case IPV6_RECVERR:
2120             {
2121                 struct errhdr6_t {
2122                    struct sock_extended_err ee;
2123                    struct sockaddr_in6 offender;
2124                 };
2125                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2126                 struct errhdr6_t *target_errh =
2127                     (struct errhdr6_t *)target_data;
2128 
2129                 if (len != sizeof(struct errhdr6_t) ||
2130                     tgt_len != sizeof(struct errhdr6_t)) {
2131                     goto unimplemented;
2132                 }
2133                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2134                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2135                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2136                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2137                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2138                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2139                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2140                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2141                     (void *) &errh->offender, sizeof(errh->offender));
2142                 break;
2143             }
2144             default:
2145                 goto unimplemented;
2146             }
2147             break;
2148 
2149         default:
2150         unimplemented:
2151             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2152                           cmsg->cmsg_level, cmsg->cmsg_type);
2153             memcpy(target_data, data, MIN(len, tgt_len));
2154             if (tgt_len > len) {
2155                 memset(target_data + len, 0, tgt_len - len);
2156             }
2157         }
2158 
2159         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2160         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2161         if (msg_controllen < tgt_space) {
2162             tgt_space = msg_controllen;
2163         }
2164         msg_controllen -= tgt_space;
2165         space += tgt_space;
2166         cmsg = CMSG_NXTHDR(msgh, cmsg);
2167         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2168                                          target_cmsg_start);
2169     }
2170     unlock_user(target_cmsg, target_cmsg_addr, space);
2171  the_end:
2172     target_msgh->msg_controllen = tswapal(space);
2173     return 0;
2174 }
2175 
2176 /* do_setsockopt() must return target values and target errnos. */
2177 static abi_long do_setsockopt(int sockfd, int level, int optname,
2178                               abi_ulong optval_addr, socklen_t optlen)
2179 {
2180     abi_long ret;
2181     int val;
2182     struct ip_mreqn *ip_mreq;
2183     struct ip_mreq_source *ip_mreq_source;
2184 
2185     switch(level) {
2186     case SOL_TCP:
2187         /* TCP options all take an 'int' value.  */
2188         if (optlen < sizeof(uint32_t))
2189             return -TARGET_EINVAL;
2190 
2191         if (get_user_u32(val, optval_addr))
2192             return -TARGET_EFAULT;
2193         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2194         break;
2195     case SOL_IP:
2196         switch(optname) {
2197         case IP_TOS:
2198         case IP_TTL:
2199         case IP_HDRINCL:
2200         case IP_ROUTER_ALERT:
2201         case IP_RECVOPTS:
2202         case IP_RETOPTS:
2203         case IP_PKTINFO:
2204         case IP_MTU_DISCOVER:
2205         case IP_RECVERR:
2206         case IP_RECVTTL:
2207         case IP_RECVTOS:
2208 #ifdef IP_FREEBIND
2209         case IP_FREEBIND:
2210 #endif
2211         case IP_MULTICAST_TTL:
2212         case IP_MULTICAST_LOOP:
2213             val = 0;
2214             if (optlen >= sizeof(uint32_t)) {
2215                 if (get_user_u32(val, optval_addr))
2216                     return -TARGET_EFAULT;
2217             } else if (optlen >= 1) {
2218                 if (get_user_u8(val, optval_addr))
2219                     return -TARGET_EFAULT;
2220             }
2221             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2222             break;
2223         case IP_ADD_MEMBERSHIP:
2224         case IP_DROP_MEMBERSHIP:
2225             if (optlen < sizeof (struct target_ip_mreq) ||
2226                 optlen > sizeof (struct target_ip_mreqn))
2227                 return -TARGET_EINVAL;
2228 
2229             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2230             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2231             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2232             break;
2233 
2234         case IP_BLOCK_SOURCE:
2235         case IP_UNBLOCK_SOURCE:
2236         case IP_ADD_SOURCE_MEMBERSHIP:
2237         case IP_DROP_SOURCE_MEMBERSHIP:
2238             if (optlen != sizeof (struct target_ip_mreq_source))
2239                 return -TARGET_EINVAL;
2240 
2241             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2242             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2243             unlock_user (ip_mreq_source, optval_addr, 0);
2244             break;
2245 
2246         default:
2247             goto unimplemented;
2248         }
2249         break;
2250     case SOL_IPV6:
2251         switch (optname) {
2252         case IPV6_MTU_DISCOVER:
2253         case IPV6_MTU:
2254         case IPV6_V6ONLY:
2255         case IPV6_RECVPKTINFO:
2256         case IPV6_UNICAST_HOPS:
2257         case IPV6_MULTICAST_HOPS:
2258         case IPV6_MULTICAST_LOOP:
2259         case IPV6_RECVERR:
2260         case IPV6_RECVHOPLIMIT:
2261         case IPV6_2292HOPLIMIT:
2262         case IPV6_CHECKSUM:
2263         case IPV6_ADDRFORM:
2264         case IPV6_2292PKTINFO:
2265         case IPV6_RECVTCLASS:
2266         case IPV6_RECVRTHDR:
2267         case IPV6_2292RTHDR:
2268         case IPV6_RECVHOPOPTS:
2269         case IPV6_2292HOPOPTS:
2270         case IPV6_RECVDSTOPTS:
2271         case IPV6_2292DSTOPTS:
2272         case IPV6_TCLASS:
2273 #ifdef IPV6_RECVPATHMTU
2274         case IPV6_RECVPATHMTU:
2275 #endif
2276 #ifdef IPV6_TRANSPARENT
2277         case IPV6_TRANSPARENT:
2278 #endif
2279 #ifdef IPV6_FREEBIND
2280         case IPV6_FREEBIND:
2281 #endif
2282 #ifdef IPV6_RECVORIGDSTADDR
2283         case IPV6_RECVORIGDSTADDR:
2284 #endif
2285             val = 0;
2286             if (optlen < sizeof(uint32_t)) {
2287                 return -TARGET_EINVAL;
2288             }
2289             if (get_user_u32(val, optval_addr)) {
2290                 return -TARGET_EFAULT;
2291             }
2292             ret = get_errno(setsockopt(sockfd, level, optname,
2293                                        &val, sizeof(val)));
2294             break;
2295         case IPV6_PKTINFO:
2296         {
2297             struct in6_pktinfo pki;
2298 
2299             if (optlen < sizeof(pki)) {
2300                 return -TARGET_EINVAL;
2301             }
2302 
2303             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2304                 return -TARGET_EFAULT;
2305             }
2306 
2307             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2308 
2309             ret = get_errno(setsockopt(sockfd, level, optname,
2310                                        &pki, sizeof(pki)));
2311             break;
2312         }
2313         case IPV6_ADD_MEMBERSHIP:
2314         case IPV6_DROP_MEMBERSHIP:
2315         {
2316             struct ipv6_mreq ipv6mreq;
2317 
2318             if (optlen < sizeof(ipv6mreq)) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2323                 return -TARGET_EFAULT;
2324             }
2325 
2326             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2327 
2328             ret = get_errno(setsockopt(sockfd, level, optname,
2329                                        &ipv6mreq, sizeof(ipv6mreq)));
2330             break;
2331         }
2332         default:
2333             goto unimplemented;
2334         }
2335         break;
2336     case SOL_ICMPV6:
2337         switch (optname) {
2338         case ICMPV6_FILTER:
2339         {
2340             struct icmp6_filter icmp6f;
2341 
2342             if (optlen > sizeof(icmp6f)) {
2343                 optlen = sizeof(icmp6f);
2344             }
2345 
2346             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2347                 return -TARGET_EFAULT;
2348             }
2349 
2350             for (val = 0; val < 8; val++) {
2351                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2352             }
2353 
2354             ret = get_errno(setsockopt(sockfd, level, optname,
2355                                        &icmp6f, optlen));
2356             break;
2357         }
2358         default:
2359             goto unimplemented;
2360         }
2361         break;
2362     case SOL_RAW:
2363         switch (optname) {
2364         case ICMP_FILTER:
2365         case IPV6_CHECKSUM:
2366             /* These take a u32 value */
2367             if (optlen < sizeof(uint32_t)) {
2368                 return -TARGET_EINVAL;
2369             }
2370 
2371             if (get_user_u32(val, optval_addr)) {
2372                 return -TARGET_EFAULT;
2373             }
2374             ret = get_errno(setsockopt(sockfd, level, optname,
2375                                        &val, sizeof(val)));
2376             break;
2377 
2378         default:
2379             goto unimplemented;
2380         }
2381         break;
2382 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2383     case SOL_ALG:
2384         switch (optname) {
2385         case ALG_SET_KEY:
2386         {
2387             char *alg_key = g_malloc(optlen);
2388 
2389             if (!alg_key) {
2390                 return -TARGET_ENOMEM;
2391             }
2392             if (copy_from_user(alg_key, optval_addr, optlen)) {
2393                 g_free(alg_key);
2394                 return -TARGET_EFAULT;
2395             }
2396             ret = get_errno(setsockopt(sockfd, level, optname,
2397                                        alg_key, optlen));
2398             g_free(alg_key);
2399             break;
2400         }
2401         case ALG_SET_AEAD_AUTHSIZE:
2402         {
2403             ret = get_errno(setsockopt(sockfd, level, optname,
2404                                        NULL, optlen));
2405             break;
2406         }
2407         default:
2408             goto unimplemented;
2409         }
2410         break;
2411 #endif
2412     case TARGET_SOL_SOCKET:
2413         switch (optname) {
2414         case TARGET_SO_RCVTIMEO:
2415         {
2416                 struct timeval tv;
2417 
2418                 optname = SO_RCVTIMEO;
2419 
2420 set_timeout:
2421                 if (optlen != sizeof(struct target_timeval)) {
2422                     return -TARGET_EINVAL;
2423                 }
2424 
2425                 if (copy_from_user_timeval(&tv, optval_addr)) {
2426                     return -TARGET_EFAULT;
2427                 }
2428 
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2430                                 &tv, sizeof(tv)));
2431                 return ret;
2432         }
2433         case TARGET_SO_SNDTIMEO:
2434                 optname = SO_SNDTIMEO;
2435                 goto set_timeout;
2436         case TARGET_SO_ATTACH_FILTER:
2437         {
2438                 struct target_sock_fprog *tfprog;
2439                 struct target_sock_filter *tfilter;
2440                 struct sock_fprog fprog;
2441                 struct sock_filter *filter;
2442                 int i;
2443 
2444                 if (optlen != sizeof(*tfprog)) {
2445                     return -TARGET_EINVAL;
2446                 }
2447                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2448                     return -TARGET_EFAULT;
2449                 }
2450                 if (!lock_user_struct(VERIFY_READ, tfilter,
2451                                       tswapal(tfprog->filter), 0)) {
2452                     unlock_user_struct(tfprog, optval_addr, 1);
2453                     return -TARGET_EFAULT;
2454                 }
2455 
2456                 fprog.len = tswap16(tfprog->len);
2457                 filter = g_try_new(struct sock_filter, fprog.len);
2458                 if (filter == NULL) {
2459                     unlock_user_struct(tfilter, tfprog->filter, 1);
2460                     unlock_user_struct(tfprog, optval_addr, 1);
2461                     return -TARGET_ENOMEM;
2462                 }
2463                 for (i = 0; i < fprog.len; i++) {
2464                     filter[i].code = tswap16(tfilter[i].code);
2465                     filter[i].jt = tfilter[i].jt;
2466                     filter[i].jf = tfilter[i].jf;
2467                     filter[i].k = tswap32(tfilter[i].k);
2468                 }
2469                 fprog.filter = filter;
2470 
2471                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2472                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2473                 g_free(filter);
2474 
2475                 unlock_user_struct(tfilter, tfprog->filter, 1);
2476                 unlock_user_struct(tfprog, optval_addr, 1);
2477                 return ret;
2478         }
2479         case TARGET_SO_BINDTODEVICE:
2480         {
2481                 char *dev_ifname, *addr_ifname;
2482 
2483                 if (optlen > IFNAMSIZ - 1) {
2484                     optlen = IFNAMSIZ - 1;
2485                 }
2486                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2487                 if (!dev_ifname) {
2488                     return -TARGET_EFAULT;
2489                 }
2490                 optname = SO_BINDTODEVICE;
2491                 addr_ifname = alloca(IFNAMSIZ);
2492                 memcpy(addr_ifname, dev_ifname, optlen);
2493                 addr_ifname[optlen] = 0;
2494                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2495                                            addr_ifname, optlen));
2496                 unlock_user(dev_ifname, optval_addr, 0);
2497                 return ret;
2498         }
2499         case TARGET_SO_LINGER:
2500         {
2501                 struct linger lg;
2502                 struct target_linger *tlg;
2503 
2504                 if (optlen != sizeof(struct target_linger)) {
2505                     return -TARGET_EINVAL;
2506                 }
2507                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2508                     return -TARGET_EFAULT;
2509                 }
2510                 __get_user(lg.l_onoff, &tlg->l_onoff);
2511                 __get_user(lg.l_linger, &tlg->l_linger);
2512                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2513                                 &lg, sizeof(lg)));
2514                 unlock_user_struct(tlg, optval_addr, 0);
2515                 return ret;
2516         }
2517             /* Options with 'int' argument.  */
2518         case TARGET_SO_DEBUG:
2519                 optname = SO_DEBUG;
2520                 break;
2521         case TARGET_SO_REUSEADDR:
2522                 optname = SO_REUSEADDR;
2523                 break;
2524 #ifdef SO_REUSEPORT
2525         case TARGET_SO_REUSEPORT:
2526                 optname = SO_REUSEPORT;
2527                 break;
2528 #endif
2529         case TARGET_SO_TYPE:
2530                 optname = SO_TYPE;
2531                 break;
2532         case TARGET_SO_ERROR:
2533                 optname = SO_ERROR;
2534                 break;
2535         case TARGET_SO_DONTROUTE:
2536                 optname = SO_DONTROUTE;
2537                 break;
2538         case TARGET_SO_BROADCAST:
2539                 optname = SO_BROADCAST;
2540                 break;
2541         case TARGET_SO_SNDBUF:
2542                 optname = SO_SNDBUF;
2543                 break;
2544         case TARGET_SO_SNDBUFFORCE:
2545                 optname = SO_SNDBUFFORCE;
2546                 break;
2547         case TARGET_SO_RCVBUF:
2548                 optname = SO_RCVBUF;
2549                 break;
2550         case TARGET_SO_RCVBUFFORCE:
2551                 optname = SO_RCVBUFFORCE;
2552                 break;
2553         case TARGET_SO_KEEPALIVE:
2554                 optname = SO_KEEPALIVE;
2555                 break;
2556         case TARGET_SO_OOBINLINE:
2557                 optname = SO_OOBINLINE;
2558                 break;
2559         case TARGET_SO_NO_CHECK:
2560                 optname = SO_NO_CHECK;
2561                 break;
2562         case TARGET_SO_PRIORITY:
2563                 optname = SO_PRIORITY;
2564                 break;
2565 #ifdef SO_BSDCOMPAT
2566         case TARGET_SO_BSDCOMPAT:
2567                 optname = SO_BSDCOMPAT;
2568                 break;
2569 #endif
2570         case TARGET_SO_PASSCRED:
2571                 optname = SO_PASSCRED;
2572                 break;
2573         case TARGET_SO_PASSSEC:
2574                 optname = SO_PASSSEC;
2575                 break;
2576         case TARGET_SO_TIMESTAMP:
2577                 optname = SO_TIMESTAMP;
2578                 break;
2579         case TARGET_SO_RCVLOWAT:
2580                 optname = SO_RCVLOWAT;
2581                 break;
2582         default:
2583             goto unimplemented;
2584         }
2585         if (optlen < sizeof(uint32_t))
2586             return -TARGET_EINVAL;
2587 
2588         if (get_user_u32(val, optval_addr))
2589             return -TARGET_EFAULT;
2590         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2591         break;
2592 #ifdef SOL_NETLINK
2593     case SOL_NETLINK:
2594         switch (optname) {
2595         case NETLINK_PKTINFO:
2596         case NETLINK_ADD_MEMBERSHIP:
2597         case NETLINK_DROP_MEMBERSHIP:
2598         case NETLINK_BROADCAST_ERROR:
2599         case NETLINK_NO_ENOBUFS:
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2601         case NETLINK_LISTEN_ALL_NSID:
2602         case NETLINK_CAP_ACK:
2603 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2605         case NETLINK_EXT_ACK:
2606 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2607 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2608         case NETLINK_GET_STRICT_CHK:
2609 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2610             break;
2611         default:
2612             goto unimplemented;
2613         }
2614         val = 0;
2615         if (optlen < sizeof(uint32_t)) {
2616             return -TARGET_EINVAL;
2617         }
2618         if (get_user_u32(val, optval_addr)) {
2619             return -TARGET_EFAULT;
2620         }
2621         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2622                                    sizeof(val)));
2623         break;
2624 #endif /* SOL_NETLINK */
2625     default:
2626     unimplemented:
2627         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2628                       level, optname);
2629         ret = -TARGET_ENOPROTOOPT;
2630     }
2631     return ret;
2632 }
2633 
2634 /* do_getsockopt() must return target values and target errnos. */
2635 static abi_long do_getsockopt(int sockfd, int level, int optname,
2636                               abi_ulong optval_addr, abi_ulong optlen)
2637 {
2638     abi_long ret;
2639     int len, val;
2640     socklen_t lv;
2641 
2642     switch(level) {
2643     case TARGET_SOL_SOCKET:
2644         level = SOL_SOCKET;
2645         switch (optname) {
2646         /* These don't just return a single integer */
2647         case TARGET_SO_PEERNAME:
2648             goto unimplemented;
2649         case TARGET_SO_RCVTIMEO: {
2650             struct timeval tv;
2651             socklen_t tvlen;
2652 
2653             optname = SO_RCVTIMEO;
2654 
2655 get_timeout:
2656             if (get_user_u32(len, optlen)) {
2657                 return -TARGET_EFAULT;
2658             }
2659             if (len < 0) {
2660                 return -TARGET_EINVAL;
2661             }
2662 
2663             tvlen = sizeof(tv);
2664             ret = get_errno(getsockopt(sockfd, level, optname,
2665                                        &tv, &tvlen));
2666             if (ret < 0) {
2667                 return ret;
2668             }
2669             if (len > sizeof(struct target_timeval)) {
2670                 len = sizeof(struct target_timeval);
2671             }
2672             if (copy_to_user_timeval(optval_addr, &tv)) {
2673                 return -TARGET_EFAULT;
2674             }
2675             if (put_user_u32(len, optlen)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             break;
2679         }
2680         case TARGET_SO_SNDTIMEO:
2681             optname = SO_SNDTIMEO;
2682             goto get_timeout;
2683         case TARGET_SO_PEERCRED: {
2684             struct ucred cr;
2685             socklen_t crlen;
2686             struct target_ucred *tcr;
2687 
2688             if (get_user_u32(len, optlen)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             if (len < 0) {
2692                 return -TARGET_EINVAL;
2693             }
2694 
2695             crlen = sizeof(cr);
2696             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2697                                        &cr, &crlen));
2698             if (ret < 0) {
2699                 return ret;
2700             }
2701             if (len > crlen) {
2702                 len = crlen;
2703             }
2704             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             __put_user(cr.pid, &tcr->pid);
2708             __put_user(cr.uid, &tcr->uid);
2709             __put_user(cr.gid, &tcr->gid);
2710             unlock_user_struct(tcr, optval_addr, 1);
2711             if (put_user_u32(len, optlen)) {
2712                 return -TARGET_EFAULT;
2713             }
2714             break;
2715         }
2716         case TARGET_SO_PEERSEC: {
2717             char *name;
2718 
2719             if (get_user_u32(len, optlen)) {
2720                 return -TARGET_EFAULT;
2721             }
2722             if (len < 0) {
2723                 return -TARGET_EINVAL;
2724             }
2725             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2726             if (!name) {
2727                 return -TARGET_EFAULT;
2728             }
2729             lv = len;
2730             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2731                                        name, &lv));
2732             if (put_user_u32(lv, optlen)) {
2733                 ret = -TARGET_EFAULT;
2734             }
2735             unlock_user(name, optval_addr, lv);
2736             break;
2737         }
2738         case TARGET_SO_LINGER:
2739         {
2740             struct linger lg;
2741             socklen_t lglen;
2742             struct target_linger *tlg;
2743 
2744             if (get_user_u32(len, optlen)) {
2745                 return -TARGET_EFAULT;
2746             }
2747             if (len < 0) {
2748                 return -TARGET_EINVAL;
2749             }
2750 
2751             lglen = sizeof(lg);
2752             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2753                                        &lg, &lglen));
2754             if (ret < 0) {
2755                 return ret;
2756             }
2757             if (len > lglen) {
2758                 len = lglen;
2759             }
2760             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2761                 return -TARGET_EFAULT;
2762             }
2763             __put_user(lg.l_onoff, &tlg->l_onoff);
2764             __put_user(lg.l_linger, &tlg->l_linger);
2765             unlock_user_struct(tlg, optval_addr, 1);
2766             if (put_user_u32(len, optlen)) {
2767                 return -TARGET_EFAULT;
2768             }
2769             break;
2770         }
2771         /* Options with 'int' argument.  */
2772         case TARGET_SO_DEBUG:
2773             optname = SO_DEBUG;
2774             goto int_case;
2775         case TARGET_SO_REUSEADDR:
2776             optname = SO_REUSEADDR;
2777             goto int_case;
2778 #ifdef SO_REUSEPORT
2779         case TARGET_SO_REUSEPORT:
2780             optname = SO_REUSEPORT;
2781             goto int_case;
2782 #endif
2783         case TARGET_SO_TYPE:
2784             optname = SO_TYPE;
2785             goto int_case;
2786         case TARGET_SO_ERROR:
2787             optname = SO_ERROR;
2788             goto int_case;
2789         case TARGET_SO_DONTROUTE:
2790             optname = SO_DONTROUTE;
2791             goto int_case;
2792         case TARGET_SO_BROADCAST:
2793             optname = SO_BROADCAST;
2794             goto int_case;
2795         case TARGET_SO_SNDBUF:
2796             optname = SO_SNDBUF;
2797             goto int_case;
2798         case TARGET_SO_RCVBUF:
2799             optname = SO_RCVBUF;
2800             goto int_case;
2801         case TARGET_SO_KEEPALIVE:
2802             optname = SO_KEEPALIVE;
2803             goto int_case;
2804         case TARGET_SO_OOBINLINE:
2805             optname = SO_OOBINLINE;
2806             goto int_case;
2807         case TARGET_SO_NO_CHECK:
2808             optname = SO_NO_CHECK;
2809             goto int_case;
2810         case TARGET_SO_PRIORITY:
2811             optname = SO_PRIORITY;
2812             goto int_case;
2813 #ifdef SO_BSDCOMPAT
2814         case TARGET_SO_BSDCOMPAT:
2815             optname = SO_BSDCOMPAT;
2816             goto int_case;
2817 #endif
2818         case TARGET_SO_PASSCRED:
2819             optname = SO_PASSCRED;
2820             goto int_case;
2821         case TARGET_SO_TIMESTAMP:
2822             optname = SO_TIMESTAMP;
2823             goto int_case;
2824         case TARGET_SO_RCVLOWAT:
2825             optname = SO_RCVLOWAT;
2826             goto int_case;
2827         case TARGET_SO_ACCEPTCONN:
2828             optname = SO_ACCEPTCONN;
2829             goto int_case;
2830         default:
2831             goto int_case;
2832         }
2833         break;
2834     case SOL_TCP:
2835         /* TCP options all take an 'int' value.  */
2836     int_case:
2837         if (get_user_u32(len, optlen))
2838             return -TARGET_EFAULT;
2839         if (len < 0)
2840             return -TARGET_EINVAL;
2841         lv = sizeof(lv);
2842         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2843         if (ret < 0)
2844             return ret;
2845         if (optname == SO_TYPE) {
2846             val = host_to_target_sock_type(val);
2847         }
2848         if (len > lv)
2849             len = lv;
2850         if (len == 4) {
2851             if (put_user_u32(val, optval_addr))
2852                 return -TARGET_EFAULT;
2853         } else {
2854             if (put_user_u8(val, optval_addr))
2855                 return -TARGET_EFAULT;
2856         }
2857         if (put_user_u32(len, optlen))
2858             return -TARGET_EFAULT;
2859         break;
2860     case SOL_IP:
2861         switch(optname) {
2862         case IP_TOS:
2863         case IP_TTL:
2864         case IP_HDRINCL:
2865         case IP_ROUTER_ALERT:
2866         case IP_RECVOPTS:
2867         case IP_RETOPTS:
2868         case IP_PKTINFO:
2869         case IP_MTU_DISCOVER:
2870         case IP_RECVERR:
2871         case IP_RECVTOS:
2872 #ifdef IP_FREEBIND
2873         case IP_FREEBIND:
2874 #endif
2875         case IP_MULTICAST_TTL:
2876         case IP_MULTICAST_LOOP:
2877             if (get_user_u32(len, optlen))
2878                 return -TARGET_EFAULT;
2879             if (len < 0)
2880                 return -TARGET_EINVAL;
2881             lv = sizeof(lv);
2882             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2883             if (ret < 0)
2884                 return ret;
2885             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2886                 len = 1;
2887                 if (put_user_u32(len, optlen)
2888                     || put_user_u8(val, optval_addr))
2889                     return -TARGET_EFAULT;
2890             } else {
2891                 if (len > sizeof(int))
2892                     len = sizeof(int);
2893                 if (put_user_u32(len, optlen)
2894                     || put_user_u32(val, optval_addr))
2895                     return -TARGET_EFAULT;
2896             }
2897             break;
2898         default:
2899             ret = -TARGET_ENOPROTOOPT;
2900             break;
2901         }
2902         break;
2903     case SOL_IPV6:
2904         switch (optname) {
2905         case IPV6_MTU_DISCOVER:
2906         case IPV6_MTU:
2907         case IPV6_V6ONLY:
2908         case IPV6_RECVPKTINFO:
2909         case IPV6_UNICAST_HOPS:
2910         case IPV6_MULTICAST_HOPS:
2911         case IPV6_MULTICAST_LOOP:
2912         case IPV6_RECVERR:
2913         case IPV6_RECVHOPLIMIT:
2914         case IPV6_2292HOPLIMIT:
2915         case IPV6_CHECKSUM:
2916         case IPV6_ADDRFORM:
2917         case IPV6_2292PKTINFO:
2918         case IPV6_RECVTCLASS:
2919         case IPV6_RECVRTHDR:
2920         case IPV6_2292RTHDR:
2921         case IPV6_RECVHOPOPTS:
2922         case IPV6_2292HOPOPTS:
2923         case IPV6_RECVDSTOPTS:
2924         case IPV6_2292DSTOPTS:
2925         case IPV6_TCLASS:
2926 #ifdef IPV6_RECVPATHMTU
2927         case IPV6_RECVPATHMTU:
2928 #endif
2929 #ifdef IPV6_TRANSPARENT
2930         case IPV6_TRANSPARENT:
2931 #endif
2932 #ifdef IPV6_FREEBIND
2933         case IPV6_FREEBIND:
2934 #endif
2935 #ifdef IPV6_RECVORIGDSTADDR
2936         case IPV6_RECVORIGDSTADDR:
2937 #endif
2938             if (get_user_u32(len, optlen))
2939                 return -TARGET_EFAULT;
2940             if (len < 0)
2941                 return -TARGET_EINVAL;
2942             lv = sizeof(lv);
2943             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2944             if (ret < 0)
2945                 return ret;
2946             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2947                 len = 1;
2948                 if (put_user_u32(len, optlen)
2949                     || put_user_u8(val, optval_addr))
2950                     return -TARGET_EFAULT;
2951             } else {
2952                 if (len > sizeof(int))
2953                     len = sizeof(int);
2954                 if (put_user_u32(len, optlen)
2955                     || put_user_u32(val, optval_addr))
2956                     return -TARGET_EFAULT;
2957             }
2958             break;
2959         default:
2960             ret = -TARGET_ENOPROTOOPT;
2961             break;
2962         }
2963         break;
2964 #ifdef SOL_NETLINK
2965     case SOL_NETLINK:
2966         switch (optname) {
2967         case NETLINK_PKTINFO:
2968         case NETLINK_BROADCAST_ERROR:
2969         case NETLINK_NO_ENOBUFS:
2970 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2971         case NETLINK_LISTEN_ALL_NSID:
2972         case NETLINK_CAP_ACK:
2973 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2974 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2975         case NETLINK_EXT_ACK:
2976 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2977 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2978         case NETLINK_GET_STRICT_CHK:
2979 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2980             if (get_user_u32(len, optlen)) {
2981                 return -TARGET_EFAULT;
2982             }
2983             if (len != sizeof(val)) {
2984                 return -TARGET_EINVAL;
2985             }
2986             lv = len;
2987             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2988             if (ret < 0) {
2989                 return ret;
2990             }
2991             if (put_user_u32(lv, optlen)
2992                 || put_user_u32(val, optval_addr)) {
2993                 return -TARGET_EFAULT;
2994             }
2995             break;
2996 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2997         case NETLINK_LIST_MEMBERSHIPS:
2998         {
2999             uint32_t *results;
3000             int i;
3001             if (get_user_u32(len, optlen)) {
3002                 return -TARGET_EFAULT;
3003             }
3004             if (len < 0) {
3005                 return -TARGET_EINVAL;
3006             }
3007             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3008             if (!results) {
3009                 return -TARGET_EFAULT;
3010             }
3011             lv = len;
3012             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3013             if (ret < 0) {
3014                 unlock_user(results, optval_addr, 0);
3015                 return ret;
3016             }
3017             /* Swap host endianness to target endianness. */
3018             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3019                 results[i] = tswap32(results[i]);
3020             }
3021             if (put_user_u32(lv, optlen)) {
3022                 return -TARGET_EFAULT;
3023             }
3024             unlock_user(results, optval_addr, 0);
3025             break;
3026         }
3027 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3028         default:
3029             goto unimplemented;
3030         }
3031         break;
3032 #endif /* SOL_NETLINK */
3033     default:
3034     unimplemented:
3035         qemu_log_mask(LOG_UNIMP,
3036                       "getsockopt level=%d optname=%d not yet supported\n",
3037                       level, optname);
3038         ret = -TARGET_EOPNOTSUPP;
3039         break;
3040     }
3041     return ret;
3042 }
3043 
3044 /* Convert target low/high pair representing file offset into the host
3045  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3046  * as the kernel doesn't handle them either.
3047  */
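/*
 * Worked example (illustrative, 32-bit target): tlow = 0x89abcdef and
 * thigh = 0x01234567 combine into off = 0x0123456789abcdef.  On a 64-bit
 * host this yields *hlow = 0x0123456789abcdef and *hhigh = 0; on a 32-bit
 * host it yields *hlow = 0x89abcdef and *hhigh = 0x01234567.
 */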
3048 static void target_to_host_low_high(abi_ulong tlow,
3049                                     abi_ulong thigh,
3050                                     unsigned long *hlow,
3051                                     unsigned long *hhigh)
3052 {
3053     uint64_t off = tlow |
3054         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3055         TARGET_LONG_BITS / 2;
3056 
3057     *hlow = off;
3058     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3059 }
3060 
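/*
 * Copy a guest iovec array into a host struct iovec array, locking each
 * buffer into host memory.  On success the returned vector must later be
 * released with unlock_iovec(); on failure NULL is returned with errno set
 * (errno is 0 for the legitimate count == 0 case).
 */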
3061 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3062                                 abi_ulong count, int copy)
3063 {
3064     struct target_iovec *target_vec;
3065     struct iovec *vec;
3066     abi_ulong total_len, max_len;
3067     int i;
3068     int err = 0;
3069     bool bad_address = false;
3070 
3071     if (count == 0) {
3072         errno = 0;
3073         return NULL;
3074     }
3075     if (count > IOV_MAX) {
3076         errno = EINVAL;
3077         return NULL;
3078     }
3079 
3080     vec = g_try_new0(struct iovec, count);
3081     if (vec == NULL) {
3082         errno = ENOMEM;
3083         return NULL;
3084     }
3085 
3086     target_vec = lock_user(VERIFY_READ, target_addr,
3087                            count * sizeof(struct target_iovec), 1);
3088     if (target_vec == NULL) {
3089         err = EFAULT;
3090         goto fail2;
3091     }
3092 
3093     /* ??? If host page size > target page size, this will result in a
3094        value larger than what we can actually support.  */
3095     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3096     total_len = 0;
3097 
3098     for (i = 0; i < count; i++) {
3099         abi_ulong base = tswapal(target_vec[i].iov_base);
3100         abi_long len = tswapal(target_vec[i].iov_len);
3101 
3102         if (len < 0) {
3103             err = EINVAL;
3104             goto fail;
3105         } else if (len == 0) {
3106             /* Zero length pointer is ignored.  */
3107             vec[i].iov_base = 0;
3108         } else {
3109             vec[i].iov_base = lock_user(type, base, len, copy);
3110             /* If the first buffer pointer is bad, this is a fault.  But
3111              * subsequent bad buffers will result in a partial write; this
3112              * is realized by filling the vector with null pointers and
3113              * zero lengths. */
3114             if (!vec[i].iov_base) {
3115                 if (i == 0) {
3116                     err = EFAULT;
3117                     goto fail;
3118                 } else {
3119                     bad_address = true;
3120                 }
3121             }
3122             if (bad_address) {
3123                 len = 0;
3124             }
3125             if (len > max_len - total_len) {
3126                 len = max_len - total_len;
3127             }
3128         }
3129         vec[i].iov_len = len;
3130         total_len += len;
3131     }
3132 
3133     unlock_user(target_vec, target_addr, 0);
3134     return vec;
3135 
3136  fail:
3137     while (--i >= 0) {
3138         if (tswapal(target_vec[i].iov_len) > 0) {
3139             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3140         }
3141     }
3142     unlock_user(target_vec, target_addr, 0);
3143  fail2:
3144     g_free(vec);
3145     errno = err;
3146     return NULL;
3147 }
3148 
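/*
 * Release an iovec obtained from lock_iovec().  The guest addresses are
 * re-read from the target iovec array; when "copy" is set the buffer
 * contents are written back to guest memory (the read/recv direction).
 */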
3149 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3150                          abi_ulong count, int copy)
3151 {
3152     struct target_iovec *target_vec;
3153     int i;
3154 
3155     target_vec = lock_user(VERIFY_READ, target_addr,
3156                            count * sizeof(struct target_iovec), 1);
3157     if (target_vec) {
3158         for (i = 0; i < count; i++) {
3159             abi_ulong base = tswapal(target_vec[i].iov_base);
3160             abi_long len = tswapal(target_vec[i].iov_len);
3161             if (len < 0) {
3162                 break;
3163             }
3164             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3165         }
3166         unlock_user(target_vec, target_addr, 0);
3167     }
3168 
3169     g_free(vec);
3170 }
3171 
3172 static inline int target_to_host_sock_type(int *type)
3173 {
3174     int host_type = 0;
3175     int target_type = *type;
3176 
3177     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3178     case TARGET_SOCK_DGRAM:
3179         host_type = SOCK_DGRAM;
3180         break;
3181     case TARGET_SOCK_STREAM:
3182         host_type = SOCK_STREAM;
3183         break;
3184     default:
3185         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3186         break;
3187     }
3188     if (target_type & TARGET_SOCK_CLOEXEC) {
3189 #if defined(SOCK_CLOEXEC)
3190         host_type |= SOCK_CLOEXEC;
3191 #else
3192         return -TARGET_EINVAL;
3193 #endif
3194     }
3195     if (target_type & TARGET_SOCK_NONBLOCK) {
3196 #if defined(SOCK_NONBLOCK)
3197         host_type |= SOCK_NONBLOCK;
3198 #elif !defined(O_NONBLOCK)
3199         return -TARGET_EINVAL;
3200 #endif
3201     }
3202     *type = host_type;
3203     return 0;
3204 }
3205 
3206 /* Try to emulate socket type flags after socket creation.  */
3207 static int sock_flags_fixup(int fd, int target_type)
3208 {
3209 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3210     if (target_type & TARGET_SOCK_NONBLOCK) {
3211         int flags = fcntl(fd, F_GETFL);
3212         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3213             close(fd);
3214             return -TARGET_EINVAL;
3215         }
3216     }
3217 #endif
3218     return fd;
3219 }
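/*
 * Note that only TARGET_SOCK_NONBLOCK can be emulated this way: when the
 * host lacks SOCK_NONBLOCK the flag is applied after creation via
 * fcntl(F_SETFL, O_NONBLOCK), whereas a missing SOCK_CLOEXEC makes
 * target_to_host_sock_type() fail with -TARGET_EINVAL above.
 */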
3220 
3221 /* do_socket() Must return target values and target errnos. */
3222 static abi_long do_socket(int domain, int type, int protocol)
3223 {
3224     int target_type = type;
3225     int ret;
3226 
3227     ret = target_to_host_sock_type(&type);
3228     if (ret) {
3229         return ret;
3230     }
3231 
3232     if (domain == PF_NETLINK && !(
3233 #ifdef CONFIG_RTNETLINK
3234          protocol == NETLINK_ROUTE ||
3235 #endif
3236          protocol == NETLINK_KOBJECT_UEVENT ||
3237          protocol == NETLINK_AUDIT)) {
3238         return -TARGET_EPROTONOSUPPORT;
3239     }
3240 
3241     if (domain == AF_PACKET ||
3242         (domain == AF_INET && type == SOCK_PACKET)) {
3243         protocol = tswap16(protocol);
3244     }
3245 
3246     ret = get_errno(socket(domain, type, protocol));
3247     if (ret >= 0) {
3248         ret = sock_flags_fixup(ret, target_type);
3249         if (type == SOCK_PACKET) {
3250             /* Handle an obsolete case: if the socket type is
3251              * SOCK_PACKET, the socket is bound by interface name.
3252              */
3253             fd_trans_register(ret, &target_packet_trans);
3254         } else if (domain == PF_NETLINK) {
3255             switch (protocol) {
3256 #ifdef CONFIG_RTNETLINK
3257             case NETLINK_ROUTE:
3258                 fd_trans_register(ret, &target_netlink_route_trans);
3259                 break;
3260 #endif
3261             case NETLINK_KOBJECT_UEVENT:
3262                 /* nothing to do: messages are strings */
3263                 break;
3264             case NETLINK_AUDIT:
3265                 fd_trans_register(ret, &target_netlink_audit_trans);
3266                 break;
3267             default:
3268                 g_assert_not_reached();
3269             }
3270         }
3271     }
3272     return ret;
3273 }
3274 
3275 /* do_bind() Must return target values and target errnos. */
3276 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3277                         socklen_t addrlen)
3278 {
3279     void *addr;
3280     abi_long ret;
3281 
3282     if ((int)addrlen < 0) {
3283         return -TARGET_EINVAL;
3284     }
3285 
3286     addr = alloca(addrlen+1);
3287 
3288     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3289     if (ret)
3290         return ret;
3291 
3292     return get_errno(bind(sockfd, addr, addrlen));
3293 }
3294 
3295 /* do_connect() Must return target values and target errnos. */
3296 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3297                            socklen_t addrlen)
3298 {
3299     void *addr;
3300     abi_long ret;
3301 
3302     if ((int)addrlen < 0) {
3303         return -TARGET_EINVAL;
3304     }
3305 
3306     addr = alloca(addrlen+1);
3307 
3308     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309     if (ret)
3310         return ret;
3311 
3312     return get_errno(safe_connect(sockfd, addr, addrlen));
3313 }
3314 
3315 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3316 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3317                                       int flags, int send)
3318 {
3319     abi_long ret, len;
3320     struct msghdr msg;
3321     abi_ulong count;
3322     struct iovec *vec;
3323     abi_ulong target_vec;
3324 
3325     if (msgp->msg_name) {
3326         msg.msg_namelen = tswap32(msgp->msg_namelen);
3327         msg.msg_name = alloca(msg.msg_namelen+1);
3328         ret = target_to_host_sockaddr(fd, msg.msg_name,
3329                                       tswapal(msgp->msg_name),
3330                                       msg.msg_namelen);
3331         if (ret == -TARGET_EFAULT) {
3332             /* For connected sockets msg_name and msg_namelen must
3333              * be ignored, so returning EFAULT immediately is wrong.
3334              * Instead, pass a bad msg_name to the host kernel, and
3335              * let it decide whether to return EFAULT or not.
3336              */
3337             msg.msg_name = (void *)-1;
3338         } else if (ret) {
3339             goto out2;
3340         }
3341     } else {
3342         msg.msg_name = NULL;
3343         msg.msg_namelen = 0;
3344     }
3345     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3346     msg.msg_control = alloca(msg.msg_controllen);
3347     memset(msg.msg_control, 0, msg.msg_controllen);
3348 
3349     msg.msg_flags = tswap32(msgp->msg_flags);
3350 
3351     count = tswapal(msgp->msg_iovlen);
3352     target_vec = tswapal(msgp->msg_iov);
3353 
3354     if (count > IOV_MAX) {
3355         /* sendmsg/recvmsg returns a different errno for this condition than
3356          * readv/writev, so we must catch it here before lock_iovec() does.
3357          */
3358         ret = -TARGET_EMSGSIZE;
3359         goto out2;
3360     }
3361 
3362     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3363                      target_vec, count, send);
3364     if (vec == NULL) {
3365         ret = -host_to_target_errno(errno);
3366         goto out2;
3367     }
3368     msg.msg_iovlen = count;
3369     msg.msg_iov = vec;
3370 
3371     if (send) {
3372         if (fd_trans_target_to_host_data(fd)) {
3373             void *host_msg;
3374 
3375             host_msg = g_malloc(msg.msg_iov->iov_len);
3376             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3377             ret = fd_trans_target_to_host_data(fd)(host_msg,
3378                                                    msg.msg_iov->iov_len);
3379             if (ret >= 0) {
3380                 msg.msg_iov->iov_base = host_msg;
3381                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3382             }
3383             g_free(host_msg);
3384         } else {
3385             ret = target_to_host_cmsg(&msg, msgp);
3386             if (ret == 0) {
3387                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3388             }
3389         }
3390     } else {
3391         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3392         if (!is_error(ret)) {
3393             len = ret;
3394             if (fd_trans_host_to_target_data(fd)) {
3395                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3396                                                MIN(msg.msg_iov->iov_len, len));
3397             } else {
3398                 ret = host_to_target_cmsg(msgp, &msg);
3399             }
3400             if (!is_error(ret)) {
3401                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3402                 msgp->msg_flags = tswap32(msg.msg_flags);
3403                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3404                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3405                                     msg.msg_name, msg.msg_namelen);
3406                     if (ret) {
3407                         goto out;
3408                     }
3409                 }
3410 
3411                 ret = len;
3412             }
3413         }
3414     }
3415 
3416 out:
3417     unlock_iovec(vec, target_vec, count, !send);
3418 out2:
3419     return ret;
3420 }
3421 
3422 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3423                                int flags, int send)
3424 {
3425     abi_long ret;
3426     struct target_msghdr *msgp;
3427 
3428     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3429                           msgp,
3430                           target_msg,
3431                           send ? 1 : 0)) {
3432         return -TARGET_EFAULT;
3433     }
3434     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3435     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3436     return ret;
3437 }
3438 
3439 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3440  * so it might not have this *mmsg-specific flag either.
3441  */
3442 #ifndef MSG_WAITFORONE
3443 #define MSG_WAITFORONE 0x10000
3444 #endif
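/* 0x10000 matches the kernel's definition of MSG_WAITFORONE. */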
3445 
3446 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3447                                 unsigned int vlen, unsigned int flags,
3448                                 int send)
3449 {
3450     struct target_mmsghdr *mmsgp;
3451     abi_long ret = 0;
3452     int i;
3453 
3454     if (vlen > UIO_MAXIOV) {
3455         vlen = UIO_MAXIOV;
3456     }
3457 
3458     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3459     if (!mmsgp) {
3460         return -TARGET_EFAULT;
3461     }
3462 
3463     for (i = 0; i < vlen; i++) {
3464         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3465         if (is_error(ret)) {
3466             break;
3467         }
3468         mmsgp[i].msg_len = tswap32(ret);
3469         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3470         if (flags & MSG_WAITFORONE) {
3471             flags |= MSG_DONTWAIT;
3472         }
3473     }
3474 
3475     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3476 
3477     /* Return number of datagrams sent if we sent any at all;
3478      * otherwise return the error.
3479      */
3480     if (i) {
3481         return i;
3482     }
3483     return ret;
3484 }
3485 
3486 /* do_accept4() Must return target values and target errnos. */
3487 static abi_long do_accept4(int fd, abi_ulong target_addr,
3488                            abi_ulong target_addrlen_addr, int flags)
3489 {
3490     socklen_t addrlen, ret_addrlen;
3491     void *addr;
3492     abi_long ret;
3493     int host_flags;
3494 
3495     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3496 
3497     if (target_addr == 0) {
3498         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3499     }
3500 
3501     /* Linux returns EFAULT if the addrlen pointer is invalid */
3502     if (get_user_u32(addrlen, target_addrlen_addr))
3503         return -TARGET_EFAULT;
3504 
3505     if ((int)addrlen < 0) {
3506         return -TARGET_EINVAL;
3507     }
3508 
3509     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3510         return -TARGET_EFAULT;
3511 
3512     addr = alloca(addrlen);
3513 
3514     ret_addrlen = addrlen;
3515     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3516     if (!is_error(ret)) {
3517         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3518         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3519             ret = -TARGET_EFAULT;
3520         }
3521     }
3522     return ret;
3523 }
3524 
3525 /* do_getpeername() Must return target values and target errnos. */
3526 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3527                                abi_ulong target_addrlen_addr)
3528 {
3529     socklen_t addrlen, ret_addrlen;
3530     void *addr;
3531     abi_long ret;
3532 
3533     if (get_user_u32(addrlen, target_addrlen_addr))
3534         return -TARGET_EFAULT;
3535 
3536     if ((int)addrlen < 0) {
3537         return -TARGET_EINVAL;
3538     }
3539 
3540     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3541         return -TARGET_EFAULT;
3542 
3543     addr = alloca(addrlen);
3544 
3545     ret_addrlen = addrlen;
3546     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3547     if (!is_error(ret)) {
3548         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3549         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3550             ret = -TARGET_EFAULT;
3551         }
3552     }
3553     return ret;
3554 }
3555 
3556 /* do_getsockname() Must return target values and target errnos. */
3557 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3558                                abi_ulong target_addrlen_addr)
3559 {
3560     socklen_t addrlen, ret_addrlen;
3561     void *addr;
3562     abi_long ret;
3563 
3564     if (get_user_u32(addrlen, target_addrlen_addr))
3565         return -TARGET_EFAULT;
3566 
3567     if ((int)addrlen < 0) {
3568         return -TARGET_EINVAL;
3569     }
3570 
3571     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3572         return -TARGET_EFAULT;
3573 
3574     addr = alloca(addrlen);
3575 
3576     ret_addrlen = addrlen;
3577     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3578     if (!is_error(ret)) {
3579         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3580         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3581             ret = -TARGET_EFAULT;
3582         }
3583     }
3584     return ret;
3585 }
3586 
3587 /* do_socketpair() Must return target values and target errnos. */
3588 static abi_long do_socketpair(int domain, int type, int protocol,
3589                               abi_ulong target_tab_addr)
3590 {
3591     int tab[2];
3592     abi_long ret;
3593 
3594     target_to_host_sock_type(&type);
3595 
3596     ret = get_errno(socketpair(domain, type, protocol, tab));
3597     if (!is_error(ret)) {
3598         if (put_user_s32(tab[0], target_tab_addr)
3599             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3600             ret = -TARGET_EFAULT;
3601     }
3602     return ret;
3603 }
3604 
3605 /* do_sendto() Must return target values and target errnos. */
3606 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3607                           abi_ulong target_addr, socklen_t addrlen)
3608 {
3609     void *addr;
3610     void *host_msg;
3611     void *copy_msg = NULL;
3612     abi_long ret;
3613 
3614     if ((int)addrlen < 0) {
3615         return -TARGET_EINVAL;
3616     }
3617 
3618     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3619     if (!host_msg)
3620         return -TARGET_EFAULT;
3621     if (fd_trans_target_to_host_data(fd)) {
3622         copy_msg = host_msg;
3623         host_msg = g_malloc(len);
3624         memcpy(host_msg, copy_msg, len);
3625         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3626         if (ret < 0) {
3627             goto fail;
3628         }
3629     }
3630     if (target_addr) {
3631         addr = alloca(addrlen+1);
3632         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3633         if (ret) {
3634             goto fail;
3635         }
3636         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3637     } else {
3638         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3639     }
3640 fail:
3641     if (copy_msg) {
3642         g_free(host_msg);
3643         host_msg = copy_msg;
3644     }
3645     unlock_user(host_msg, msg, 0);
3646     return ret;
3647 }
3648 
3649 /* do_recvfrom() Must return target values and target errnos. */
3650 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3651                             abi_ulong target_addr,
3652                             abi_ulong target_addrlen)
3653 {
3654     socklen_t addrlen, ret_addrlen;
3655     void *addr;
3656     void *host_msg;
3657     abi_long ret;
3658 
3659     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3660     if (!host_msg)
3661         return -TARGET_EFAULT;
3662     if (target_addr) {
3663         if (get_user_u32(addrlen, target_addrlen)) {
3664             ret = -TARGET_EFAULT;
3665             goto fail;
3666         }
3667         if ((int)addrlen < 0) {
3668             ret = -TARGET_EINVAL;
3669             goto fail;
3670         }
3671         addr = alloca(addrlen);
3672         ret_addrlen = addrlen;
3673         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3674                                       addr, &ret_addrlen));
3675     } else {
3676         addr = NULL; /* To keep compiler quiet.  */
3677         addrlen = 0; /* To keep compiler quiet.  */
3678         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3679     }
3680     if (!is_error(ret)) {
3681         if (fd_trans_host_to_target_data(fd)) {
3682             abi_long trans;
3683             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3684             if (is_error(trans)) {
3685                 ret = trans;
3686                 goto fail;
3687             }
3688         }
3689         if (target_addr) {
3690             host_to_target_sockaddr(target_addr, addr,
3691                                     MIN(addrlen, ret_addrlen));
3692             if (put_user_u32(ret_addrlen, target_addrlen)) {
3693                 ret = -TARGET_EFAULT;
3694                 goto fail;
3695             }
3696         }
3697         unlock_user(host_msg, msg, len);
3698     } else {
3699 fail:
3700         unlock_user(host_msg, msg, 0);
3701     }
3702     return ret;
3703 }
3704 
3705 #ifdef TARGET_NR_socketcall
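/*
 * socketcall(2) multiplexes all of the socket calls through a single
 * syscall: "num" selects the operation and "vptr" points to an array of
 * abi_long arguments in guest memory.  For example (illustrative), a guest
 * socket(AF_INET, SOCK_STREAM, 0) arrives here as num == TARGET_SYS_SOCKET
 * with vptr pointing at the three arguments, and is forwarded to
 * do_socket().
 */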
3706 /* do_socketcall() must return target values and target errnos. */
3707 static abi_long do_socketcall(int num, abi_ulong vptr)
3708 {
3709     static const unsigned nargs[] = { /* number of arguments per operation */
3710         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3711         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3712         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3713         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3714         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3715         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3716         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3717         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3718         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3719         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3720         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3721         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3722         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3723         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3724         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3725         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3726         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3727         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3728         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3729         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3730     };
3731     abi_long a[6]; /* max 6 args */
3732     unsigned i;
3733 
3734     /* check the range of the first argument num */
3735     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3736     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3737         return -TARGET_EINVAL;
3738     }
3739     /* ensure we have space for args */
3740     if (nargs[num] > ARRAY_SIZE(a)) {
3741         return -TARGET_EINVAL;
3742     }
3743     /* collect the arguments in a[] according to nargs[] */
3744     for (i = 0; i < nargs[num]; ++i) {
3745         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3746             return -TARGET_EFAULT;
3747         }
3748     }
3749     /* now when we have the args, invoke the appropriate underlying function */
3750     switch (num) {
3751     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3752         return do_socket(a[0], a[1], a[2]);
3753     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3754         return do_bind(a[0], a[1], a[2]);
3755     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3756         return do_connect(a[0], a[1], a[2]);
3757     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3758         return get_errno(listen(a[0], a[1]));
3759     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3760         return do_accept4(a[0], a[1], a[2], 0);
3761     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3762         return do_getsockname(a[0], a[1], a[2]);
3763     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3764         return do_getpeername(a[0], a[1], a[2]);
3765     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3766         return do_socketpair(a[0], a[1], a[2], a[3]);
3767     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3768         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3769     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3770         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3771     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3772         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3773     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3774         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3775     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3776         return get_errno(shutdown(a[0], a[1]));
3777     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3778         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3779     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3780         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3781     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3782         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3783     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3784         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3785     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3786         return do_accept4(a[0], a[1], a[2], a[3]);
3787     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3788         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3789     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3790         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3791     default:
3792         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3793         return -TARGET_EINVAL;
3794     }
3795 }
3796 #endif
3797 
3798 #define N_SHM_REGIONS	32
3799 
3800 static struct shm_region {
3801     abi_ulong start;
3802     abi_ulong size;
3803     bool in_use;
3804 } shm_regions[N_SHM_REGIONS];
3805 
3806 #ifndef TARGET_SEMID64_DS
3807 /* asm-generic version of this struct */
3808 struct target_semid64_ds
3809 {
3810   struct target_ipc_perm sem_perm;
3811   abi_ulong sem_otime;
3812 #if TARGET_ABI_BITS == 32
3813   abi_ulong __unused1;
3814 #endif
3815   abi_ulong sem_ctime;
3816 #if TARGET_ABI_BITS == 32
3817   abi_ulong __unused2;
3818 #endif
3819   abi_ulong sem_nsems;
3820   abi_ulong __unused3;
3821   abi_ulong __unused4;
3822 };
3823 #endif
3824 
3825 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3826                                                abi_ulong target_addr)
3827 {
3828     struct target_ipc_perm *target_ip;
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3832         return -TARGET_EFAULT;
3833     target_ip = &(target_sd->sem_perm);
3834     host_ip->__key = tswap32(target_ip->__key);
3835     host_ip->uid = tswap32(target_ip->uid);
3836     host_ip->gid = tswap32(target_ip->gid);
3837     host_ip->cuid = tswap32(target_ip->cuid);
3838     host_ip->cgid = tswap32(target_ip->cgid);
3839 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3840     host_ip->mode = tswap32(target_ip->mode);
3841 #else
3842     host_ip->mode = tswap16(target_ip->mode);
3843 #endif
3844 #if defined(TARGET_PPC)
3845     host_ip->__seq = tswap32(target_ip->__seq);
3846 #else
3847     host_ip->__seq = tswap16(target_ip->__seq);
3848 #endif
3849     unlock_user_struct(target_sd, target_addr, 0);
3850     return 0;
3851 }
3852 
3853 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3854                                                struct ipc_perm *host_ip)
3855 {
3856     struct target_ipc_perm *target_ip;
3857     struct target_semid64_ds *target_sd;
3858 
3859     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3860         return -TARGET_EFAULT;
3861     target_ip = &(target_sd->sem_perm);
3862     target_ip->__key = tswap32(host_ip->__key);
3863     target_ip->uid = tswap32(host_ip->uid);
3864     target_ip->gid = tswap32(host_ip->gid);
3865     target_ip->cuid = tswap32(host_ip->cuid);
3866     target_ip->cgid = tswap32(host_ip->cgid);
3867 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868     target_ip->mode = tswap32(host_ip->mode);
3869 #else
3870     target_ip->mode = tswap16(host_ip->mode);
3871 #endif
3872 #if defined(TARGET_PPC)
3873     target_ip->__seq = tswap32(host_ip->__seq);
3874 #else
3875     target_ip->__seq = tswap16(host_ip->__seq);
3876 #endif
3877     unlock_user_struct(target_sd, target_addr, 1);
3878     return 0;
3879 }
3880 
3881 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3882                                                abi_ulong target_addr)
3883 {
3884     struct target_semid64_ds *target_sd;
3885 
3886     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3887         return -TARGET_EFAULT;
3888     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3889         return -TARGET_EFAULT;
3890     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3891     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3892     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3893     unlock_user_struct(target_sd, target_addr, 0);
3894     return 0;
3895 }
3896 
3897 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3898                                                struct semid_ds *host_sd)
3899 {
3900     struct target_semid64_ds *target_sd;
3901 
3902     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3903         return -TARGET_EFAULT;
3904     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3905         return -TARGET_EFAULT;
3906     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3907     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3908     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3909     unlock_user_struct(target_sd, target_addr, 1);
3910     return 0;
3911 }
3912 
3913 struct target_seminfo {
3914     int semmap;
3915     int semmni;
3916     int semmns;
3917     int semmnu;
3918     int semmsl;
3919     int semopm;
3920     int semume;
3921     int semusz;
3922     int semvmx;
3923     int semaem;
3924 };
3925 
3926 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3927                                               struct seminfo *host_seminfo)
3928 {
3929     struct target_seminfo *target_seminfo;
3930     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3931         return -TARGET_EFAULT;
3932     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3933     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3934     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3935     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3936     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3937     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3938     __put_user(host_seminfo->semume, &target_seminfo->semume);
3939     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3940     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3941     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3942     unlock_user_struct(target_seminfo, target_addr, 1);
3943     return 0;
3944 }
3945 
3946 union semun {
3947 	int val;
3948 	struct semid_ds *buf;
3949 	unsigned short *array;
3950 	struct seminfo *__buf;
3951 };
3952 
3953 union target_semun {
3954 	int val;
3955 	abi_ulong buf;
3956 	abi_ulong array;
3957 	abi_ulong __buf;
3958 };
3959 
3960 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3961                                                abi_ulong target_addr)
3962 {
3963     int nsems;
3964     unsigned short *array;
3965     union semun semun;
3966     struct semid_ds semid_ds;
3967     int i, ret;
3968 
3969     semun.buf = &semid_ds;
3970 
3971     ret = semctl(semid, 0, IPC_STAT, semun);
3972     if (ret == -1)
3973         return get_errno(ret);
3974 
3975     nsems = semid_ds.sem_nsems;
3976 
3977     *host_array = g_try_new(unsigned short, nsems);
3978     if (!*host_array) {
3979         return -TARGET_ENOMEM;
3980     }
3981     array = lock_user(VERIFY_READ, target_addr,
3982                       nsems*sizeof(unsigned short), 1);
3983     if (!array) {
3984         g_free(*host_array);
3985         return -TARGET_EFAULT;
3986     }
3987 
3988     for(i=0; i<nsems; i++) {
3989         __get_user((*host_array)[i], &array[i]);
3990     }
3991     unlock_user(array, target_addr, 0);
3992 
3993     return 0;
3994 }
3995 
3996 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3997                                                unsigned short **host_array)
3998 {
3999     int nsems;
4000     unsigned short *array;
4001     union semun semun;
4002     struct semid_ds semid_ds;
4003     int i, ret;
4004 
4005     semun.buf = &semid_ds;
4006 
4007     ret = semctl(semid, 0, IPC_STAT, semun);
4008     if (ret == -1)
4009         return get_errno(ret);
4010 
4011     nsems = semid_ds.sem_nsems;
4012 
4013     array = lock_user(VERIFY_WRITE, target_addr,
4014                       nsems*sizeof(unsigned short), 0);
4015     if (!array)
4016         return -TARGET_EFAULT;
4017 
4018     for(i=0; i<nsems; i++) {
4019         __put_user((*host_array)[i], &array[i]);
4020     }
4021     g_free(*host_array);
4022     unlock_user(array, target_addr, 1);
4023 
4024     return 0;
4025 }
4026 
4027 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4028                                  abi_ulong target_arg)
4029 {
4030     union target_semun target_su = { .buf = target_arg };
4031     union semun arg;
4032     struct semid_ds dsarg;
4033     unsigned short *array = NULL;
4034     struct seminfo seminfo;
4035     abi_long ret = -TARGET_EINVAL;
4036     abi_long err;
4037     cmd &= 0xff;
4038 
4039     switch( cmd ) {
4040 	case GETVAL:
4041 	case SETVAL:
4042             /* In 64 bit cross-endian situations, we will erroneously pick up
4043              * the wrong half of the union for the "val" element.  To rectify
4044              * this, the entire 8-byte structure is byteswapped, followed by
4045              * a swap of the 4 byte val field. In other cases, the data is
4046              * already in proper host byte order. */
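            /* For example (illustrative values): a big-endian 64-bit guest on
             * a little-endian host stores val = 0x11223344.  Read as an
             * 8-byte abi_ulong the union arrives as 0x11223344XXXXXXXX, so
             * target_su.val would pick up the garbage half.  tswapal() turns
             * it into 0xXXXXXXXX44332211, whose low 4 bytes give
             * target_su.val = 0x44332211, and tswap32() then restores the
             * intended 0x11223344.
             */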
4047 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4048 		target_su.buf = tswapal(target_su.buf);
4049 		arg.val = tswap32(target_su.val);
4050 	    } else {
4051 		arg.val = target_su.val;
4052 	    }
4053             ret = get_errno(semctl(semid, semnum, cmd, arg));
4054             break;
4055 	case GETALL:
4056 	case SETALL:
4057             err = target_to_host_semarray(semid, &array, target_su.array);
4058             if (err)
4059                 return err;
4060             arg.array = array;
4061             ret = get_errno(semctl(semid, semnum, cmd, arg));
4062             err = host_to_target_semarray(semid, target_su.array, &array);
4063             if (err)
4064                 return err;
4065             break;
4066 	case IPC_STAT:
4067 	case IPC_SET:
4068 	case SEM_STAT:
4069             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4070             if (err)
4071                 return err;
4072             arg.buf = &dsarg;
4073             ret = get_errno(semctl(semid, semnum, cmd, arg));
4074             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4075             if (err)
4076                 return err;
4077             break;
4078 	case IPC_INFO:
4079 	case SEM_INFO:
4080             arg.__buf = &seminfo;
4081             ret = get_errno(semctl(semid, semnum, cmd, arg));
4082             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4083             if (err)
4084                 return err;
4085             break;
4086 	case IPC_RMID:
4087 	case GETPID:
4088 	case GETNCNT:
4089 	case GETZCNT:
4090             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4091             break;
4092     }
4093 
4094     return ret;
4095 }
4096 
4097 struct target_sembuf {
4098     unsigned short sem_num;
4099     short sem_op;
4100     short sem_flg;
4101 };
4102 
4103 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4104                                              abi_ulong target_addr,
4105                                              unsigned nsops)
4106 {
4107     struct target_sembuf *target_sembuf;
4108     int i;
4109 
4110     target_sembuf = lock_user(VERIFY_READ, target_addr,
4111                               nsops*sizeof(struct target_sembuf), 1);
4112     if (!target_sembuf)
4113         return -TARGET_EFAULT;
4114 
4115     for(i=0; i<nsops; i++) {
4116         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4117         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4118         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4119     }
4120 
4121     unlock_user(target_sembuf, target_addr, 0);
4122 
4123     return 0;
4124 }
4125 
4126 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4127     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4128 
4129 /*
4130  * This macro is required to handle the s390 variant, which passes the
4131  * arguments in a different order than the default.
4132  */
4133 #ifdef __s390x__
4134 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4135   (__nsops), (__timeout), (__sops)
4136 #else
4137 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4138   (__nsops), 0, (__sops), (__timeout)
4139 #endif
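/*
 * For example, SEMTIMEDOP_IPC_ARGS(nsops, sops, ts) expands to
 * "(nsops), (ts), (sops)" on s390x but to "(nsops), 0, (sops), (ts)"
 * everywhere else, matching how safe_ipc() is invoked below.
 */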
4140 
4141 static inline abi_long do_semtimedop(int semid,
4142                                      abi_long ptr,
4143                                      unsigned nsops,
4144                                      abi_long timeout, bool time64)
4145 {
4146     struct sembuf *sops;
4147     struct timespec ts, *pts = NULL;
4148     abi_long ret;
4149 
4150     if (timeout) {
4151         pts = &ts;
4152         if (time64) {
4153             if (target_to_host_timespec64(pts, timeout)) {
4154                 return -TARGET_EFAULT;
4155             }
4156         } else {
4157             if (target_to_host_timespec(pts, timeout)) {
4158                 return -TARGET_EFAULT;
4159             }
4160         }
4161     }
4162 
4163     if (nsops > TARGET_SEMOPM) {
4164         return -TARGET_E2BIG;
4165     }
4166 
4167     sops = g_new(struct sembuf, nsops);
4168 
4169     if (target_to_host_sembuf(sops, ptr, nsops)) {
4170         g_free(sops);
4171         return -TARGET_EFAULT;
4172     }
4173 
4174     ret = -TARGET_ENOSYS;
4175 #ifdef __NR_semtimedop
4176     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4177 #endif
4178 #ifdef __NR_ipc
4179     if (ret == -TARGET_ENOSYS) {
4180         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4181                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4182     }
4183 #endif
4184     g_free(sops);
4185     return ret;
4186 }
4187 #endif
4188 
4189 struct target_msqid_ds
4190 {
4191     struct target_ipc_perm msg_perm;
4192     abi_ulong msg_stime;
4193 #if TARGET_ABI_BITS == 32
4194     abi_ulong __unused1;
4195 #endif
4196     abi_ulong msg_rtime;
4197 #if TARGET_ABI_BITS == 32
4198     abi_ulong __unused2;
4199 #endif
4200     abi_ulong msg_ctime;
4201 #if TARGET_ABI_BITS == 32
4202     abi_ulong __unused3;
4203 #endif
4204     abi_ulong __msg_cbytes;
4205     abi_ulong msg_qnum;
4206     abi_ulong msg_qbytes;
4207     abi_ulong msg_lspid;
4208     abi_ulong msg_lrpid;
4209     abi_ulong __unused4;
4210     abi_ulong __unused5;
4211 };
4212 
4213 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4214                                                abi_ulong target_addr)
4215 {
4216     struct target_msqid_ds *target_md;
4217 
4218     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4219         return -TARGET_EFAULT;
4220     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4221         return -TARGET_EFAULT;
4222     host_md->msg_stime = tswapal(target_md->msg_stime);
4223     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4224     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4225     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4226     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4227     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4228     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4229     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4230     unlock_user_struct(target_md, target_addr, 0);
4231     return 0;
4232 }
4233 
4234 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4235                                                struct msqid_ds *host_md)
4236 {
4237     struct target_msqid_ds *target_md;
4238 
4239     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4240         return -TARGET_EFAULT;
4241     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4242         return -TARGET_EFAULT;
4243     target_md->msg_stime = tswapal(host_md->msg_stime);
4244     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4245     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4246     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4247     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4248     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4249     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4250     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4251     unlock_user_struct(target_md, target_addr, 1);
4252     return 0;
4253 }
4254 
4255 struct target_msginfo {
4256     int msgpool;
4257     int msgmap;
4258     int msgmax;
4259     int msgmnb;
4260     int msgmni;
4261     int msgssz;
4262     int msgtql;
4263     unsigned short int msgseg;
4264 };
4265 
4266 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4267                                               struct msginfo *host_msginfo)
4268 {
4269     struct target_msginfo *target_msginfo;
4270     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4271         return -TARGET_EFAULT;
4272     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4273     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4274     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4275     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4276     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4277     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4278     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4279     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4280     unlock_user_struct(target_msginfo, target_addr, 1);
4281     return 0;
4282 }
4283 
4284 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4285 {
4286     struct msqid_ds dsarg;
4287     struct msginfo msginfo;
4288     abi_long ret = -TARGET_EINVAL;
4289 
4290     cmd &= 0xff;
4291 
4292     switch (cmd) {
4293     case IPC_STAT:
4294     case IPC_SET:
4295     case MSG_STAT:
4296         if (target_to_host_msqid_ds(&dsarg,ptr))
4297             return -TARGET_EFAULT;
4298         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4299         if (host_to_target_msqid_ds(ptr,&dsarg))
4300             return -TARGET_EFAULT;
4301         break;
4302     case IPC_RMID:
4303         ret = get_errno(msgctl(msgid, cmd, NULL));
4304         break;
4305     case IPC_INFO:
4306     case MSG_INFO:
4307         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4308         if (host_to_target_msginfo(ptr, &msginfo))
4309             return -TARGET_EFAULT;
4310         break;
4311     }
4312 
4313     return ret;
4314 }
4315 
4316 struct target_msgbuf {
4317     abi_long mtype;
4318     char	mtext[1];
4319 };
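/*
 * The host struct msgbuf begins with a host "long" mtype rather than an
 * abi_long, so the message is copied field by field and the host buffer
 * is sized as msgsz + sizeof(long) in do_msgsnd()/do_msgrcv() below.
 */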
4320 
4321 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4322                                  ssize_t msgsz, int msgflg)
4323 {
4324     struct target_msgbuf *target_mb;
4325     struct msgbuf *host_mb;
4326     abi_long ret = 0;
4327 
4328     if (msgsz < 0) {
4329         return -TARGET_EINVAL;
4330     }
4331 
4332     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4333         return -TARGET_EFAULT;
4334     host_mb = g_try_malloc(msgsz + sizeof(long));
4335     if (!host_mb) {
4336         unlock_user_struct(target_mb, msgp, 0);
4337         return -TARGET_ENOMEM;
4338     }
4339     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4340     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4341     ret = -TARGET_ENOSYS;
4342 #ifdef __NR_msgsnd
4343     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4344 #endif
4345 #ifdef __NR_ipc
4346     if (ret == -TARGET_ENOSYS) {
4347 #ifdef __s390x__
4348         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4349                                  host_mb));
4350 #else
4351         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4352                                  host_mb, 0));
4353 #endif
4354     }
4355 #endif
4356     g_free(host_mb);
4357     unlock_user_struct(target_mb, msgp, 0);
4358 
4359     return ret;
4360 }
4361 
4362 #ifdef __NR_ipc
4363 #if defined(__sparc__)
4364 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4365 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4366 #elif defined(__s390x__)
4367 /* The s390 sys_ipc variant has only five parameters.  */
4368 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4369     ((long int[]){(long int)__msgp, __msgtyp})
4370 #else
4371 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4372     ((long int[]){(long int)__msgp, __msgtyp}), 0
4373 #endif
4374 #endif
4375 
4376 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4377                                  ssize_t msgsz, abi_long msgtyp,
4378                                  int msgflg)
4379 {
4380     struct target_msgbuf *target_mb;
4381     char *target_mtext;
4382     struct msgbuf *host_mb;
4383     abi_long ret = 0;
4384 
4385     if (msgsz < 0) {
4386         return -TARGET_EINVAL;
4387     }
4388 
4389     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4390         return -TARGET_EFAULT;
4391 
4392     host_mb = g_try_malloc(msgsz + sizeof(long));
4393     if (!host_mb) {
4394         ret = -TARGET_ENOMEM;
4395         goto end;
4396     }
4397     ret = -TARGET_ENOSYS;
4398 #ifdef __NR_msgrcv
4399     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4400 #endif
4401 #ifdef __NR_ipc
4402     if (ret == -TARGET_ENOSYS) {
4403         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4404                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4405     }
4406 #endif
4407 
4408     if (ret > 0) {
4409         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4410         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4411         if (!target_mtext) {
4412             ret = -TARGET_EFAULT;
4413             goto end;
4414         }
4415         memcpy(target_mb->mtext, host_mb->mtext, ret);
4416         unlock_user(target_mtext, target_mtext_addr, ret);
4417     }
4418 
4419     target_mb->mtype = tswapal(host_mb->mtype);
4420 
4421 end:
4422     if (target_mb)
4423         unlock_user_struct(target_mb, msgp, 1);
4424     g_free(host_mb);
4425     return ret;
4426 }
4427 
4428 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4429                                                abi_ulong target_addr)
4430 {
4431     struct target_shmid_ds *target_sd;
4432 
4433     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4434         return -TARGET_EFAULT;
4435     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4436         return -TARGET_EFAULT;
4437     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4438     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4439     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4440     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4441     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4442     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4443     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4444     unlock_user_struct(target_sd, target_addr, 0);
4445     return 0;
4446 }
4447 
4448 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4449                                                struct shmid_ds *host_sd)
4450 {
4451     struct target_shmid_ds *target_sd;
4452 
4453     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4454         return -TARGET_EFAULT;
4455     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4456         return -TARGET_EFAULT;
4457     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4458     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4459     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4460     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4461     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4462     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4463     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4464     unlock_user_struct(target_sd, target_addr, 1);
4465     return 0;
4466 }
4467 
4468 struct  target_shminfo {
4469     abi_ulong shmmax;
4470     abi_ulong shmmin;
4471     abi_ulong shmmni;
4472     abi_ulong shmseg;
4473     abi_ulong shmall;
4474 };
4475 
4476 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4477                                               struct shminfo *host_shminfo)
4478 {
4479     struct target_shminfo *target_shminfo;
4480     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4481         return -TARGET_EFAULT;
4482     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4483     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4484     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4485     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4486     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4487     unlock_user_struct(target_shminfo, target_addr, 1);
4488     return 0;
4489 }
4490 
4491 struct target_shm_info {
4492     int used_ids;
4493     abi_ulong shm_tot;
4494     abi_ulong shm_rss;
4495     abi_ulong shm_swp;
4496     abi_ulong swap_attempts;
4497     abi_ulong swap_successes;
4498 };
4499 
4500 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4501                                                struct shm_info *host_shm_info)
4502 {
4503     struct target_shm_info *target_shm_info;
4504     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4505         return -TARGET_EFAULT;
4506     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4507     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4508     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4509     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4510     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4511     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4512     unlock_user_struct(target_shm_info, target_addr, 1);
4513     return 0;
4514 }
4515 
4516 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4517 {
4518     struct shmid_ds dsarg;
4519     struct shminfo shminfo;
4520     struct shm_info shm_info;
4521     abi_long ret = -TARGET_EINVAL;
4522 
4523     cmd &= 0xff;
4524 
4525     switch(cmd) {
4526     case IPC_STAT:
4527     case IPC_SET:
4528     case SHM_STAT:
4529         if (target_to_host_shmid_ds(&dsarg, buf))
4530             return -TARGET_EFAULT;
4531         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4532         if (host_to_target_shmid_ds(buf, &dsarg))
4533             return -TARGET_EFAULT;
4534         break;
4535     case IPC_INFO:
4536         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4537         if (host_to_target_shminfo(buf, &shminfo))
4538             return -TARGET_EFAULT;
4539         break;
4540     case SHM_INFO:
4541         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4542         if (host_to_target_shm_info(buf, &shm_info))
4543             return -TARGET_EFAULT;
4544         break;
4545     case IPC_RMID:
4546     case SHM_LOCK:
4547     case SHM_UNLOCK:
4548         ret = get_errno(shmctl(shmid, cmd, NULL));
4549         break;
4550     }
4551 
4552     return ret;
4553 }
4554 
4555 #ifndef TARGET_FORCE_SHMLBA
4556 /* For most architectures, SHMLBA is the same as the page size;
4557  * some architectures have larger values, in which case they should
4558  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4559  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4560  * and defining its own value for SHMLBA.
4561  *
4562  * The kernel also permits SHMLBA to be set by the architecture to a
4563  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4564  * this means that addresses are rounded to the large size if
4565  * SHM_RND is set but addresses not aligned to that size are not rejected
4566  * as long as they are at least page-aligned. Since the only architecture
4567  * which uses this is ia64, this code doesn't provide for that oddity.
4568  */
4569 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4570 {
4571     return TARGET_PAGE_SIZE;
4572 }
4573 #endif
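/*
 * An architecture that defines TARGET_FORCE_SHMLBA would instead supply
 * its own helper, e.g. (hypothetical value):
 *
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;
 *   }
 */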
4574 
4575 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4576                                  int shmid, abi_ulong shmaddr, int shmflg)
4577 {
4578     abi_long raddr;
4579     void *host_raddr;
4580     struct shmid_ds shm_info;
4581     int i,ret;
4582     abi_ulong shmlba;
4583 
4584     /* find out the length of the shared memory segment */
4585     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4586     if (is_error(ret)) {
4587         /* can't get length, bail out */
4588         return ret;
4589     }
4590 
4591     shmlba = target_shmlba(cpu_env);
4592 
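    /* For example (illustrative), with shmlba == 0x1000 a request for
     * shmaddr == 0x40001234 is rounded down to 0x40001000 if SHM_RND is
     * set, and rejected with -TARGET_EINVAL otherwise.
     */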
4593     if (shmaddr & (shmlba - 1)) {
4594         if (shmflg & SHM_RND) {
4595             shmaddr &= ~(shmlba - 1);
4596         } else {
4597             return -TARGET_EINVAL;
4598         }
4599     }
4600     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4601         return -TARGET_EINVAL;
4602     }
4603 
4604     mmap_lock();
4605 
4606     if (shmaddr)
4607         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4608     else {
4609         abi_ulong mmap_start;
4610 
4611         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4612         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4613 
4614         if (mmap_start == -1) {
4615             errno = ENOMEM;
4616             host_raddr = (void *)-1;
4617         } else
4618             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4619     }
4620 
4621     if (host_raddr == (void *)-1) {
4622         mmap_unlock();
4623         return get_errno((long)host_raddr);
4624     }
4625     raddr=h2g((unsigned long)host_raddr);
4626 
4627     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4628                    PAGE_VALID | PAGE_READ |
4629                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4630 
4631     for (i = 0; i < N_SHM_REGIONS; i++) {
4632         if (!shm_regions[i].in_use) {
4633             shm_regions[i].in_use = true;
4634             shm_regions[i].start = raddr;
4635             shm_regions[i].size = shm_info.shm_segsz;
4636             break;
4637         }
4638     }
4639 
4640     mmap_unlock();
4641     return raddr;
4642 
4643 }
4644 
4645 static inline abi_long do_shmdt(abi_ulong shmaddr)
4646 {
4647     int i;
4648     abi_long rv;
4649 
4650     mmap_lock();
4651 
4652     for (i = 0; i < N_SHM_REGIONS; ++i) {
4653         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4654             shm_regions[i].in_use = false;
4655             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4656             break;
4657         }
4658     }
4659     rv = get_errno(shmdt(g2h(shmaddr)));
4660 
4661     mmap_unlock();
4662 
4663     return rv;
4664 }
4665 
4666 #ifdef TARGET_NR_ipc
4667 /* ??? This only works with linear mappings.  */
4668 /* do_ipc() must return target values and target errnos. */
4669 static abi_long do_ipc(CPUArchState *cpu_env,
4670                        unsigned int call, abi_long first,
4671                        abi_long second, abi_long third,
4672                        abi_long ptr, abi_long fifth)
4673 {
4674     int version;
4675     abi_long ret = 0;
4676 
4677     version = call >> 16;
4678     call &= 0xffff;
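         /*
          * Linux's ipc() syscall multiplexes the SysV IPC calls: the low 16
          * bits select the operation and the top 16 bits carry an interface
          * version that changes the argument layout of some calls, hence the
          * switches on 'version' in the SHMAT and MSGRCV cases below.
          */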
4679 
4680     switch (call) {
4681     case IPCOP_semop:
4682         ret = do_semtimedop(first, ptr, second, 0, false);
4683         break;
4684     case IPCOP_semtimedop:
4685     /*
4686      * The s390 sys_ipc variant has only five parameters instead of six
4687      * (as in the default variant); the only difference is the handling of
4688      * SEMTIMEDOP, where s390 passes the pointer to the struct timespec in
4689      * the third parameter while the generic variant uses the fifth parameter.
4690      */
4691 #if defined(TARGET_S390X)
4692         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4693 #else
4694         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4695 #endif
4696         break;
4697 
4698     case IPCOP_semget:
4699         ret = get_errno(semget(first, second, third));
4700         break;
4701 
4702     case IPCOP_semctl: {
4703         /* The semun argument to semctl is passed by value, so dereference the
4704          * ptr argument. */
4705         abi_ulong atptr;
4706         get_user_ual(atptr, ptr);
4707         ret = do_semctl(first, second, third, atptr);
4708         break;
4709     }
4710 
4711     case IPCOP_msgget:
4712         ret = get_errno(msgget(first, second));
4713         break;
4714 
4715     case IPCOP_msgsnd:
4716         ret = do_msgsnd(first, ptr, second, third);
4717         break;
4718 
4719     case IPCOP_msgctl:
4720         ret = do_msgctl(first, second, ptr);
4721         break;
4722 
4723     case IPCOP_msgrcv:
4724         switch (version) {
4725         case 0:
4726             {
4727                 struct target_ipc_kludge {
4728                     abi_long msgp;
4729                     abi_long msgtyp;
4730                 } *tmp;
4731 
4732                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4733                     ret = -TARGET_EFAULT;
4734                     break;
4735                 }
4736 
4737                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4738 
4739                 unlock_user_struct(tmp, ptr, 0);
4740                 break;
4741             }
4742         default:
4743             ret = do_msgrcv(first, ptr, second, fifth, third);
4744         }
4745         break;
4746 
4747     case IPCOP_shmat:
4748         switch (version) {
4749         default:
4750         {
4751             abi_ulong raddr;
4752             raddr = do_shmat(cpu_env, first, ptr, second);
4753             if (is_error(raddr))
4754                 return get_errno(raddr);
4755             if (put_user_ual(raddr, third))
4756                 return -TARGET_EFAULT;
4757             break;
4758         }
4759         case 1:
4760             ret = -TARGET_EINVAL;
4761             break;
4762         }
4763         break;
4764     case IPCOP_shmdt:
4765         ret = do_shmdt(ptr);
4766         break;
4767 
4768     case IPCOP_shmget:
4769         /* IPC_* flag values are the same on all linux platforms */
4770         ret = get_errno(shmget(first, second, third));
4771         break;
4772 
4773     /* IPC_* and SHM_* command values are the same on all linux platforms */
4774     case IPCOP_shmctl:
4775         ret = do_shmctl(first, second, ptr);
4776         break;
4777     default:
4778         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4779                       call, version);
4780         ret = -TARGET_ENOSYS;
4781         break;
4782     }
4783     return ret;
4784 }
4785 #endif
4786 
4787 /* kernel structure types definitions */
4788 
4789 #define STRUCT(name, ...) STRUCT_ ## name,
4790 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4791 enum {
4792 #include "syscall_types.h"
4793 STRUCT_MAX
4794 };
4795 #undef STRUCT
4796 #undef STRUCT_SPECIAL
4797 
4798 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4799 #define STRUCT_SPECIAL(name)
4800 #include "syscall_types.h"
4801 #undef STRUCT
4802 #undef STRUCT_SPECIAL
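
     /*
      * The two passes over syscall_types.h above first build the STRUCT_*
      * enum and then one argtype array per structure.  For example, a
      * definition such as
      *
      *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
      *
      * would produce the enum value STRUCT_winsize and the array
      *
      *     static const argtype struct_winsize_def[] = {
      *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
      *     };
      *
      * which the thunk code then uses to convert the structure between
      * target and host layouts.
      */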
4803 
4804 #define MAX_STRUCT_SIZE 4096
4805 
4806 #ifdef CONFIG_FIEMAP
4807 /* So fiemap access checks don't overflow on 32 bit systems.
4808  * This is very slightly smaller than the limit imposed by
4809  * the underlying kernel.
4810  */
4811 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4812                             / sizeof(struct fiemap_extent))
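
     /*
      * For example, with the usual kernel layouts (a 32-byte struct fiemap
      * header and 56-byte struct fiemap_extent entries) this caps
      * fm_extent_count at roughly 76 million, keeping the computed buffer
      * size below UINT_MAX.
      */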
4813 
4814 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4815                                        int fd, int cmd, abi_long arg)
4816 {
4817     /* The parameter for this ioctl is a struct fiemap followed
4818      * by an array of struct fiemap_extent whose size is set
4819      * in fiemap->fm_extent_count. The array is filled in by the
4820      * ioctl.
4821      */
4822     int target_size_in, target_size_out;
4823     struct fiemap *fm;
4824     const argtype *arg_type = ie->arg_type;
4825     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4826     void *argptr, *p;
4827     abi_long ret;
4828     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4829     uint32_t outbufsz;
4830     int free_fm = 0;
4831 
4832     assert(arg_type[0] == TYPE_PTR);
4833     assert(ie->access == IOC_RW);
4834     arg_type++;
4835     target_size_in = thunk_type_size(arg_type, 0);
4836     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4837     if (!argptr) {
4838         return -TARGET_EFAULT;
4839     }
4840     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4841     unlock_user(argptr, arg, 0);
4842     fm = (struct fiemap *)buf_temp;
4843     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4844         return -TARGET_EINVAL;
4845     }
4846 
4847     outbufsz = sizeof (*fm) +
4848         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4849 
4850     if (outbufsz > MAX_STRUCT_SIZE) {
4851         /* We can't fit all the extents into the fixed size buffer.
4852          * Allocate one that is large enough and use it instead.
4853          */
4854         fm = g_try_malloc(outbufsz);
4855         if (!fm) {
4856             return -TARGET_ENOMEM;
4857         }
4858         memcpy(fm, buf_temp, sizeof(struct fiemap));
4859         free_fm = 1;
4860     }
4861     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4862     if (!is_error(ret)) {
4863         target_size_out = target_size_in;
4864         /* An extent_count of 0 means we were only counting the extents
4865          * so there are no structs to copy
4866          */
4867         if (fm->fm_extent_count != 0) {
4868             target_size_out += fm->fm_mapped_extents * extent_size;
4869         }
4870         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4871         if (!argptr) {
4872             ret = -TARGET_EFAULT;
4873         } else {
4874             /* Convert the struct fiemap */
4875             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4876             if (fm->fm_extent_count != 0) {
4877                 p = argptr + target_size_in;
4878                 /* ...and then all the struct fiemap_extents */
4879                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4880                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4881                                   THUNK_TARGET);
4882                     p += extent_size;
4883                 }
4884             }
4885             unlock_user(argptr, arg, target_size_out);
4886         }
4887     }
4888     if (free_fm) {
4889         g_free(fm);
4890     }
4891     return ret;
4892 }
4893 #endif
4894 
4895 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4896                                 int fd, int cmd, abi_long arg)
4897 {
4898     const argtype *arg_type = ie->arg_type;
4899     int target_size;
4900     void *argptr;
4901     int ret;
4902     struct ifconf *host_ifconf;
4903     uint32_t outbufsz;
4904     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4905     int target_ifreq_size;
4906     int nb_ifreq;
4907     int free_buf = 0;
4908     int i;
4909     int target_ifc_len;
4910     abi_long target_ifc_buf;
4911     int host_ifc_len;
4912     char *host_ifc_buf;
4913 
4914     assert(arg_type[0] == TYPE_PTR);
4915     assert(ie->access == IOC_RW);
4916 
4917     arg_type++;
4918     target_size = thunk_type_size(arg_type, 0);
4919 
4920     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4921     if (!argptr)
4922         return -TARGET_EFAULT;
4923     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4924     unlock_user(argptr, arg, 0);
4925 
4926     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4927     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4928     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4929 
4930     if (target_ifc_buf != 0) {
4931         target_ifc_len = host_ifconf->ifc_len;
4932         nb_ifreq = target_ifc_len / target_ifreq_size;
4933         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
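             /*
              * The target and host struct ifreq may differ in size, so the
              * entry count is derived from the target's ifc_len and the host
              * buffer is then sized for that many host-sized entries.
              */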
4934 
4935         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4936         if (outbufsz > MAX_STRUCT_SIZE) {
4937             /*
4938              * We can't fit all the requested ifreq entries into the fixed size buffer.
4939              * Allocate one that is large enough and use it instead.
4940              */
4941             host_ifconf = malloc(outbufsz);
4942             if (!host_ifconf) {
4943                 return -TARGET_ENOMEM;
4944             }
4945             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4946             free_buf = 1;
4947         }
4948         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4949 
4950         host_ifconf->ifc_len = host_ifc_len;
4951     } else {
4952         host_ifc_buf = NULL;
4953     }
4954     host_ifconf->ifc_buf = host_ifc_buf;
4955 
4956     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4957     if (!is_error(ret)) {
4958         /* convert host ifc_len to target ifc_len */
4959 
4960         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4961         target_ifc_len = nb_ifreq * target_ifreq_size;
4962         host_ifconf->ifc_len = target_ifc_len;
4963 
4964         /* restore target ifc_buf */
4965 
4966         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4967 
4968         /* copy struct ifconf to target user */
4969 
4970         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4971         if (!argptr)
4972             return -TARGET_EFAULT;
4973         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4974         unlock_user(argptr, arg, target_size);
4975 
4976         if (target_ifc_buf != 0) {
4977             /* copy ifreq[] to target user */
4978             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4979             for (i = 0; i < nb_ifreq ; i++) {
4980                 thunk_convert(argptr + i * target_ifreq_size,
4981                               host_ifc_buf + i * sizeof(struct ifreq),
4982                               ifreq_arg_type, THUNK_TARGET);
4983             }
4984             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4985         }
4986     }
4987 
4988     if (free_buf) {
4989         free(host_ifconf);
4990     }
4991 
4992     return ret;
4993 }
4994 
4995 #if defined(CONFIG_USBFS)
4996 #if HOST_LONG_BITS > 64
4997 #error USBDEVFS thunks do not support >64 bit hosts yet.
4998 #endif
4999 struct live_urb {
5000     uint64_t target_urb_adr;
5001     uint64_t target_buf_adr;
5002     char *target_buf_ptr;
5003     struct usbdevfs_urb host_urb;
5004 };
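
     /*
      * host_urb is the member actually handed to the kernel; when
      * USBDEVFS_REAPURB later returns that same pointer, the enclosing
      * live_urb (and with it the guest URB and buffer addresses) is
      * recovered container_of-style:
      *
      *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
      *
      * as done in do_ioctl_usbdevfs_reapurb() below.
      */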
5005 
5006 static GHashTable *usbdevfs_urb_hashtable(void)
5007 {
5008     static GHashTable *urb_hashtable;
5009 
5010     if (!urb_hashtable) {
5011         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5012     }
5013     return urb_hashtable;
5014 }
5015 
5016 static void urb_hashtable_insert(struct live_urb *urb)
5017 {
5018     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5019     g_hash_table_insert(urb_hashtable, urb, urb);
5020 }
5021 
5022 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5023 {
5024     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5025     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5026 }
5027 
5028 static void urb_hashtable_remove(struct live_urb *urb)
5029 {
5030     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5031     g_hash_table_remove(urb_hashtable, urb);
5032 }
5033 
5034 static abi_long
5035 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5036                           int fd, int cmd, abi_long arg)
5037 {
5038     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5039     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5040     struct live_urb *lurb;
5041     void *argptr;
5042     uint64_t hurb;
5043     int target_size;
5044     uintptr_t target_urb_adr;
5045     abi_long ret;
5046 
5047     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5048 
5049     memset(buf_temp, 0, sizeof(uint64_t));
5050     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5051     if (is_error(ret)) {
5052         return ret;
5053     }
5054 
5055     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5056     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5057     if (!lurb->target_urb_adr) {
5058         return -TARGET_EFAULT;
5059     }
5060     urb_hashtable_remove(lurb);
5061     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5062         lurb->host_urb.buffer_length);
5063     lurb->target_buf_ptr = NULL;
5064 
5065     /* restore the guest buffer pointer */
5066     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5067 
5068     /* update the guest urb struct */
5069     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5070     if (!argptr) {
5071         g_free(lurb);
5072         return -TARGET_EFAULT;
5073     }
5074     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5075     unlock_user(argptr, lurb->target_urb_adr, target_size);
5076 
5077     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5078     /* write back the urb handle */
5079     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5080     if (!argptr) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084 
5085     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5086     target_urb_adr = lurb->target_urb_adr;
5087     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5088     unlock_user(argptr, arg, target_size);
5089 
5090     g_free(lurb);
5091     return ret;
5092 }
5093 
5094 static abi_long
5095 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5096                              uint8_t *buf_temp __attribute__((unused)),
5097                              int fd, int cmd, abi_long arg)
5098 {
5099     struct live_urb *lurb;
5100 
5101     /* map target address back to host URB with metadata. */
5102     lurb = urb_hashtable_lookup(arg);
5103     if (!lurb) {
5104         return -TARGET_EFAULT;
5105     }
5106     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5107 }
5108 
5109 static abi_long
5110 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5111                             int fd, int cmd, abi_long arg)
5112 {
5113     const argtype *arg_type = ie->arg_type;
5114     int target_size;
5115     abi_long ret;
5116     void *argptr;
5117     int rw_dir;
5118     struct live_urb *lurb;
5119 
5120     /*
5121      * Each submitted URB needs to map to a unique ID for the
5122      * kernel, and that unique ID needs to be a pointer to host
5123      * memory.  Hence we allocate a live_urb for each submission.
5124      * Isochronous transfers have a variable-length struct.
5125      */
5126     arg_type++;
5127     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5128 
5129     /* construct host copy of urb and metadata */
5130     lurb = g_try_malloc0(sizeof(struct live_urb));
5131     if (!lurb) {
5132         return -TARGET_ENOMEM;
5133     }
5134 
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         g_free(lurb);
5138         return -TARGET_EFAULT;
5139     }
5140     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5141     unlock_user(argptr, arg, 0);
5142 
5143     lurb->target_urb_adr = arg;
5144     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5145 
5146     /* buffer space used depends on endpoint type so lock the entire buffer */
5147     /* control type urbs should check the buffer contents for true direction */
5148     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5149     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5150         lurb->host_urb.buffer_length, 1);
5151     if (lurb->target_buf_ptr == NULL) {
5152         g_free(lurb);
5153         return -TARGET_EFAULT;
5154     }
5155 
5156     /* update buffer pointer in host copy */
5157     lurb->host_urb.buffer = lurb->target_buf_ptr;
5158 
5159     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5160     if (is_error(ret)) {
5161         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5162         g_free(lurb);
5163     } else {
5164         urb_hashtable_insert(lurb);
5165     }
5166 
5167     return ret;
5168 }
5169 #endif /* CONFIG_USBFS */
5170 
5171 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5172                             int cmd, abi_long arg)
5173 {
5174     void *argptr;
5175     struct dm_ioctl *host_dm;
5176     abi_long guest_data;
5177     uint32_t guest_data_size;
5178     int target_size;
5179     const argtype *arg_type = ie->arg_type;
5180     abi_long ret;
5181     void *big_buf = NULL;
5182     char *host_data;
5183 
5184     arg_type++;
5185     target_size = thunk_type_size(arg_type, 0);
5186     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5187     if (!argptr) {
5188         ret = -TARGET_EFAULT;
5189         goto out;
5190     }
5191     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5192     unlock_user(argptr, arg, 0);
5193 
5194     /* buf_temp is too small, so fetch things into a bigger buffer */
5195     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5196     memcpy(big_buf, buf_temp, target_size);
5197     buf_temp = big_buf;
5198     host_dm = big_buf;
5199 
5200     guest_data = arg + host_dm->data_start;
5201     if ((guest_data - arg) < 0) {
5202         ret = -TARGET_EINVAL;
5203         goto out;
5204     }
5205     guest_data_size = host_dm->data_size - host_dm->data_start;
5206     host_data = (char*)host_dm + host_dm->data_start;
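         /*
          * A dm_ioctl request is a fixed header followed by a variable
          * payload: data_start is the payload's offset from the start of the
          * header and data_size the total size, so the same offsets are
          * applied to the guest buffer (arg) and to the host copy (host_dm).
          */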
5207 
5208     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5209     if (!argptr) {
5210         ret = -TARGET_EFAULT;
5211         goto out;
5212     }
5213 
5214     switch (ie->host_cmd) {
5215     case DM_REMOVE_ALL:
5216     case DM_LIST_DEVICES:
5217     case DM_DEV_CREATE:
5218     case DM_DEV_REMOVE:
5219     case DM_DEV_SUSPEND:
5220     case DM_DEV_STATUS:
5221     case DM_DEV_WAIT:
5222     case DM_TABLE_STATUS:
5223     case DM_TABLE_CLEAR:
5224     case DM_TABLE_DEPS:
5225     case DM_LIST_VERSIONS:
5226         /* no input data */
5227         break;
5228     case DM_DEV_RENAME:
5229     case DM_DEV_SET_GEOMETRY:
5230         /* data contains only strings */
5231         memcpy(host_data, argptr, guest_data_size);
5232         break;
5233     case DM_TARGET_MSG:
5234         memcpy(host_data, argptr, guest_data_size);
5235         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5236         break;
5237     case DM_TABLE_LOAD:
5238     {
5239         void *gspec = argptr;
5240         void *cur_data = host_data;
5241         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5242         int spec_size = thunk_type_size(arg_type, 0);
5243         int i;
5244 
5245         for (i = 0; i < host_dm->target_count; i++) {
5246             struct dm_target_spec *spec = cur_data;
5247             uint32_t next;
5248             int slen;
5249 
5250             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5251             slen = strlen((char*)gspec + spec_size) + 1;
5252             next = spec->next;
5253             spec->next = sizeof(*spec) + slen;
5254             strcpy((char*)&spec[1], gspec + spec_size);
5255             gspec += next;
5256             cur_data += spec->next;
5257         }
5258         break;
5259     }
5260     default:
5261         ret = -TARGET_EINVAL;
5262         unlock_user(argptr, guest_data, 0);
5263         goto out;
5264     }
5265     unlock_user(argptr, guest_data, 0);
5266 
5267     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5268     if (!is_error(ret)) {
5269         guest_data = arg + host_dm->data_start;
5270         guest_data_size = host_dm->data_size - host_dm->data_start;
5271         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5272         switch (ie->host_cmd) {
5273         case DM_REMOVE_ALL:
5274         case DM_DEV_CREATE:
5275         case DM_DEV_REMOVE:
5276         case DM_DEV_RENAME:
5277         case DM_DEV_SUSPEND:
5278         case DM_DEV_STATUS:
5279         case DM_TABLE_LOAD:
5280         case DM_TABLE_CLEAR:
5281         case DM_TARGET_MSG:
5282         case DM_DEV_SET_GEOMETRY:
5283             /* no return data */
5284             break;
5285         case DM_LIST_DEVICES:
5286         {
5287             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5288             uint32_t remaining_data = guest_data_size;
5289             void *cur_data = argptr;
5290             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5291             int nl_size = 12; /* can't use thunk_size due to alignment */
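                 /*
                  * struct dm_name_list is a 64-bit dev followed by a 32-bit
                  * next offset, so the name string starts at offset 12 even
                  * though the aligned (thunk) size of the struct would be 16.
                  */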
5292 
5293             while (1) {
5294                 uint32_t next = nl->next;
5295                 if (next) {
5296                     nl->next = nl_size + (strlen(nl->name) + 1);
5297                 }
5298                 if (remaining_data < nl->next) {
5299                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5300                     break;
5301                 }
5302                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5303                 strcpy(cur_data + nl_size, nl->name);
5304                 cur_data += nl->next;
5305                 remaining_data -= nl->next;
5306                 if (!next) {
5307                     break;
5308                 }
5309                 nl = (void*)nl + next;
5310             }
5311             break;
5312         }
5313         case DM_DEV_WAIT:
5314         case DM_TABLE_STATUS:
5315         {
5316             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5317             void *cur_data = argptr;
5318             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5319             int spec_size = thunk_type_size(arg_type, 0);
5320             int i;
5321 
5322             for (i = 0; i < host_dm->target_count; i++) {
5323                 uint32_t next = spec->next;
5324                 int slen = strlen((char*)&spec[1]) + 1;
5325                 spec->next = (cur_data - argptr) + spec_size + slen;
5326                 if (guest_data_size < spec->next) {
5327                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5328                     break;
5329                 }
5330                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5331                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5332                 cur_data = argptr + spec->next;
5333                 spec = (void*)host_dm + host_dm->data_start + next;
5334             }
5335             break;
5336         }
5337         case DM_TABLE_DEPS:
5338         {
5339             void *hdata = (void*)host_dm + host_dm->data_start;
5340             int count = *(uint32_t*)hdata;
5341             uint64_t *hdev = hdata + 8;
5342             uint64_t *gdev = argptr + 8;
5343             int i;
5344 
5345             *(uint32_t*)argptr = tswap32(count);
5346             for (i = 0; i < count; i++) {
5347                 *gdev = tswap64(*hdev);
5348                 gdev++;
5349                 hdev++;
5350             }
5351             break;
5352         }
5353         case DM_LIST_VERSIONS:
5354         {
5355             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5356             uint32_t remaining_data = guest_data_size;
5357             void *cur_data = argptr;
5358             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5359             int vers_size = thunk_type_size(arg_type, 0);
5360 
5361             while (1) {
5362                 uint32_t next = vers->next;
5363                 if (next) {
5364                     vers->next = vers_size + (strlen(vers->name) + 1);
5365                 }
5366                 if (remaining_data < vers->next) {
5367                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5368                     break;
5369                 }
5370                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5371                 strcpy(cur_data + vers_size, vers->name);
5372                 cur_data += vers->next;
5373                 remaining_data -= vers->next;
5374                 if (!next) {
5375                     break;
5376                 }
5377                 vers = (void*)vers + next;
5378             }
5379             break;
5380         }
5381         default:
5382             unlock_user(argptr, guest_data, 0);
5383             ret = -TARGET_EINVAL;
5384             goto out;
5385         }
5386         unlock_user(argptr, guest_data, guest_data_size);
5387 
5388         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5389         if (!argptr) {
5390             ret = -TARGET_EFAULT;
5391             goto out;
5392         }
5393         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5394         unlock_user(argptr, arg, target_size);
5395     }
5396 out:
5397     g_free(big_buf);
5398     return ret;
5399 }
5400 
5401 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5402                                int cmd, abi_long arg)
5403 {
5404     void *argptr;
5405     int target_size;
5406     const argtype *arg_type = ie->arg_type;
5407     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5408     abi_long ret;
5409 
5410     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5411     struct blkpg_partition host_part;
5412 
5413     /* Read and convert blkpg */
5414     arg_type++;
5415     target_size = thunk_type_size(arg_type, 0);
5416     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5417     if (!argptr) {
5418         ret = -TARGET_EFAULT;
5419         goto out;
5420     }
5421     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5422     unlock_user(argptr, arg, 0);
5423 
5424     switch (host_blkpg->op) {
5425     case BLKPG_ADD_PARTITION:
5426     case BLKPG_DEL_PARTITION:
5427         /* payload is struct blkpg_partition */
5428         break;
5429     default:
5430         /* Unknown opcode */
5431         ret = -TARGET_EINVAL;
5432         goto out;
5433     }
5434 
5435     /* Read and convert blkpg->data */
5436     arg = (abi_long)(uintptr_t)host_blkpg->data;
5437     target_size = thunk_type_size(part_arg_type, 0);
5438     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5439     if (!argptr) {
5440         ret = -TARGET_EFAULT;
5441         goto out;
5442     }
5443     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5444     unlock_user(argptr, arg, 0);
5445 
5446     /* Swizzle the data pointer to our local copy and call! */
5447     host_blkpg->data = &host_part;
5448     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5449 
5450 out:
5451     return ret;
5452 }
5453 
5454 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5455                                 int fd, int cmd, abi_long arg)
5456 {
5457     const argtype *arg_type = ie->arg_type;
5458     const StructEntry *se;
5459     const argtype *field_types;
5460     const int *dst_offsets, *src_offsets;
5461     int target_size;
5462     void *argptr;
5463     abi_ulong *target_rt_dev_ptr = NULL;
5464     unsigned long *host_rt_dev_ptr = NULL;
5465     abi_long ret;
5466     int i;
5467 
5468     assert(ie->access == IOC_W);
5469     assert(*arg_type == TYPE_PTR);
5470     arg_type++;
5471     assert(*arg_type == TYPE_STRUCT);
5472     target_size = thunk_type_size(arg_type, 0);
5473     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5474     if (!argptr) {
5475         return -TARGET_EFAULT;
5476     }
5477     arg_type++;
5478     assert(*arg_type == (int)STRUCT_rtentry);
5479     se = struct_entries + *arg_type++;
5480     assert(se->convert[0] == NULL);
5481     /* convert struct here to be able to catch rt_dev string */
5482     field_types = se->field_types;
5483     dst_offsets = se->field_offsets[THUNK_HOST];
5484     src_offsets = se->field_offsets[THUNK_TARGET];
5485     for (i = 0; i < se->nb_fields; i++) {
5486         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5487             assert(*field_types == TYPE_PTRVOID);
5488             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5489             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5490             if (*target_rt_dev_ptr != 0) {
5491                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5492                                                   tswapal(*target_rt_dev_ptr));
5493                 if (!*host_rt_dev_ptr) {
5494                     unlock_user(argptr, arg, 0);
5495                     return -TARGET_EFAULT;
5496                 }
5497             } else {
5498                 *host_rt_dev_ptr = 0;
5499             }
5500             field_types++;
5501             continue;
5502         }
5503         field_types = thunk_convert(buf_temp + dst_offsets[i],
5504                                     argptr + src_offsets[i],
5505                                     field_types, THUNK_HOST);
5506     }
5507     unlock_user(argptr, arg, 0);
5508 
5509     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5510 
5511     assert(host_rt_dev_ptr != NULL);
5512     assert(target_rt_dev_ptr != NULL);
5513     if (*host_rt_dev_ptr != 0) {
5514         unlock_user((void *)*host_rt_dev_ptr,
5515                     *target_rt_dev_ptr, 0);
5516     }
5517     return ret;
5518 }
5519 
5520 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5521                                      int fd, int cmd, abi_long arg)
5522 {
5523     int sig = target_to_host_signal(arg);
5524     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5525 }
5526 
5527 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5528                                     int fd, int cmd, abi_long arg)
5529 {
5530     struct timeval tv;
5531     abi_long ret;
5532 
5533     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5534     if (is_error(ret)) {
5535         return ret;
5536     }
5537 
5538     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5539         if (copy_to_user_timeval(arg, &tv)) {
5540             return -TARGET_EFAULT;
5541         }
5542     } else {
5543         if (copy_to_user_timeval64(arg, &tv)) {
5544             return -TARGET_EFAULT;
5545         }
5546     }
5547 
5548     return ret;
5549 }
5550 
5551 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5552                                       int fd, int cmd, abi_long arg)
5553 {
5554     struct timespec ts;
5555     abi_long ret;
5556 
5557     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5558     if (is_error(ret)) {
5559         return ret;
5560     }
5561 
5562     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5563         if (host_to_target_timespec(arg, &ts)) {
5564             return -TARGET_EFAULT;
5565         }
5566     } else {
5567         if (host_to_target_timespec64(arg, &ts)) {
5568             return -TARGET_EFAULT;
5569         }
5570     }
5571 
5572     return ret;
5573 }
5574 
5575 #ifdef TIOCGPTPEER
5576 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5577                                      int fd, int cmd, abi_long arg)
5578 {
5579     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5580     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5581 }
5582 #endif
5583 
5584 #ifdef HAVE_DRM_H
5585 
5586 static void unlock_drm_version(struct drm_version *host_ver,
5587                                struct target_drm_version *target_ver,
5588                                bool copy)
5589 {
5590     unlock_user(host_ver->name, target_ver->name,
5591                                 copy ? host_ver->name_len : 0);
5592     unlock_user(host_ver->date, target_ver->date,
5593                                 copy ? host_ver->date_len : 0);
5594     unlock_user(host_ver->desc, target_ver->desc,
5595                                 copy ? host_ver->desc_len : 0);
5596 }
5597 
5598 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5599                                           struct target_drm_version *target_ver)
5600 {
5601     memset(host_ver, 0, sizeof(*host_ver));
5602 
5603     __get_user(host_ver->name_len, &target_ver->name_len);
5604     if (host_ver->name_len) {
5605         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5606                                    target_ver->name_len, 0);
5607         if (!host_ver->name) {
5608             return -EFAULT;
5609         }
5610     }
5611 
5612     __get_user(host_ver->date_len, &target_ver->date_len);
5613     if (host_ver->date_len) {
5614         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5615                                    target_ver->date_len, 0);
5616         if (!host_ver->date) {
5617             goto err;
5618         }
5619     }
5620 
5621     __get_user(host_ver->desc_len, &target_ver->desc_len);
5622     if (host_ver->desc_len) {
5623         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5624                                    target_ver->desc_len, 0);
5625         if (!host_ver->desc) {
5626             goto err;
5627         }
5628     }
5629 
5630     return 0;
5631 err:
5632     unlock_drm_version(host_ver, target_ver, false);
5633     return -EFAULT;
5634 }
5635 
5636 static inline void host_to_target_drmversion(
5637                                           struct target_drm_version *target_ver,
5638                                           struct drm_version *host_ver)
5639 {
5640     __put_user(host_ver->version_major, &target_ver->version_major);
5641     __put_user(host_ver->version_minor, &target_ver->version_minor);
5642     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5643     __put_user(host_ver->name_len, &target_ver->name_len);
5644     __put_user(host_ver->date_len, &target_ver->date_len);
5645     __put_user(host_ver->desc_len, &target_ver->desc_len);
5646     unlock_drm_version(host_ver, target_ver, true);
5647 }
5648 
5649 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5650                              int fd, int cmd, abi_long arg)
5651 {
5652     struct drm_version *ver;
5653     struct target_drm_version *target_ver;
5654     abi_long ret;
5655 
5656     switch (ie->host_cmd) {
5657     case DRM_IOCTL_VERSION:
5658         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5659             return -TARGET_EFAULT;
5660         }
5661         ver = (struct drm_version *)buf_temp;
5662         ret = target_to_host_drmversion(ver, target_ver);
5663         if (!is_error(ret)) {
5664             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5665             if (is_error(ret)) {
5666                 unlock_drm_version(ver, target_ver, false);
5667             } else {
5668                 host_to_target_drmversion(target_ver, ver);
5669             }
5670         }
5671         unlock_user_struct(target_ver, arg, 0);
5672         return ret;
5673     }
5674     return -TARGET_ENOSYS;
5675 }
5676 
5677 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5678                                            struct drm_i915_getparam *gparam,
5679                                            int fd, abi_long arg)
5680 {
5681     abi_long ret;
5682     int value;
5683     struct target_drm_i915_getparam *target_gparam;
5684 
5685     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5686         return -TARGET_EFAULT;
5687     }
5688 
5689     __get_user(gparam->param, &target_gparam->param);
5690     gparam->value = &value;
5691     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5692     put_user_s32(value, target_gparam->value);
5693 
5694     unlock_user_struct(target_gparam, arg, 0);
5695     return ret;
5696 }
5697 
5698 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5699                                   int fd, int cmd, abi_long arg)
5700 {
5701     switch (ie->host_cmd) {
5702     case DRM_IOCTL_I915_GETPARAM:
5703         return do_ioctl_drm_i915_getparam(ie,
5704                                           (struct drm_i915_getparam *)buf_temp,
5705                                           fd, arg);
5706     default:
5707         return -TARGET_ENOSYS;
5708     }
5709 }
5710 
5711 #endif
5712 
5713 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5714                                         int fd, int cmd, abi_long arg)
5715 {
5716     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5717     struct tun_filter *target_filter;
5718     char *target_addr;
5719 
5720     assert(ie->access == IOC_W);
5721 
5722     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5723     if (!target_filter) {
5724         return -TARGET_EFAULT;
5725     }
5726     filter->flags = tswap16(target_filter->flags);
5727     filter->count = tswap16(target_filter->count);
5728     unlock_user(target_filter, arg, 0);
5729 
5730     if (filter->count) {
5731         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5732             MAX_STRUCT_SIZE) {
5733             return -TARGET_EFAULT;
5734         }
5735 
5736         target_addr = lock_user(VERIFY_READ,
5737                                 arg + offsetof(struct tun_filter, addr),
5738                                 filter->count * ETH_ALEN, 1);
5739         if (!target_addr) {
5740             return -TARGET_EFAULT;
5741         }
5742         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5743         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5744     }
5745 
5746     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5747 }
5748 
5749 IOCTLEntry ioctl_entries[] = {
5750 #define IOCTL(cmd, access, ...) \
5751     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5752 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5753     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5754 #define IOCTL_IGNORE(cmd) \
5755     { TARGET_ ## cmd, 0, #cmd },
5756 #include "ioctls.h"
5757     { 0, 0, },
5758 };
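
     /*
      * For illustration, an entry such as
      *
      *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
      *
      * in ioctls.h expands to
      *
      *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
      *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
      *
      * while IOCTL_IGNORE() entries leave host_cmd as 0 so that do_ioctl()
      * below fails them with -TARGET_ENOSYS instead of passing them through.
      */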
5759 
5760 /* ??? Implement proper locking for ioctls.  */
5761 /* do_ioctl() must return target values and target errnos. */
5762 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5763 {
5764     const IOCTLEntry *ie;
5765     const argtype *arg_type;
5766     abi_long ret;
5767     uint8_t buf_temp[MAX_STRUCT_SIZE];
5768     int target_size;
5769     void *argptr;
5770 
5771     ie = ioctl_entries;
5772     for(;;) {
5773         if (ie->target_cmd == 0) {
5774             qemu_log_mask(
5775                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5776             return -TARGET_ENOSYS;
5777         }
5778         if (ie->target_cmd == cmd)
5779             break;
5780         ie++;
5781     }
5782     arg_type = ie->arg_type;
5783     if (ie->do_ioctl) {
5784         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5785     } else if (!ie->host_cmd) {
5786         /* Some architectures define BSD ioctls in their headers
5787            that are not implemented in Linux.  */
5788         return -TARGET_ENOSYS;
5789     }
5790 
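         /*
          * Generic path: scalar arguments are passed straight through, while
          * TYPE_PTR arguments are converted via the thunk machinery into
          * buf_temp before the ioctl (IOC_W/IOC_RW) and/or back out to the
          * guest afterwards (IOC_R/IOC_RW), driven by the argtype
          * description from ioctls.h.
          */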
5791     switch(arg_type[0]) {
5792     case TYPE_NULL:
5793         /* no argument */
5794         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5795         break;
5796     case TYPE_PTRVOID:
5797     case TYPE_INT:
5798     case TYPE_LONG:
5799     case TYPE_ULONG:
5800         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5801         break;
5802     case TYPE_PTR:
5803         arg_type++;
5804         target_size = thunk_type_size(arg_type, 0);
5805         switch(ie->access) {
5806         case IOC_R:
5807             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5808             if (!is_error(ret)) {
5809                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5810                 if (!argptr)
5811                     return -TARGET_EFAULT;
5812                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5813                 unlock_user(argptr, arg, target_size);
5814             }
5815             break;
5816         case IOC_W:
5817             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5818             if (!argptr)
5819                 return -TARGET_EFAULT;
5820             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5821             unlock_user(argptr, arg, 0);
5822             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5823             break;
5824         default:
5825         case IOC_RW:
5826             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5827             if (!argptr)
5828                 return -TARGET_EFAULT;
5829             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5830             unlock_user(argptr, arg, 0);
5831             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5832             if (!is_error(ret)) {
5833                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5834                 if (!argptr)
5835                     return -TARGET_EFAULT;
5836                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5837                 unlock_user(argptr, arg, target_size);
5838             }
5839             break;
5840         }
5841         break;
5842     default:
5843         qemu_log_mask(LOG_UNIMP,
5844                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5845                       (long)cmd, arg_type[0]);
5846         ret = -TARGET_ENOSYS;
5847         break;
5848     }
5849     return ret;
5850 }
5851 
5852 static const bitmask_transtbl iflag_tbl[] = {
5853         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5854         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5855         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5856         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5857         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5858         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5859         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5860         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5861         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5862         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5863         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5864         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5865         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5866         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5867         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5868         { 0, 0, 0, 0 }
5869 };
5870 
5871 static const bitmask_transtbl oflag_tbl[] = {
5872 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5873 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5874 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5875 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5876 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5877 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5878 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5879 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5880 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5881 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5882 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5883 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5884 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5885 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5886 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5887 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5888 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5889 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5890 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5891 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5892 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5893 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5894 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5895 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5896 	{ 0, 0, 0, 0 }
5897 };
5898 
5899 static const bitmask_transtbl cflag_tbl[] = {
5900 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5901 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5902 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5903 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5904 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5905 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5906 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5907 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5908 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5909 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5910 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5911 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5912 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5913 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5914 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5915 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5916 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5917 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5918 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5919 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5920 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5921 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5922 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5923 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5924 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5925 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5926 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5927 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5928 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5929 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5930 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5931 	{ 0, 0, 0, 0 }
5932 };
5933 
5934 static const bitmask_transtbl lflag_tbl[] = {
5935   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5936   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5937   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5938   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5939   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5940   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5941   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5942   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5943   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5944   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5945   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5946   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5947   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5948   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5949   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5950   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5951   { 0, 0, 0, 0 }
5952 };
5953 
5954 static void target_to_host_termios (void *dst, const void *src)
5955 {
5956     struct host_termios *host = dst;
5957     const struct target_termios *target = src;
5958 
5959     host->c_iflag =
5960         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5961     host->c_oflag =
5962         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5963     host->c_cflag =
5964         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5965     host->c_lflag =
5966         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5967     host->c_line = target->c_line;
5968 
5969     memset(host->c_cc, 0, sizeof(host->c_cc));
5970     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5971     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5972     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5973     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5974     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5975     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5976     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5977     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5978     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5979     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5980     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5981     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5982     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5983     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5984     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5985     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5986     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5987 }
5988 
5989 static void host_to_target_termios (void *dst, const void *src)
5990 {
5991     struct target_termios *target = dst;
5992     const struct host_termios *host = src;
5993 
5994     target->c_iflag =
5995         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5996     target->c_oflag =
5997         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5998     target->c_cflag =
5999         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6000     target->c_lflag =
6001         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6002     target->c_line = host->c_line;
6003 
6004     memset(target->c_cc, 0, sizeof(target->c_cc));
6005     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6006     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6007     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6008     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6009     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6010     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6011     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6012     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6013     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6014     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6015     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6016     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6017     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6018     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6019     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6020     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6021     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6022 }
6023 
6024 static const StructEntry struct_termios_def = {
6025     .convert = { host_to_target_termios, target_to_host_termios },
6026     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6027     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6028     .print = print_termios,
6029 };
6030 
6031 static bitmask_transtbl mmap_flags_tbl[] = {
6032     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6033     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6034     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6035     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6036       MAP_ANONYMOUS, MAP_ANONYMOUS },
6037     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6038       MAP_GROWSDOWN, MAP_GROWSDOWN },
6039     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6040       MAP_DENYWRITE, MAP_DENYWRITE },
6041     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6042       MAP_EXECUTABLE, MAP_EXECUTABLE },
6043     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6044     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6045       MAP_NORESERVE, MAP_NORESERVE },
6046     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6047     /* MAP_STACK had been ignored by the kernel for quite some time.
6048        Recognize it for the target insofar as we do not want to pass
6049        it through to the host.  */
6050     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6051     { 0, 0, 0, 0 }
6052 };
6053 
6054 /*
6055  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6056  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6057  */
6058 #if defined(TARGET_I386)
6059 
6060 /* NOTE: there is really one LDT for all the threads */
6061 static uint8_t *ldt_table;
6062 
6063 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6064 {
6065     int size;
6066     void *p;
6067 
6068     if (!ldt_table)
6069         return 0;
6070     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6071     if (size > bytecount)
6072         size = bytecount;
6073     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6074     if (!p)
6075         return -TARGET_EFAULT;
6076     /* ??? Should this be byteswapped?  */
6077     memcpy(p, ldt_table, size);
6078     unlock_user(p, ptr, size);
6079     return size;
6080 }
6081 
6082 /* XXX: add locking support */
6083 static abi_long write_ldt(CPUX86State *env,
6084                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6085 {
6086     struct target_modify_ldt_ldt_s ldt_info;
6087     struct target_modify_ldt_ldt_s *target_ldt_info;
6088     int seg_32bit, contents, read_exec_only, limit_in_pages;
6089     int seg_not_present, useable, lm;
6090     uint32_t *lp, entry_1, entry_2;
6091 
6092     if (bytecount != sizeof(ldt_info))
6093         return -TARGET_EINVAL;
6094     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6095         return -TARGET_EFAULT;
6096     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6097     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6098     ldt_info.limit = tswap32(target_ldt_info->limit);
6099     ldt_info.flags = tswap32(target_ldt_info->flags);
6100     unlock_user_struct(target_ldt_info, ptr, 0);
6101 
6102     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6103         return -TARGET_EINVAL;
6104     seg_32bit = ldt_info.flags & 1;
6105     contents = (ldt_info.flags >> 1) & 3;
6106     read_exec_only = (ldt_info.flags >> 3) & 1;
6107     limit_in_pages = (ldt_info.flags >> 4) & 1;
6108     seg_not_present = (ldt_info.flags >> 5) & 1;
6109     useable = (ldt_info.flags >> 6) & 1;
6110 #ifdef TARGET_ABI32
6111     lm = 0;
6112 #else
6113     lm = (ldt_info.flags >> 7) & 1;
6114 #endif
6115     if (contents == 3) {
6116         if (oldmode)
6117             return -TARGET_EINVAL;
6118         if (seg_not_present == 0)
6119             return -TARGET_EINVAL;
6120     }
6121     /* allocate the LDT */
6122     if (!ldt_table) {
6123         env->ldt.base = target_mmap(0,
6124                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6125                                     PROT_READ|PROT_WRITE,
6126                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6127         if (env->ldt.base == -1)
6128             return -TARGET_ENOMEM;
6129         memset(g2h(env->ldt.base), 0,
6130                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6131         env->ldt.limit = 0xffff;
6132         ldt_table = g2h(env->ldt.base);
6133     }
6134 
6135     /* NOTE: same code as Linux kernel */
6136     /* Allow LDTs to be cleared by the user. */
6137     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6138         if (oldmode ||
6139             (contents == 0             &&
6140              read_exec_only == 1       &&
6141              seg_32bit == 0            &&
6142              limit_in_pages == 0       &&
6143              seg_not_present == 1      &&
6144              useable == 0 )) {
6145             entry_1 = 0;
6146             entry_2 = 0;
6147             goto install;
6148         }
6149     }
6150 
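    /*
     * NOTE: entry_1/entry_2 below form the raw 8-byte x86 segment
     * descriptor.  entry_1 packs base[15:0] and limit[15:0]; entry_2 packs
     * base[31:24], limit[19:16], base[23:16] and the access bits (type from
     * 'contents', readable/writable, present, AVL, L, default-size and
     * granularity).  The 0x7000 constant sets the S (code/data) bit and
     * DPL=3, making this a user-mode segment.
     */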
6151     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6152         (ldt_info.limit & 0x0ffff);
6153     entry_2 = (ldt_info.base_addr & 0xff000000) |
6154         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6155         (ldt_info.limit & 0xf0000) |
6156         ((read_exec_only ^ 1) << 9) |
6157         (contents << 10) |
6158         ((seg_not_present ^ 1) << 15) |
6159         (seg_32bit << 22) |
6160         (limit_in_pages << 23) |
6161         (lm << 21) |
6162         0x7000;
6163     if (!oldmode)
6164         entry_2 |= (useable << 20);
6165 
6166     /* Install the new entry ...  */
6167 install:
6168     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6169     lp[0] = tswap32(entry_1);
6170     lp[1] = tswap32(entry_2);
6171     return 0;
6172 }
6173 
6174 /* specific and weird i386 syscalls */
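/*
 * NOTE: the 'func' values handled below follow modify_ldt(2): 0 reads the
 * LDT, 1 writes an entry in the legacy format (oldmode), and 0x11 writes an
 * entry in the current format.
 */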
6175 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6176                               unsigned long bytecount)
6177 {
6178     abi_long ret;
6179 
6180     switch (func) {
6181     case 0:
6182         ret = read_ldt(ptr, bytecount);
6183         break;
6184     case 1:
6185         ret = write_ldt(env, ptr, bytecount, 1);
6186         break;
6187     case 0x11:
6188         ret = write_ldt(env, ptr, bytecount, 0);
6189         break;
6190     default:
6191         ret = -TARGET_ENOSYS;
6192         break;
6193     }
6194     return ret;
6195 }
6196 
6197 #if defined(TARGET_ABI32)
6198 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6199 {
6200     uint64_t *gdt_table = g2h(env->gdt.base);
6201     struct target_modify_ldt_ldt_s ldt_info;
6202     struct target_modify_ldt_ldt_s *target_ldt_info;
6203     int seg_32bit, contents, read_exec_only, limit_in_pages;
6204     int seg_not_present, useable, lm;
6205     uint32_t *lp, entry_1, entry_2;
6206     int i;
6207 
6208     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6209     if (!target_ldt_info)
6210         return -TARGET_EFAULT;
6211     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6212     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6213     ldt_info.limit = tswap32(target_ldt_info->limit);
6214     ldt_info.flags = tswap32(target_ldt_info->flags);
6215     if (ldt_info.entry_number == -1) {
6216         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6217             if (gdt_table[i] == 0) {
6218                 ldt_info.entry_number = i;
6219                 target_ldt_info->entry_number = tswap32(i);
6220                 break;
6221             }
6222         }
6223     }
6224     unlock_user_struct(target_ldt_info, ptr, 1);
6225 
6226     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6227         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6228            return -TARGET_EINVAL;
6229     seg_32bit = ldt_info.flags & 1;
6230     contents = (ldt_info.flags >> 1) & 3;
6231     read_exec_only = (ldt_info.flags >> 3) & 1;
6232     limit_in_pages = (ldt_info.flags >> 4) & 1;
6233     seg_not_present = (ldt_info.flags >> 5) & 1;
6234     useable = (ldt_info.flags >> 6) & 1;
6235 #ifdef TARGET_ABI32
6236     lm = 0;
6237 #else
6238     lm = (ldt_info.flags >> 7) & 1;
6239 #endif
6240 
6241     if (contents == 3) {
6242         if (seg_not_present == 0)
6243             return -TARGET_EINVAL;
6244     }
6245 
6246     /* NOTE: same code as Linux kernel */
6247     /* Allow LDTs to be cleared by the user. */
6248     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6249         if ((contents == 0             &&
6250              read_exec_only == 1       &&
6251              seg_32bit == 0            &&
6252              limit_in_pages == 0       &&
6253              seg_not_present == 1      &&
6254              useable == 0 )) {
6255             entry_1 = 0;
6256             entry_2 = 0;
6257             goto install;
6258         }
6259     }
6260 
6261     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6262         (ldt_info.limit & 0x0ffff);
6263     entry_2 = (ldt_info.base_addr & 0xff000000) |
6264         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6265         (ldt_info.limit & 0xf0000) |
6266         ((read_exec_only ^ 1) << 9) |
6267         (contents << 10) |
6268         ((seg_not_present ^ 1) << 15) |
6269         (seg_32bit << 22) |
6270         (limit_in_pages << 23) |
6271         (useable << 20) |
6272         (lm << 21) |
6273         0x7000;
6274 
6275     /* Install the new entry ...  */
6276 install:
6277     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6278     lp[0] = tswap32(entry_1);
6279     lp[1] = tswap32(entry_2);
6280     return 0;
6281 }
6282 
6283 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6284 {
6285     struct target_modify_ldt_ldt_s *target_ldt_info;
6286     uint64_t *gdt_table = g2h(env->gdt.base);
6287     uint32_t base_addr, limit, flags;
6288     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6289     int seg_not_present, useable, lm;
6290     uint32_t *lp, entry_1, entry_2;
6291 
6292     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6293     if (!target_ldt_info)
6294         return -TARGET_EFAULT;
6295     idx = tswap32(target_ldt_info->entry_number);
6296     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6297         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6298         unlock_user_struct(target_ldt_info, ptr, 1);
6299         return -TARGET_EINVAL;
6300     }
6301     lp = (uint32_t *)(gdt_table + idx);
6302     entry_1 = tswap32(lp[0]);
6303     entry_2 = tswap32(lp[1]);
6304 
6305     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6306     contents = (entry_2 >> 10) & 3;
6307     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6308     seg_32bit = (entry_2 >> 22) & 1;
6309     limit_in_pages = (entry_2 >> 23) & 1;
6310     useable = (entry_2 >> 20) & 1;
6311 #ifdef TARGET_ABI32
6312     lm = 0;
6313 #else
6314     lm = (entry_2 >> 21) & 1;
6315 #endif
6316     flags = (seg_32bit << 0) | (contents << 1) |
6317         (read_exec_only << 3) | (limit_in_pages << 4) |
6318         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6319     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6320     base_addr = (entry_1 >> 16) |
6321         (entry_2 & 0xff000000) |
6322         ((entry_2 & 0xff) << 16);
6323     target_ldt_info->base_addr = tswapal(base_addr);
6324     target_ldt_info->limit = tswap32(limit);
6325     target_ldt_info->flags = tswap32(flags);
6326     unlock_user_struct(target_ldt_info, ptr, 1);
6327     return 0;
6328 }
6329 
6330 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6331 {
6332     return -TARGET_ENOSYS;
6333 }
6334 #else
6335 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6336 {
6337     abi_long ret = 0;
6338     abi_ulong val;
6339     int idx;
6340 
6341     switch(code) {
6342     case TARGET_ARCH_SET_GS:
6343     case TARGET_ARCH_SET_FS:
6344         if (code == TARGET_ARCH_SET_GS)
6345             idx = R_GS;
6346         else
6347             idx = R_FS;
6348         cpu_x86_load_seg(env, idx, 0);
6349         env->segs[idx].base = addr;
6350         break;
6351     case TARGET_ARCH_GET_GS:
6352     case TARGET_ARCH_GET_FS:
6353         if (code == TARGET_ARCH_GET_GS)
6354             idx = R_GS;
6355         else
6356             idx = R_FS;
6357         val = env->segs[idx].base;
6358         if (put_user(val, addr, abi_ulong))
6359             ret = -TARGET_EFAULT;
6360         break;
6361     default:
6362         ret = -TARGET_EINVAL;
6363         break;
6364     }
6365     return ret;
6366 }
6367 #endif /* defined(TARGET_ABI32) */
6368 
6369 #endif /* defined(TARGET_I386) */
6370 
6371 #define NEW_STACK_SIZE 0x40000
6372 
6373 
6374 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6375 typedef struct {
6376     CPUArchState *env;
6377     pthread_mutex_t mutex;
6378     pthread_cond_t cond;
6379     pthread_t thread;
6380     uint32_t tid;
6381     abi_ulong child_tidptr;
6382     abi_ulong parent_tidptr;
6383     sigset_t sigmask;
6384 } new_thread_info;
6385 
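/*
 * NOTE: clone_func() is the start routine handed to pthread_create() by
 * do_fork() for CLONE_VM clones.  The parent builds a new_thread_info on its
 * own stack, holds info->mutex across thread creation and waits on
 * info->cond until the child has published its TID; clone_lock additionally
 * delays the child's entry into cpu_loop() until the parent has finished the
 * shared setup.
 */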
6386 static void *clone_func(void *arg)
6387 {
6388     new_thread_info *info = arg;
6389     CPUArchState *env;
6390     CPUState *cpu;
6391     TaskState *ts;
6392 
6393     rcu_register_thread();
6394     tcg_register_thread();
6395     env = info->env;
6396     cpu = env_cpu(env);
6397     thread_cpu = cpu;
6398     ts = (TaskState *)cpu->opaque;
6399     info->tid = sys_gettid();
6400     task_settid(ts);
6401     if (info->child_tidptr)
6402         put_user_u32(info->tid, info->child_tidptr);
6403     if (info->parent_tidptr)
6404         put_user_u32(info->tid, info->parent_tidptr);
6405     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6406     /* Enable signals.  */
6407     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6408     /* Signal to the parent that we're ready.  */
6409     pthread_mutex_lock(&info->mutex);
6410     pthread_cond_broadcast(&info->cond);
6411     pthread_mutex_unlock(&info->mutex);
6412     /* Wait until the parent has finished initializing the tls state.  */
6413     pthread_mutex_lock(&clone_lock);
6414     pthread_mutex_unlock(&clone_lock);
6415     cpu_loop(env);
6416     /* never exits */
6417     return NULL;
6418 }
6419 
6420 /* do_fork() must return host values and target errnos (unlike most
6421    do_*() functions). */
6422 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6423                    abi_ulong parent_tidptr, target_ulong newtls,
6424                    abi_ulong child_tidptr)
6425 {
6426     CPUState *cpu = env_cpu(env);
6427     int ret;
6428     TaskState *ts;
6429     CPUState *new_cpu;
6430     CPUArchState *new_env;
6431     sigset_t sigmask;
6432 
6433     flags &= ~CLONE_IGNORED_FLAGS;
6434 
6435     /* Emulate vfork() with fork() */
6436     if (flags & CLONE_VFORK)
6437         flags &= ~(CLONE_VFORK | CLONE_VM);
6438 
6439     if (flags & CLONE_VM) {
6440         TaskState *parent_ts = (TaskState *)cpu->opaque;
6441         new_thread_info info;
6442         pthread_attr_t attr;
6443 
6444         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6445             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6446             return -TARGET_EINVAL;
6447         }
6448 
6449         ts = g_new0(TaskState, 1);
6450         init_task_state(ts);
6451 
6452         /* Grab a mutex so that thread setup appears atomic.  */
6453         pthread_mutex_lock(&clone_lock);
6454 
6455         /* we create a new CPU instance. */
6456         new_env = cpu_copy(env);
6457         /* Init regs that differ from the parent.  */
6458         cpu_clone_regs_child(new_env, newsp, flags);
6459         cpu_clone_regs_parent(env, flags);
6460         new_cpu = env_cpu(new_env);
6461         new_cpu->opaque = ts;
6462         ts->bprm = parent_ts->bprm;
6463         ts->info = parent_ts->info;
6464         ts->signal_mask = parent_ts->signal_mask;
6465 
6466         if (flags & CLONE_CHILD_CLEARTID) {
6467             ts->child_tidptr = child_tidptr;
6468         }
6469 
6470         if (flags & CLONE_SETTLS) {
6471             cpu_set_tls (new_env, newtls);
6472         }
6473 
6474         memset(&info, 0, sizeof(info));
6475         pthread_mutex_init(&info.mutex, NULL);
6476         pthread_mutex_lock(&info.mutex);
6477         pthread_cond_init(&info.cond, NULL);
6478         info.env = new_env;
6479         if (flags & CLONE_CHILD_SETTID) {
6480             info.child_tidptr = child_tidptr;
6481         }
6482         if (flags & CLONE_PARENT_SETTID) {
6483             info.parent_tidptr = parent_tidptr;
6484         }
6485 
6486         ret = pthread_attr_init(&attr);
6487         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6488         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6489         /* It is not safe to deliver signals until the child has finished
6490            initializing, so temporarily block all signals.  */
6491         sigfillset(&sigmask);
6492         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6493         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6494 
6495         /* If this is our first additional thread, we need to ensure we
6496          * generate code for parallel execution and flush old translations.
6497          */
6498         if (!parallel_cpus) {
6499             parallel_cpus = true;
6500             tb_flush(cpu);
6501         }
6502 
6503         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6504         /* TODO: Free new CPU state if thread creation failed.  */
6505 
6506         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6507         pthread_attr_destroy(&attr);
6508         if (ret == 0) {
6509             /* Wait for the child to initialize.  */
6510             pthread_cond_wait(&info.cond, &info.mutex);
6511             ret = info.tid;
6512         } else {
6513             ret = -1;
6514         }
6515         pthread_mutex_unlock(&info.mutex);
6516         pthread_cond_destroy(&info.cond);
6517         pthread_mutex_destroy(&info.mutex);
6518         pthread_mutex_unlock(&clone_lock);
6519     } else {
6520         /* if there is no CLONE_VM, we consider it a fork */
6521         if (flags & CLONE_INVALID_FORK_FLAGS) {
6522             return -TARGET_EINVAL;
6523         }
6524 
6525         /* We can't support custom termination signals */
6526         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6527             return -TARGET_EINVAL;
6528         }
6529 
6530         if (block_signals()) {
6531             return -TARGET_ERESTARTSYS;
6532         }
6533 
6534         fork_start();
6535         ret = fork();
6536         if (ret == 0) {
6537             /* Child Process.  */
6538             cpu_clone_regs_child(env, newsp, flags);
6539             fork_end(1);
6540             /* There is a race condition here.  The parent process could
6541                theoretically read the TID in the child process before the
6542                child tid is set.  Fixing this would require using either
6543                ptrace (not implemented) or having *_tidptr point at a shared
6544                memory mapping.  We can't repeat the spinlock hack used above
6545                because the child process gets its own copy of the lock.  */
6546             if (flags & CLONE_CHILD_SETTID)
6547                 put_user_u32(sys_gettid(), child_tidptr);
6548             if (flags & CLONE_PARENT_SETTID)
6549                 put_user_u32(sys_gettid(), parent_tidptr);
6550             ts = (TaskState *)cpu->opaque;
6551             if (flags & CLONE_SETTLS)
6552                 cpu_set_tls (env, newtls);
6553             if (flags & CLONE_CHILD_CLEARTID)
6554                 ts->child_tidptr = child_tidptr;
6555         } else {
6556             cpu_clone_regs_parent(env, flags);
6557             fork_end(0);
6558         }
6559     }
6560     return ret;
6561 }
6562 
6563 /* warning: does not handle Linux-specific flags... */
6564 static int target_to_host_fcntl_cmd(int cmd)
6565 {
6566     int ret;
6567 
6568     switch(cmd) {
6569     case TARGET_F_DUPFD:
6570     case TARGET_F_GETFD:
6571     case TARGET_F_SETFD:
6572     case TARGET_F_GETFL:
6573     case TARGET_F_SETFL:
6574     case TARGET_F_OFD_GETLK:
6575     case TARGET_F_OFD_SETLK:
6576     case TARGET_F_OFD_SETLKW:
6577         ret = cmd;
6578         break;
6579     case TARGET_F_GETLK:
6580         ret = F_GETLK64;
6581         break;
6582     case TARGET_F_SETLK:
6583         ret = F_SETLK64;
6584         break;
6585     case TARGET_F_SETLKW:
6586         ret = F_SETLKW64;
6587         break;
6588     case TARGET_F_GETOWN:
6589         ret = F_GETOWN;
6590         break;
6591     case TARGET_F_SETOWN:
6592         ret = F_SETOWN;
6593         break;
6594     case TARGET_F_GETSIG:
6595         ret = F_GETSIG;
6596         break;
6597     case TARGET_F_SETSIG:
6598         ret = F_SETSIG;
6599         break;
6600 #if TARGET_ABI_BITS == 32
6601     case TARGET_F_GETLK64:
6602         ret = F_GETLK64;
6603         break;
6604     case TARGET_F_SETLK64:
6605         ret = F_SETLK64;
6606         break;
6607     case TARGET_F_SETLKW64:
6608         ret = F_SETLKW64;
6609         break;
6610 #endif
6611     case TARGET_F_SETLEASE:
6612         ret = F_SETLEASE;
6613         break;
6614     case TARGET_F_GETLEASE:
6615         ret = F_GETLEASE;
6616         break;
6617 #ifdef F_DUPFD_CLOEXEC
6618     case TARGET_F_DUPFD_CLOEXEC:
6619         ret = F_DUPFD_CLOEXEC;
6620         break;
6621 #endif
6622     case TARGET_F_NOTIFY:
6623         ret = F_NOTIFY;
6624         break;
6625 #ifdef F_GETOWN_EX
6626     case TARGET_F_GETOWN_EX:
6627         ret = F_GETOWN_EX;
6628         break;
6629 #endif
6630 #ifdef F_SETOWN_EX
6631     case TARGET_F_SETOWN_EX:
6632         ret = F_SETOWN_EX;
6633         break;
6634 #endif
6635 #ifdef F_SETPIPE_SZ
6636     case TARGET_F_SETPIPE_SZ:
6637         ret = F_SETPIPE_SZ;
6638         break;
6639     case TARGET_F_GETPIPE_SZ:
6640         ret = F_GETPIPE_SZ;
6641         break;
6642 #endif
6643 #ifdef F_ADD_SEALS
6644     case TARGET_F_ADD_SEALS:
6645         ret = F_ADD_SEALS;
6646         break;
6647     case TARGET_F_GET_SEALS:
6648         ret = F_GET_SEALS;
6649         break;
6650 #endif
6651     default:
6652         ret = -TARGET_EINVAL;
6653         break;
6654     }
6655 
6656 #if defined(__powerpc64__)
6657     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6658      * are not supported by the kernel. The glibc fcntl wrapper adjusts
6659      * them to 5, 6 and 7 before making the syscall. Since we make the
6660      * syscall directly, adjust to what the kernel supports.
6661      */
6662     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6663         ret -= F_GETLK64 - 5;
6664     }
6665 #endif
6666 
6667     return ret;
6668 }
6669 
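/*
 * NOTE: FLOCK_TRANSTBL is an X-macro style table.  Each converter below
 * defines TRANSTBL_CONVERT to expand to the appropriate `case' label before
 * instantiating the switch, so one list of lock types produces both the
 * target-to-host and host-to-target mappings.
 */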
6670 #define FLOCK_TRANSTBL \
6671     switch (type) { \
6672     TRANSTBL_CONVERT(F_RDLCK); \
6673     TRANSTBL_CONVERT(F_WRLCK); \
6674     TRANSTBL_CONVERT(F_UNLCK); \
6675     TRANSTBL_CONVERT(F_EXLCK); \
6676     TRANSTBL_CONVERT(F_SHLCK); \
6677     }
6678 
6679 static int target_to_host_flock(int type)
6680 {
6681 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6682     FLOCK_TRANSTBL
6683 #undef  TRANSTBL_CONVERT
6684     return -TARGET_EINVAL;
6685 }
6686 
6687 static int host_to_target_flock(int type)
6688 {
6689 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6690     FLOCK_TRANSTBL
6691 #undef  TRANSTBL_CONVERT
6692     /* if we don't know how to convert the value coming
6693      * from the host, we copy it to the target field as-is
6694      */
6695     return type;
6696 }
6697 
6698 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6699                                             abi_ulong target_flock_addr)
6700 {
6701     struct target_flock *target_fl;
6702     int l_type;
6703 
6704     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6705         return -TARGET_EFAULT;
6706     }
6707 
6708     __get_user(l_type, &target_fl->l_type);
6709     l_type = target_to_host_flock(l_type);
6710     if (l_type < 0) {
6711         return l_type;
6712     }
6713     fl->l_type = l_type;
6714     __get_user(fl->l_whence, &target_fl->l_whence);
6715     __get_user(fl->l_start, &target_fl->l_start);
6716     __get_user(fl->l_len, &target_fl->l_len);
6717     __get_user(fl->l_pid, &target_fl->l_pid);
6718     unlock_user_struct(target_fl, target_flock_addr, 0);
6719     return 0;
6720 }
6721 
6722 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6723                                           const struct flock64 *fl)
6724 {
6725     struct target_flock *target_fl;
6726     short l_type;
6727 
6728     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6729         return -TARGET_EFAULT;
6730     }
6731 
6732     l_type = host_to_target_flock(fl->l_type);
6733     __put_user(l_type, &target_fl->l_type);
6734     __put_user(fl->l_whence, &target_fl->l_whence);
6735     __put_user(fl->l_start, &target_fl->l_start);
6736     __put_user(fl->l_len, &target_fl->l_len);
6737     __put_user(fl->l_pid, &target_fl->l_pid);
6738     unlock_user_struct(target_fl, target_flock_addr, 1);
6739     return 0;
6740 }
6741 
6742 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6743 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6744 
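/*
 * NOTE: these typedefs presumably let the fcntl64 handling (outside this
 * excerpt) choose a copy routine at run time, e.g. picking the OABI variants
 * below on 32-bit ARM instead of the generic flock64 copiers.
 */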
6745 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6746 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6747                                                    abi_ulong target_flock_addr)
6748 {
6749     struct target_oabi_flock64 *target_fl;
6750     int l_type;
6751 
6752     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6753         return -TARGET_EFAULT;
6754     }
6755 
6756     __get_user(l_type, &target_fl->l_type);
6757     l_type = target_to_host_flock(l_type);
6758     if (l_type < 0) {
6759         return l_type;
6760     }
6761     fl->l_type = l_type;
6762     __get_user(fl->l_whence, &target_fl->l_whence);
6763     __get_user(fl->l_start, &target_fl->l_start);
6764     __get_user(fl->l_len, &target_fl->l_len);
6765     __get_user(fl->l_pid, &target_fl->l_pid);
6766     unlock_user_struct(target_fl, target_flock_addr, 0);
6767     return 0;
6768 }
6769 
6770 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6771                                                  const struct flock64 *fl)
6772 {
6773     struct target_oabi_flock64 *target_fl;
6774     short l_type;
6775 
6776     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6777         return -TARGET_EFAULT;
6778     }
6779 
6780     l_type = host_to_target_flock(fl->l_type);
6781     __put_user(l_type, &target_fl->l_type);
6782     __put_user(fl->l_whence, &target_fl->l_whence);
6783     __put_user(fl->l_start, &target_fl->l_start);
6784     __put_user(fl->l_len, &target_fl->l_len);
6785     __put_user(fl->l_pid, &target_fl->l_pid);
6786     unlock_user_struct(target_fl, target_flock_addr, 1);
6787     return 0;
6788 }
6789 #endif
6790 
6791 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6792                                               abi_ulong target_flock_addr)
6793 {
6794     struct target_flock64 *target_fl;
6795     int l_type;
6796 
6797     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6798         return -TARGET_EFAULT;
6799     }
6800 
6801     __get_user(l_type, &target_fl->l_type);
6802     l_type = target_to_host_flock(l_type);
6803     if (l_type < 0) {
6804         return l_type;
6805     }
6806     fl->l_type = l_type;
6807     __get_user(fl->l_whence, &target_fl->l_whence);
6808     __get_user(fl->l_start, &target_fl->l_start);
6809     __get_user(fl->l_len, &target_fl->l_len);
6810     __get_user(fl->l_pid, &target_fl->l_pid);
6811     unlock_user_struct(target_fl, target_flock_addr, 0);
6812     return 0;
6813 }
6814 
6815 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6816                                             const struct flock64 *fl)
6817 {
6818     struct target_flock64 *target_fl;
6819     short l_type;
6820 
6821     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6822         return -TARGET_EFAULT;
6823     }
6824 
6825     l_type = host_to_target_flock(fl->l_type);
6826     __put_user(l_type, &target_fl->l_type);
6827     __put_user(fl->l_whence, &target_fl->l_whence);
6828     __put_user(fl->l_start, &target_fl->l_start);
6829     __put_user(fl->l_len, &target_fl->l_len);
6830     __put_user(fl->l_pid, &target_fl->l_pid);
6831     unlock_user_struct(target_fl, target_flock_addr, 1);
6832     return 0;
6833 }
6834 
6835 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6836 {
6837     struct flock64 fl64;
6838 #ifdef F_GETOWN_EX
6839     struct f_owner_ex fox;
6840     struct target_f_owner_ex *target_fox;
6841 #endif
6842     abi_long ret;
6843     int host_cmd = target_to_host_fcntl_cmd(cmd);
6844 
6845     if (host_cmd == -TARGET_EINVAL)
6846         return host_cmd;
6847 
6848     switch(cmd) {
6849     case TARGET_F_GETLK:
6850         ret = copy_from_user_flock(&fl64, arg);
6851         if (ret) {
6852             return ret;
6853         }
6854         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6855         if (ret == 0) {
6856             ret = copy_to_user_flock(arg, &fl64);
6857         }
6858         break;
6859 
6860     case TARGET_F_SETLK:
6861     case TARGET_F_SETLKW:
6862         ret = copy_from_user_flock(&fl64, arg);
6863         if (ret) {
6864             return ret;
6865         }
6866         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6867         break;
6868 
6869     case TARGET_F_GETLK64:
6870     case TARGET_F_OFD_GETLK:
6871         ret = copy_from_user_flock64(&fl64, arg);
6872         if (ret) {
6873             return ret;
6874         }
6875         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6876         if (ret == 0) {
6877             ret = copy_to_user_flock64(arg, &fl64);
6878         }
6879         break;
6880     case TARGET_F_SETLK64:
6881     case TARGET_F_SETLKW64:
6882     case TARGET_F_OFD_SETLK:
6883     case TARGET_F_OFD_SETLKW:
6884         ret = copy_from_user_flock64(&fl64, arg);
6885         if (ret) {
6886             return ret;
6887         }
6888         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6889         break;
6890 
6891     case TARGET_F_GETFL:
6892         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6893         if (ret >= 0) {
6894             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6895         }
6896         break;
6897 
6898     case TARGET_F_SETFL:
6899         ret = get_errno(safe_fcntl(fd, host_cmd,
6900                                    target_to_host_bitmask(arg,
6901                                                           fcntl_flags_tbl)));
6902         break;
6903 
6904 #ifdef F_GETOWN_EX
6905     case TARGET_F_GETOWN_EX:
6906         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6907         if (ret >= 0) {
6908             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6909                 return -TARGET_EFAULT;
6910             target_fox->type = tswap32(fox.type);
6911             target_fox->pid = tswap32(fox.pid);
6912             unlock_user_struct(target_fox, arg, 1);
6913         }
6914         break;
6915 #endif
6916 
6917 #ifdef F_SETOWN_EX
6918     case TARGET_F_SETOWN_EX:
6919         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6920             return -TARGET_EFAULT;
6921         fox.type = tswap32(target_fox->type);
6922         fox.pid = tswap32(target_fox->pid);
6923         unlock_user_struct(target_fox, arg, 0);
6924         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6925         break;
6926 #endif
6927 
6928     case TARGET_F_SETSIG:
6929         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6930         break;
6931 
6932     case TARGET_F_GETSIG:
6933         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6934         break;
6935 
6936     case TARGET_F_SETOWN:
6937     case TARGET_F_GETOWN:
6938     case TARGET_F_SETLEASE:
6939     case TARGET_F_GETLEASE:
6940     case TARGET_F_SETPIPE_SZ:
6941     case TARGET_F_GETPIPE_SZ:
6942     case TARGET_F_ADD_SEALS:
6943     case TARGET_F_GET_SEALS:
6944         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6945         break;
6946 
6947     default:
6948         ret = get_errno(safe_fcntl(fd, cmd, arg));
6949         break;
6950     }
6951     return ret;
6952 }
6953 
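/*
 * NOTE: the helpers below bridge the legacy 16-bit uid/gid ABI (USE_UID16)
 * and full 32-bit IDs.  IDs that do not fit in 16 bits are clamped to 65534,
 * the conventional "overflow" uid/gid; for example (illustrative only), a
 * host uid of 100000 would be reported through a 16-bit interface as 65534.
 */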
6954 #ifdef USE_UID16
6955 
6956 static inline int high2lowuid(int uid)
6957 {
6958     if (uid > 65535)
6959         return 65534;
6960     else
6961         return uid;
6962 }
6963 
6964 static inline int high2lowgid(int gid)
6965 {
6966     if (gid > 65535)
6967         return 65534;
6968     else
6969         return gid;
6970 }
6971 
6972 static inline int low2highuid(int uid)
6973 {
6974     if ((int16_t)uid == -1)
6975         return -1;
6976     else
6977         return uid;
6978 }
6979 
6980 static inline int low2highgid(int gid)
6981 {
6982     if ((int16_t)gid == -1)
6983         return -1;
6984     else
6985         return gid;
6986 }
6987 static inline int tswapid(int id)
6988 {
6989     return tswap16(id);
6990 }
6991 
6992 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6993 
6994 #else /* !USE_UID16 */
6995 static inline int high2lowuid(int uid)
6996 {
6997     return uid;
6998 }
6999 static inline int high2lowgid(int gid)
7000 {
7001     return gid;
7002 }
7003 static inline int low2highuid(int uid)
7004 {
7005     return uid;
7006 }
7007 static inline int low2highgid(int gid)
7008 {
7009     return gid;
7010 }
7011 static inline int tswapid(int id)
7012 {
7013     return tswap32(id);
7014 }
7015 
7016 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7017 
7018 #endif /* USE_UID16 */
7019 
7020 /* We must do direct syscalls for setting UID/GID, because we want to
7021  * implement the Linux system call semantics of "change only for this thread",
7022  * not the libc/POSIX semantics of "change for all threads in process".
7023  * (See http://ewontfix.com/17/ for more details.)
7024  * We use the 32-bit version of the syscalls if present; if it is not
7025  * then either the host architecture supports 32-bit UIDs natively with
7026  * the standard syscall, or the 16-bit UID is the best we can do.
7027  */
7028 #ifdef __NR_setuid32
7029 #define __NR_sys_setuid __NR_setuid32
7030 #else
7031 #define __NR_sys_setuid __NR_setuid
7032 #endif
7033 #ifdef __NR_setgid32
7034 #define __NR_sys_setgid __NR_setgid32
7035 #else
7036 #define __NR_sys_setgid __NR_setgid
7037 #endif
7038 #ifdef __NR_setresuid32
7039 #define __NR_sys_setresuid __NR_setresuid32
7040 #else
7041 #define __NR_sys_setresuid __NR_setresuid
7042 #endif
7043 #ifdef __NR_setresgid32
7044 #define __NR_sys_setresgid __NR_setresgid32
7045 #else
7046 #define __NR_sys_setresgid __NR_setresgid
7047 #endif
7048 
7049 _syscall1(int, sys_setuid, uid_t, uid)
7050 _syscall1(int, sys_setgid, gid_t, gid)
7051 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7052 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7053 
7054 void syscall_init(void)
7055 {
7056     IOCTLEntry *ie;
7057     const argtype *arg_type;
7058     int size;
7059     int i;
7060 
7061     thunk_init(STRUCT_MAX);
7062 
7063 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7064 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7065 #include "syscall_types.h"
7066 #undef STRUCT
7067 #undef STRUCT_SPECIAL
7068 
7069     /* Build the target_to_host_errno_table[] from
7070      * host_to_target_errno_table[]. */
7071     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7072         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7073     }
7074 
7075     /* We patch the ioctl size if necessary.  We rely on the fact that
7076        no ioctl has all bits set to '1' in its size field. */
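    /*
     * Sketch of the patch performed below (assuming TARGET_IOC_SIZEMASK /
     * TARGET_IOC_SIZESHIFT mirror the usual Linux _IOC encoding):
     *
     *   size = thunk_type_size(ie->arg_type + 1, 0);
     *   ie->target_cmd = (ie->target_cmd
     *                     & ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
     *                    | (size << TARGET_IOC_SIZESHIFT);
     *
     * i.e. an all-ones size field is a placeholder that gets replaced with
     * the real size of the struct the ioctl argument points to.
     */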
7077     ie = ioctl_entries;
7078     while (ie->target_cmd != 0) {
7079         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7080             TARGET_IOC_SIZEMASK) {
7081             arg_type = ie->arg_type;
7082             if (arg_type[0] != TYPE_PTR) {
7083                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7084                         ie->target_cmd);
7085                 exit(1);
7086             }
7087             arg_type++;
7088             size = thunk_type_size(arg_type, 0);
7089             ie->target_cmd = (ie->target_cmd &
7090                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7091                 (size << TARGET_IOC_SIZESHIFT);
7092         }
7093 
7094         /* automatic consistency check if same arch */
7095 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7096     (defined(__x86_64__) && defined(TARGET_X86_64))
7097         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7098             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7099                     ie->name, ie->target_cmd, ie->host_cmd);
7100         }
7101 #endif
7102         ie++;
7103     }
7104 }
7105 
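/*
 * NOTE: regpairs_aligned() presumably reports whether the target ABI
 * requires 64-bit syscall arguments to start in an even register pair.  When
 * it does, the 64-bit offset arrives one argument slot later, which is why
 * the truncate64/ftruncate64 wrappers below shift arg3/arg4 down before
 * recombining the halves with target_offset64().
 */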
7106 #ifdef TARGET_NR_truncate64
7107 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7108                                          abi_long arg2,
7109                                          abi_long arg3,
7110                                          abi_long arg4)
7111 {
7112     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7113         arg2 = arg3;
7114         arg3 = arg4;
7115     }
7116     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7117 }
7118 #endif
7119 
7120 #ifdef TARGET_NR_ftruncate64
7121 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7122                                           abi_long arg2,
7123                                           abi_long arg3,
7124                                           abi_long arg4)
7125 {
7126     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7127         arg2 = arg3;
7128         arg3 = arg4;
7129     }
7130     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7131 }
7132 #endif
7133 
7134 #if defined(TARGET_NR_timer_settime) || \
7135     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7136 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7137                                                  abi_ulong target_addr)
7138 {
7139     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7140                                 offsetof(struct target_itimerspec,
7141                                          it_interval)) ||
7142         target_to_host_timespec(&host_its->it_value, target_addr +
7143                                 offsetof(struct target_itimerspec,
7144                                          it_value))) {
7145         return -TARGET_EFAULT;
7146     }
7147 
7148     return 0;
7149 }
7150 #endif
7151 
7152 #if defined(TARGET_NR_timer_settime64) || \
7153     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7154 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7155                                                    abi_ulong target_addr)
7156 {
7157     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7158                                   offsetof(struct target__kernel_itimerspec,
7159                                            it_interval)) ||
7160         target_to_host_timespec64(&host_its->it_value, target_addr +
7161                                   offsetof(struct target__kernel_itimerspec,
7162                                            it_value))) {
7163         return -TARGET_EFAULT;
7164     }
7165 
7166     return 0;
7167 }
7168 #endif
7169 
7170 #if ((defined(TARGET_NR_timerfd_gettime) || \
7171       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7172       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7173 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7174                                                  struct itimerspec *host_its)
7175 {
7176     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7177                                                        it_interval),
7178                                 &host_its->it_interval) ||
7179         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7180                                                        it_value),
7181                                 &host_its->it_value)) {
7182         return -TARGET_EFAULT;
7183     }
7184     return 0;
7185 }
7186 #endif
7187 
7188 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7189       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7190       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7191 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7192                                                    struct itimerspec *host_its)
7193 {
7194     if (host_to_target_timespec64(target_addr +
7195                                   offsetof(struct target__kernel_itimerspec,
7196                                            it_interval),
7197                                   &host_its->it_interval) ||
7198         host_to_target_timespec64(target_addr +
7199                                   offsetof(struct target__kernel_itimerspec,
7200                                            it_value),
7201                                   &host_its->it_value)) {
7202         return -TARGET_EFAULT;
7203     }
7204     return 0;
7205 }
7206 #endif
7207 
7208 #if defined(TARGET_NR_adjtimex) || \
7209     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7210 static inline abi_long target_to_host_timex(struct timex *host_tx,
7211                                             abi_long target_addr)
7212 {
7213     struct target_timex *target_tx;
7214 
7215     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7216         return -TARGET_EFAULT;
7217     }
7218 
7219     __get_user(host_tx->modes, &target_tx->modes);
7220     __get_user(host_tx->offset, &target_tx->offset);
7221     __get_user(host_tx->freq, &target_tx->freq);
7222     __get_user(host_tx->maxerror, &target_tx->maxerror);
7223     __get_user(host_tx->esterror, &target_tx->esterror);
7224     __get_user(host_tx->status, &target_tx->status);
7225     __get_user(host_tx->constant, &target_tx->constant);
7226     __get_user(host_tx->precision, &target_tx->precision);
7227     __get_user(host_tx->tolerance, &target_tx->tolerance);
7228     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7229     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7230     __get_user(host_tx->tick, &target_tx->tick);
7231     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7232     __get_user(host_tx->jitter, &target_tx->jitter);
7233     __get_user(host_tx->shift, &target_tx->shift);
7234     __get_user(host_tx->stabil, &target_tx->stabil);
7235     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7236     __get_user(host_tx->calcnt, &target_tx->calcnt);
7237     __get_user(host_tx->errcnt, &target_tx->errcnt);
7238     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7239     __get_user(host_tx->tai, &target_tx->tai);
7240 
7241     unlock_user_struct(target_tx, target_addr, 0);
7242     return 0;
7243 }
7244 
7245 static inline abi_long host_to_target_timex(abi_long target_addr,
7246                                             struct timex *host_tx)
7247 {
7248     struct target_timex *target_tx;
7249 
7250     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7251         return -TARGET_EFAULT;
7252     }
7253 
7254     __put_user(host_tx->modes, &target_tx->modes);
7255     __put_user(host_tx->offset, &target_tx->offset);
7256     __put_user(host_tx->freq, &target_tx->freq);
7257     __put_user(host_tx->maxerror, &target_tx->maxerror);
7258     __put_user(host_tx->esterror, &target_tx->esterror);
7259     __put_user(host_tx->status, &target_tx->status);
7260     __put_user(host_tx->constant, &target_tx->constant);
7261     __put_user(host_tx->precision, &target_tx->precision);
7262     __put_user(host_tx->tolerance, &target_tx->tolerance);
7263     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7264     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7265     __put_user(host_tx->tick, &target_tx->tick);
7266     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7267     __put_user(host_tx->jitter, &target_tx->jitter);
7268     __put_user(host_tx->shift, &target_tx->shift);
7269     __put_user(host_tx->stabil, &target_tx->stabil);
7270     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7271     __put_user(host_tx->calcnt, &target_tx->calcnt);
7272     __put_user(host_tx->errcnt, &target_tx->errcnt);
7273     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7274     __put_user(host_tx->tai, &target_tx->tai);
7275 
7276     unlock_user_struct(target_tx, target_addr, 1);
7277     return 0;
7278 }
7279 #endif
7280 
7281 
7282 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7283 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7284                                               abi_long target_addr)
7285 {
7286     struct target__kernel_timex *target_tx;
7287 
7288     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7289                                  offsetof(struct target__kernel_timex,
7290                                           time))) {
7291         return -TARGET_EFAULT;
7292     }
7293 
7294     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7295         return -TARGET_EFAULT;
7296     }
7297 
7298     __get_user(host_tx->modes, &target_tx->modes);
7299     __get_user(host_tx->offset, &target_tx->offset);
7300     __get_user(host_tx->freq, &target_tx->freq);
7301     __get_user(host_tx->maxerror, &target_tx->maxerror);
7302     __get_user(host_tx->esterror, &target_tx->esterror);
7303     __get_user(host_tx->status, &target_tx->status);
7304     __get_user(host_tx->constant, &target_tx->constant);
7305     __get_user(host_tx->precision, &target_tx->precision);
7306     __get_user(host_tx->tolerance, &target_tx->tolerance);
7307     __get_user(host_tx->tick, &target_tx->tick);
7308     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7309     __get_user(host_tx->jitter, &target_tx->jitter);
7310     __get_user(host_tx->shift, &target_tx->shift);
7311     __get_user(host_tx->stabil, &target_tx->stabil);
7312     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7313     __get_user(host_tx->calcnt, &target_tx->calcnt);
7314     __get_user(host_tx->errcnt, &target_tx->errcnt);
7315     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7316     __get_user(host_tx->tai, &target_tx->tai);
7317 
7318     unlock_user_struct(target_tx, target_addr, 0);
7319     return 0;
7320 }
7321 
7322 static inline abi_long host_to_target_timex64(abi_long target_addr,
7323                                               struct timex *host_tx)
7324 {
7325     struct target__kernel_timex *target_tx;
7326 
7327     if (copy_to_user_timeval64(target_addr +
7328                                offsetof(struct target__kernel_timex, time),
7329                                &host_tx->time)) {
7330         return -TARGET_EFAULT;
7331     }
7332 
7333     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7334         return -TARGET_EFAULT;
7335     }
7336 
7337     __put_user(host_tx->modes, &target_tx->modes);
7338     __put_user(host_tx->offset, &target_tx->offset);
7339     __put_user(host_tx->freq, &target_tx->freq);
7340     __put_user(host_tx->maxerror, &target_tx->maxerror);
7341     __put_user(host_tx->esterror, &target_tx->esterror);
7342     __put_user(host_tx->status, &target_tx->status);
7343     __put_user(host_tx->constant, &target_tx->constant);
7344     __put_user(host_tx->precision, &target_tx->precision);
7345     __put_user(host_tx->tolerance, &target_tx->tolerance);
7346     __put_user(host_tx->tick, &target_tx->tick);
7347     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7348     __put_user(host_tx->jitter, &target_tx->jitter);
7349     __put_user(host_tx->shift, &target_tx->shift);
7350     __put_user(host_tx->stabil, &target_tx->stabil);
7351     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7352     __put_user(host_tx->calcnt, &target_tx->calcnt);
7353     __put_user(host_tx->errcnt, &target_tx->errcnt);
7354     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7355     __put_user(host_tx->tai, &target_tx->tai);
7356 
7357     unlock_user_struct(target_tx, target_addr, 1);
7358     return 0;
7359 }
7360 #endif
7361 
7362 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7363                                                abi_ulong target_addr)
7364 {
7365     struct target_sigevent *target_sevp;
7366 
7367     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7368         return -TARGET_EFAULT;
7369     }
7370 
7371     /* This union is awkward on 64 bit systems because it has a 32 bit
7372      * integer and a pointer in it; we follow the conversion approach
7373      * used for handling sigval types in signal.c so the guest should get
7374      * the correct value back even if we did a 64 bit byteswap and it's
7375      * using the 32 bit integer.
7376      */
7377     host_sevp->sigev_value.sival_ptr =
7378         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7379     host_sevp->sigev_signo =
7380         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7381     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7382     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7383 
7384     unlock_user_struct(target_sevp, target_addr, 1);
7385     return 0;
7386 }
7387 
7388 #if defined(TARGET_NR_mlockall)
7389 static inline int target_to_host_mlockall_arg(int arg)
7390 {
7391     int result = 0;
7392 
7393     if (arg & TARGET_MCL_CURRENT) {
7394         result |= MCL_CURRENT;
7395     }
7396     if (arg & TARGET_MCL_FUTURE) {
7397         result |= MCL_FUTURE;
7398     }
7399 #ifdef MCL_ONFAULT
7400     if (arg & TARGET_MCL_ONFAULT) {
7401         result |= MCL_ONFAULT;
7402     }
7403 #endif
7404 
7405     return result;
7406 }
7407 #endif
7408 
7409 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7410      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7411      defined(TARGET_NR_newfstatat))
7412 static inline abi_long host_to_target_stat64(void *cpu_env,
7413                                              abi_ulong target_addr,
7414                                              struct stat *host_st)
7415 {
7416 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7417     if (((CPUARMState *)cpu_env)->eabi) {
7418         struct target_eabi_stat64 *target_st;
7419 
7420         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7421             return -TARGET_EFAULT;
7422         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7423         __put_user(host_st->st_dev, &target_st->st_dev);
7424         __put_user(host_st->st_ino, &target_st->st_ino);
7425 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7426         __put_user(host_st->st_ino, &target_st->__st_ino);
7427 #endif
7428         __put_user(host_st->st_mode, &target_st->st_mode);
7429         __put_user(host_st->st_nlink, &target_st->st_nlink);
7430         __put_user(host_st->st_uid, &target_st->st_uid);
7431         __put_user(host_st->st_gid, &target_st->st_gid);
7432         __put_user(host_st->st_rdev, &target_st->st_rdev);
7433         __put_user(host_st->st_size, &target_st->st_size);
7434         __put_user(host_st->st_blksize, &target_st->st_blksize);
7435         __put_user(host_st->st_blocks, &target_st->st_blocks);
7436         __put_user(host_st->st_atime, &target_st->target_st_atime);
7437         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7438         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7439 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7440         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7441         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7442         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7443 #endif
7444         unlock_user_struct(target_st, target_addr, 1);
7445     } else
7446 #endif
7447     {
7448 #if defined(TARGET_HAS_STRUCT_STAT64)
7449         struct target_stat64 *target_st;
7450 #else
7451         struct target_stat *target_st;
7452 #endif
7453 
7454         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7455             return -TARGET_EFAULT;
7456         memset(target_st, 0, sizeof(*target_st));
7457         __put_user(host_st->st_dev, &target_st->st_dev);
7458         __put_user(host_st->st_ino, &target_st->st_ino);
7459 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7460         __put_user(host_st->st_ino, &target_st->__st_ino);
7461 #endif
7462         __put_user(host_st->st_mode, &target_st->st_mode);
7463         __put_user(host_st->st_nlink, &target_st->st_nlink);
7464         __put_user(host_st->st_uid, &target_st->st_uid);
7465         __put_user(host_st->st_gid, &target_st->st_gid);
7466         __put_user(host_st->st_rdev, &target_st->st_rdev);
7467         /* XXX: better use of kernel struct */
7468         __put_user(host_st->st_size, &target_st->st_size);
7469         __put_user(host_st->st_blksize, &target_st->st_blksize);
7470         __put_user(host_st->st_blocks, &target_st->st_blocks);
7471         __put_user(host_st->st_atime, &target_st->target_st_atime);
7472         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7473         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7474 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7475         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7476         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7477         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7478 #endif
7479         unlock_user_struct(target_st, target_addr, 1);
7480     }
7481 
7482     return 0;
7483 }
7484 #endif
7485 
7486 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7487 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7488                                             abi_ulong target_addr)
7489 {
7490     struct target_statx *target_stx;
7491 
7492     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7493         return -TARGET_EFAULT;
7494     }
7495     memset(target_stx, 0, sizeof(*target_stx));
7496 
7497     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7498     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7499     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7500     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7501     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7502     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7503     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7504     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7505     __put_user(host_stx->stx_size, &target_stx->stx_size);
7506     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7507     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7508     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7509     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7510     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7511     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7512     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7513     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7514     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7515     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7516     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7517     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7518     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7519     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7520 
7521     unlock_user_struct(target_stx, target_addr, 1);
7522 
7523     return 0;
7524 }
7525 #endif
7526 
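/*
 * NOTE: do_sys_futex() and do_safe_futex() below pick the host syscall from
 * how the host represents timespec: 64-bit hosts always use __NR_futex
 * (their time_t is already 64-bit), while 32-bit hosts use __NR_futex_time64
 * when the libc timespec carries a 64-bit tv_sec and fall back to the
 * classic __NR_futex otherwise.
 */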
7527 static int do_sys_futex(int *uaddr, int op, int val,
7528                          const struct timespec *timeout, int *uaddr2,
7529                          int val3)
7530 {
7531 #if HOST_LONG_BITS == 64
7532 #if defined(__NR_futex)
7533     /* a 64-bit host always has a 64-bit time_t and no _time64 syscall variant */
7534     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7535 
7536 #endif
7537 #else /* HOST_LONG_BITS == 64 */
7538 #if defined(__NR_futex_time64)
7539     if (sizeof(timeout->tv_sec) == 8) {
7540         /* _time64 function on 32bit arch */
7541         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7542     }
7543 #endif
7544 #if defined(__NR_futex)
7545     /* old function on 32bit arch */
7546     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7547 #endif
7548 #endif /* HOST_LONG_BITS == 64 */
7549     g_assert_not_reached();
7550 }
7551 
7552 static int do_safe_futex(int *uaddr, int op, int val,
7553                          const struct timespec *timeout, int *uaddr2,
7554                          int val3)
7555 {
7556 #if HOST_LONG_BITS == 64
7557 #if defined(__NR_futex)
7558     /* a 64-bit host always has a 64-bit time_t and no _time64 syscall variant */
7559     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7560 #endif
7561 #else /* HOST_LONG_BITS == 64 */
7562 #if defined(__NR_futex_time64)
7563     if (sizeof(timeout->tv_sec) == 8) {
7564         /* _time64 function on 32bit arch */
7565         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7566                                            val3));
7567     }
7568 #endif
7569 #if defined(__NR_futex)
7570     /* old function on 32bit arch */
7571     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7572 #endif
7573 #endif /* HOST_LONG_BITS == 64 */
7574     return -TARGET_ENOSYS;
7575 }
7576 
7577 /* ??? Using host futex calls even when target atomic operations
7578    are not really atomic probably breaks things.  However, implementing
7579    futexes locally would make futexes shared between multiple processes
7580    tricky.  They are probably useless anyway, because guest atomic
7581    operations won't work either.  */
7582 #if defined(TARGET_NR_futex)
7583 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7584                     target_ulong uaddr2, int val3)
7585 {
7586     struct timespec ts, *pts;
7587     int base_op;
7588 
7589     /* ??? We assume FUTEX_* constants are the same on both host
7590        and target.  */
7591 #ifdef FUTEX_CMD_MASK
7592     base_op = op & FUTEX_CMD_MASK;
7593 #else
7594     base_op = op;
7595 #endif
7596     switch (base_op) {
7597     case FUTEX_WAIT:
7598     case FUTEX_WAIT_BITSET:
7599         if (timeout) {
7600             pts = &ts;
7601             target_to_host_timespec(pts, timeout);
7602         } else {
7603             pts = NULL;
7604         }
7605         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7606     case FUTEX_WAKE:
7607         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7608     case FUTEX_FD:
7609         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7610     case FUTEX_REQUEUE:
7611     case FUTEX_CMP_REQUEUE:
7612     case FUTEX_WAKE_OP:
7613         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7614            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7615            But the prototype takes a `struct timespec *'; insert casts
7616            to satisfy the compiler.  We do not need to tswap TIMEOUT
7617            since it's not compared to guest memory.  */
7618         pts = (struct timespec *)(uintptr_t) timeout;
7619         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7620                              (base_op == FUTEX_CMP_REQUEUE
7621                                       ? tswap32(val3)
7622                                       : val3));
7623     default:
7624         return -TARGET_ENOSYS;
7625     }
7626 }
7627 #endif
7628 
7629 #if defined(TARGET_NR_futex_time64)
7630 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7631                            target_ulong uaddr2, int val3)
7632 {
7633     struct timespec ts, *pts;
7634     int base_op;
7635 
7636     /* ??? We assume FUTEX_* constants are the same on both host
7637        and target.  */
7638 #ifdef FUTEX_CMD_MASK
7639     base_op = op & FUTEX_CMD_MASK;
7640 #else
7641     base_op = op;
7642 #endif
7643     switch (base_op) {
7644     case FUTEX_WAIT:
7645     case FUTEX_WAIT_BITSET:
7646         if (timeout) {
7647             pts = &ts;
7648             if (target_to_host_timespec64(pts, timeout)) {
7649                 return -TARGET_EFAULT;
7650             }
7651         } else {
7652             pts = NULL;
7653         }
7654         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7655     case FUTEX_WAKE:
7656         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7657     case FUTEX_FD:
7658         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7659     case FUTEX_REQUEUE:
7660     case FUTEX_CMP_REQUEUE:
7661     case FUTEX_WAKE_OP:
7662         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7663            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7664            But the prototype takes a `struct timespec *'; insert casts
7665            to satisfy the compiler.  We do not need to tswap TIMEOUT
7666            since it's not compared to guest memory.  */
7667         pts = (struct timespec *)(uintptr_t) timeout;
7668         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7669                              (base_op == FUTEX_CMP_REQUEUE
7670                                       ? tswap32(val3)
7671                                       : val3));
7672     default:
7673         return -TARGET_ENOSYS;
7674     }
7675 }
7676 #endif
7677 
7678 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
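/*
 * Emulate name_to_handle_at(): read handle_bytes from the guest
 * file_handle, call the host syscall, then copy the handle back with
 * handle_bytes/handle_type byte-swapped and store the mount id.
 */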
7679 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7680                                      abi_long handle, abi_long mount_id,
7681                                      abi_long flags)
7682 {
7683     struct file_handle *target_fh;
7684     struct file_handle *fh;
7685     int mid = 0;
7686     abi_long ret;
7687     char *name;
7688     unsigned int size, total_size;
7689 
7690     if (get_user_s32(size, handle)) {
7691         return -TARGET_EFAULT;
7692     }
7693 
7694     name = lock_user_string(pathname);
7695     if (!name) {
7696         return -TARGET_EFAULT;
7697     }
7698 
7699     total_size = sizeof(struct file_handle) + size;
7700     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7701     if (!target_fh) {
7702         unlock_user(name, pathname, 0);
7703         return -TARGET_EFAULT;
7704     }
7705 
7706     fh = g_malloc0(total_size);
7707     fh->handle_bytes = size;
7708 
7709     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7710     unlock_user(name, pathname, 0);
7711 
7712     /* man name_to_handle_at(2):
7713      * Other than the use of the handle_bytes field, the caller should treat
7714      * the file_handle structure as an opaque data type
7715      */
7716 
7717     memcpy(target_fh, fh, total_size);
7718     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7719     target_fh->handle_type = tswap32(fh->handle_type);
7720     g_free(fh);
7721     unlock_user(target_fh, handle, total_size);
7722 
7723     if (put_user_s32(mid, mount_id)) {
7724         return -TARGET_EFAULT;
7725     }
7726 
7727     return ret;
7728 
7729 }
7730 #endif
7731 
7732 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
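/*
 * Emulate open_by_handle_at(): duplicate the guest file_handle, fix up
 * the handle_bytes/handle_type byte order, convert the open flags and
 * call the host syscall.
 */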
7733 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7734                                      abi_long flags)
7735 {
7736     struct file_handle *target_fh;
7737     struct file_handle *fh;
7738     unsigned int size, total_size;
7739     abi_long ret;
7740 
7741     if (get_user_s32(size, handle)) {
7742         return -TARGET_EFAULT;
7743     }
7744 
7745     total_size = sizeof(struct file_handle) + size;
7746     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7747     if (!target_fh) {
7748         return -TARGET_EFAULT;
7749     }
7750 
7751     fh = g_memdup(target_fh, total_size);
7752     fh->handle_bytes = size;
7753     fh->handle_type = tswap32(target_fh->handle_type);
7754 
7755     ret = get_errno(open_by_handle_at(mount_fd, fh,
7756                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7757 
7758     g_free(fh);
7759 
7760     unlock_user(target_fh, handle, total_size);
7761 
7762     return ret;
7763 }
7764 #endif
7765 
7766 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7767 
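/*
 * Emulate signalfd4(): convert the guest signal mask and flags to host
 * form, call signalfd(), and register an fd translator so that data
 * read from the descriptor is converted back to the target layout.
 */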
7768 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7769 {
7770     int host_flags;
7771     target_sigset_t *target_mask;
7772     sigset_t host_mask;
7773     abi_long ret;
7774 
7775     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7776         return -TARGET_EINVAL;
7777     }
7778     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7779         return -TARGET_EFAULT;
7780     }
7781 
7782     target_to_host_sigset(&host_mask, target_mask);
7783 
7784     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7785 
7786     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7787     if (ret >= 0) {
7788         fd_trans_register(ret, &target_signalfd_trans);
7789     }
7790 
7791     unlock_user_struct(target_mask, mask, 0);
7792 
7793     return ret;
7794 }
7795 #endif
7796 
7797 /* Map host to target signal numbers for the wait family of syscalls.
7798    Assume all other status bits are the same.  */
7799 int host_to_target_waitstatus(int status)
7800 {
7801     if (WIFSIGNALED(status)) {
7802         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7803     }
7804     if (WIFSTOPPED(status)) {
7805         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7806                | (status & 0xff);
7807     }
7808     return status;
7809 }
7810 
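/*
 * Synthesize /proc/self/cmdline from the saved guest argv: each
 * argument is written including its trailing NUL, as the kernel does.
 */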
7811 static int open_self_cmdline(void *cpu_env, int fd)
7812 {
7813     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7814     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7815     int i;
7816 
7817     for (i = 0; i < bprm->argc; i++) {
7818         size_t len = strlen(bprm->argv[i]) + 1;
7819 
7820         if (write(fd, bprm->argv[i], len) != len) {
7821             return -1;
7822         }
7823     }
7824 
7825     return 0;
7826 }
7827 
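/*
 * Synthesize /proc/self/maps: walk the host mappings, keep those that
 * correspond to valid guest addresses, and print them translated into
 * guest addresses in the kernel's maps format.
 */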
7828 static int open_self_maps(void *cpu_env, int fd)
7829 {
7830     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7831     TaskState *ts = cpu->opaque;
7832     GSList *map_info = read_self_maps();
7833     GSList *s;
7834     int count;
7835 
7836     for (s = map_info; s; s = g_slist_next(s)) {
7837         MapInfo *e = (MapInfo *) s->data;
7838 
7839         if (h2g_valid(e->start)) {
7840             unsigned long min = e->start;
7841             unsigned long max = e->end;
7842             int flags = page_get_flags(h2g(min));
7843             const char *path;
7844 
7845             max = h2g_valid(max - 1) ?
7846                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7847 
7848             if (page_check_range(h2g(min), max - min, flags) == -1) {
7849                 continue;
7850             }
7851 
7852             if (h2g(min) == ts->info->stack_limit) {
7853                 path = "[stack]";
7854             } else {
7855                 path = e->path;
7856             }
7857 
7858             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7859                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7860                             h2g(min), h2g(max - 1) + 1,
7861                             e->is_read ? 'r' : '-',
7862                             e->is_write ? 'w' : '-',
7863                             e->is_exec ? 'x' : '-',
7864                             e->is_priv ? 'p' : '-',
7865                             (uint64_t) e->offset, e->dev, e->inode);
7866             if (path) {
7867                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7868             } else {
7869                 dprintf(fd, "\n");
7870             }
7871         }
7872     }
7873 
7874     free_self_maps(map_info);
7875 
7876 #ifdef TARGET_VSYSCALL_PAGE
7877     /*
7878      * We only support execution from the vsyscall page.
7879      * This matches CONFIG_LEGACY_VSYSCALL_XONLY=y from Linux v5.3.
7880      */
7881     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7882                     " --xp 00000000 00:00 0",
7883                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7884     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7885 #endif
7886 
7887     return 0;
7888 }
7889 
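/*
 * Synthesize /proc/self/stat: only the pid, command name and stack
 * start fields are filled in; every other field reads as 0.
 */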
7890 static int open_self_stat(void *cpu_env, int fd)
7891 {
7892     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7893     TaskState *ts = cpu->opaque;
7894     g_autoptr(GString) buf = g_string_new(NULL);
7895     int i;
7896 
7897     for (i = 0; i < 44; i++) {
7898         if (i == 0) {
7899             /* pid */
7900             g_string_printf(buf, FMT_pid " ", getpid());
7901         } else if (i == 1) {
7902             /* app name */
7903             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7904             bin = bin ? bin + 1 : ts->bprm->argv[0];
7905             g_string_printf(buf, "(%.15s) ", bin);
7906         } else if (i == 27) {
7907             /* stack bottom */
7908             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7909         } else {
7910             /* the remaining fields are not emulated; report them as 0 */
7911             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7912         }
7913 
7914         if (write(fd, buf->str, buf->len) != buf->len) {
7915             return -1;
7916         }
7917     }
7918 
7919     return 0;
7920 }
7921 
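/*
 * Synthesize /proc/self/auxv by copying the guest's saved auxiliary
 * vector out of guest memory into the file.
 */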
7922 static int open_self_auxv(void *cpu_env, int fd)
7923 {
7924     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7925     TaskState *ts = cpu->opaque;
7926     abi_ulong auxv = ts->info->saved_auxv;
7927     abi_ulong len = ts->info->auxv_len;
7928     char *ptr;
7929 
7930     /*
7931      * The auxiliary vector is stored on the target process's stack;
7932      * read the whole vector and copy it to the file.
7933      */
7934     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7935     if (ptr != NULL) {
7936         while (len > 0) {
7937             ssize_t r;
7938             r = write(fd, ptr, len);
7939             if (r <= 0) {
7940                 break;
7941             }
7942             len -= r;
7943             ptr += r;
7944         }
7945         lseek(fd, 0, SEEK_SET);
7946         unlock_user(ptr, auxv, len);
7947     }
7948 
7949     return 0;
7950 }
7951 
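/*
 * Return nonzero if filename names the given entry under /proc/self/
 * or /proc/<pid>/ for our own pid.
 */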
7952 static int is_proc_myself(const char *filename, const char *entry)
7953 {
7954     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7955         filename += strlen("/proc/");
7956         if (!strncmp(filename, "self/", strlen("self/"))) {
7957             filename += strlen("self/");
7958         } else if (*filename >= '1' && *filename <= '9') {
7959             char myself[80];
7960             snprintf(myself, sizeof(myself), "%d/", getpid());
7961             if (!strncmp(filename, myself, strlen(myself))) {
7962                 filename += strlen(myself);
7963             } else {
7964                 return 0;
7965             }
7966         } else {
7967             return 0;
7968         }
7969         if (!strcmp(filename, entry)) {
7970             return 1;
7971         }
7972     }
7973     return 0;
7974 }
7975 
7976 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7977     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7978 static int is_proc(const char *filename, const char *entry)
7979 {
7980     return strcmp(filename, entry) == 0;
7981 }
7982 #endif
7983 
7984 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
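/*
 * When host and guest endianness differ, rewrite /proc/net/route with
 * the 32-bit address fields byte-swapped for the guest.
 */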
7985 static int open_net_route(void *cpu_env, int fd)
7986 {
7987     FILE *fp;
7988     char *line = NULL;
7989     size_t len = 0;
7990     ssize_t read;
7991 
7992     fp = fopen("/proc/net/route", "r");
7993     if (fp == NULL) {
7994         return -1;
7995     }
7996 
7997     /* read header */
7998 
7999     read = getline(&line, &len, fp);
8000     if (read == -1) {
             free(line);
             fclose(fp);
             return -1;
         }
         dprintf(fd, "%s", line);
8001 
8002     /* read routes */
8003 
8004     while ((read = getline(&line, &len, fp)) != -1) {
8005         char iface[16];
8006         uint32_t dest, gw, mask;
8007         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8008         int fields;
8009 
8010         fields = sscanf(line,
8011                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8012                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8013                         &mask, &mtu, &window, &irtt);
8014         if (fields != 11) {
8015             continue;
8016         }
8017         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8018                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8019                 metric, tswap32(mask), mtu, window, irtt);
8020     }
8021 
8022     free(line);
8023     fclose(fp);
8024 
8025     return 0;
8026 }
8027 #endif
8028 
8029 #if defined(TARGET_SPARC)
8030 static int open_cpuinfo(void *cpu_env, int fd)
8031 {
8032     dprintf(fd, "type\t\t: sun4u\n");
8033     return 0;
8034 }
8035 #endif
8036 
8037 #if defined(TARGET_HPPA)
8038 static int open_cpuinfo(void *cpu_env, int fd)
8039 {
8040     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8041     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8042     dprintf(fd, "capabilities\t: os32\n");
8043     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8044     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8045     return 0;
8046 }
8047 #endif
8048 
8049 #if defined(TARGET_M68K)
8050 static int open_hardware(void *cpu_env, int fd)
8051 {
8052     dprintf(fd, "Model:\t\tqemu-m68k\n");
8053     return 0;
8054 }
8055 #endif
8056 
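/*
 * openat() emulation: /proc paths whose contents must reflect the
 * guest's view are intercepted and synthesized into an unlinked
 * temporary file; /proc/self/exe is redirected to the guest binary;
 * everything else goes to the host via safe_openat().
 */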
8057 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8058 {
8059     struct fake_open {
8060         const char *filename;
8061         int (*fill)(void *cpu_env, int fd);
8062         int (*cmp)(const char *s1, const char *s2);
8063     };
8064     const struct fake_open *fake_open;
8065     static const struct fake_open fakes[] = {
8066         { "maps", open_self_maps, is_proc_myself },
8067         { "stat", open_self_stat, is_proc_myself },
8068         { "auxv", open_self_auxv, is_proc_myself },
8069         { "cmdline", open_self_cmdline, is_proc_myself },
8070 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8071         { "/proc/net/route", open_net_route, is_proc },
8072 #endif
8073 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8074         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8075 #endif
8076 #if defined(TARGET_M68K)
8077         { "/proc/hardware", open_hardware, is_proc },
8078 #endif
8079         { NULL, NULL, NULL }
8080     };
8081 
8082     if (is_proc_myself(pathname, "exe")) {
8083         int execfd = qemu_getauxval(AT_EXECFD);
8084         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8085     }
8086 
8087     for (fake_open = fakes; fake_open->filename; fake_open++) {
8088         if (fake_open->cmp(pathname, fake_open->filename)) {
8089             break;
8090         }
8091     }
8092 
8093     if (fake_open->filename) {
8094         const char *tmpdir;
8095         char filename[PATH_MAX];
8096         int fd, r;
8097 
8098         /* create a temporary file to hold the synthesized /proc contents */
8099         tmpdir = getenv("TMPDIR");
8100         if (!tmpdir)
8101             tmpdir = "/tmp";
8102         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8103         fd = mkstemp(filename);
8104         if (fd < 0) {
8105             return fd;
8106         }
8107         unlink(filename);
8108 
8109         if ((r = fake_open->fill(cpu_env, fd))) {
8110             int e = errno;
8111             close(fd);
8112             errno = e;
8113             return r;
8114         }
8115         lseek(fd, 0, SEEK_SET);
8116 
8117         return fd;
8118     }
8119 
8120     return safe_openat(dirfd, path(pathname), flags, mode);
8121 }
8122 
8123 #define TIMER_MAGIC 0x0caf0000
8124 #define TIMER_MAGIC_MASK 0xffff0000
8125 
8126 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8127 static target_timer_t get_timer_id(abi_long arg)
8128 {
8129     target_timer_t timerid = arg;
8130 
8131     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8132         return -TARGET_EINVAL;
8133     }
8134 
8135     timerid &= 0xffff;
8136 
8137     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8138         return -TARGET_EINVAL;
8139     }
8140 
8141     return timerid;
8142 }
8143 
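/*
 * Unpack a guest CPU affinity mask (an array of abi_ulong) bit by bit
 * into the host's unsigned long bitmap layout.
 */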
8144 static int target_to_host_cpu_mask(unsigned long *host_mask,
8145                                    size_t host_size,
8146                                    abi_ulong target_addr,
8147                                    size_t target_size)
8148 {
8149     unsigned target_bits = sizeof(abi_ulong) * 8;
8150     unsigned host_bits = sizeof(*host_mask) * 8;
8151     abi_ulong *target_mask;
8152     unsigned i, j;
8153 
8154     assert(host_size >= target_size);
8155 
8156     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8157     if (!target_mask) {
8158         return -TARGET_EFAULT;
8159     }
8160     memset(host_mask, 0, host_size);
8161 
8162     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8163         unsigned bit = i * target_bits;
8164         abi_ulong val;
8165 
8166         __get_user(val, &target_mask[i]);
8167         for (j = 0; j < target_bits; j++, bit++) {
8168             if (val & (1UL << j)) {
8169                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8170             }
8171         }
8172     }
8173 
8174     unlock_user(target_mask, target_addr, 0);
8175     return 0;
8176 }
8177 
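/*
 * Pack a host CPU affinity mask back into the guest's abi_ulong-based
 * layout; the inverse of target_to_host_cpu_mask().
 */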
8178 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8179                                    size_t host_size,
8180                                    abi_ulong target_addr,
8181                                    size_t target_size)
8182 {
8183     unsigned target_bits = sizeof(abi_ulong) * 8;
8184     unsigned host_bits = sizeof(*host_mask) * 8;
8185     abi_ulong *target_mask;
8186     unsigned i, j;
8187 
8188     assert(host_size >= target_size);
8189 
8190     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8191     if (!target_mask) {
8192         return -TARGET_EFAULT;
8193     }
8194 
8195     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8196         unsigned bit = i * target_bits;
8197         abi_ulong val = 0;
8198 
8199         for (j = 0; j < target_bits; j++, bit++) {
8200             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8201                 val |= 1UL << j;
8202             }
8203         }
8204         __put_user(val, &target_mask[i]);
8205     }
8206 
8207     unlock_user(target_mask, target_addr, target_size);
8208     return 0;
8209 }
8210 
8211 /* This is an internal helper for do_syscall so that it has a single
8212  * return point, allowing actions such as logging of syscall results
8213  * to be performed.
8214  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8215  */
8216 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8217                             abi_long arg2, abi_long arg3, abi_long arg4,
8218                             abi_long arg5, abi_long arg6, abi_long arg7,
8219                             abi_long arg8)
8220 {
8221     CPUState *cpu = env_cpu(cpu_env);
8222     abi_long ret;
8223 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8224     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8225     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8226     || defined(TARGET_NR_statx)
8227     struct stat st;
8228 #endif
8229 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8230     || defined(TARGET_NR_fstatfs)
8231     struct statfs stfs;
8232 #endif
8233     void *p;
8234 
8235     switch(num) {
8236     case TARGET_NR_exit:
8237         /* In old applications this may be used to implement _exit(2).
8238            However, in threaded applications it is used for thread termination,
8239            and _exit_group is used for application termination.
8240            Do thread termination if we have more than one thread.  */
8241 
8242         if (block_signals()) {
8243             return -TARGET_ERESTARTSYS;
8244         }
8245 
8246         pthread_mutex_lock(&clone_lock);
8247 
8248         if (CPU_NEXT(first_cpu)) {
8249             TaskState *ts = cpu->opaque;
8250 
8251             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8252             object_unref(OBJECT(cpu));
8253             /*
8254              * At this point the CPU should be unrealized and removed
8255              * from cpu lists. We can clean-up the rest of the thread
8256              * data without the lock held.
8257              */
8258 
8259             pthread_mutex_unlock(&clone_lock);
8260 
8261             if (ts->child_tidptr) {
8262                 put_user_u32(0, ts->child_tidptr);
8263                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8264                           NULL, NULL, 0);
8265             }
8266             thread_cpu = NULL;
8267             g_free(ts);
8268             rcu_unregister_thread();
8269             pthread_exit(NULL);
8270         }
8271 
8272         pthread_mutex_unlock(&clone_lock);
8273         preexit_cleanup(cpu_env, arg1);
8274         _exit(arg1);
8275         return 0; /* avoid warning */
8276     case TARGET_NR_read:
8277         if (arg2 == 0 && arg3 == 0) {
8278             return get_errno(safe_read(arg1, 0, 0));
8279         } else {
8280             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8281                 return -TARGET_EFAULT;
8282             ret = get_errno(safe_read(arg1, p, arg3));
8283             if (ret >= 0 &&
8284                 fd_trans_host_to_target_data(arg1)) {
8285                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8286             }
8287             unlock_user(p, arg2, ret);
8288         }
8289         return ret;
8290     case TARGET_NR_write:
8291         if (arg2 == 0 && arg3 == 0) {
8292             return get_errno(safe_write(arg1, 0, 0));
8293         }
8294         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8295             return -TARGET_EFAULT;
8296         if (fd_trans_target_to_host_data(arg1)) {
8297             void *copy = g_malloc(arg3);
8298             memcpy(copy, p, arg3);
8299             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8300             if (ret >= 0) {
8301                 ret = get_errno(safe_write(arg1, copy, ret));
8302             }
8303             g_free(copy);
8304         } else {
8305             ret = get_errno(safe_write(arg1, p, arg3));
8306         }
8307         unlock_user(p, arg2, 0);
8308         return ret;
8309 
8310 #ifdef TARGET_NR_open
8311     case TARGET_NR_open:
8312         if (!(p = lock_user_string(arg1)))
8313             return -TARGET_EFAULT;
8314         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8315                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8316                                   arg3));
8317         fd_trans_unregister(ret);
8318         unlock_user(p, arg1, 0);
8319         return ret;
8320 #endif
8321     case TARGET_NR_openat:
8322         if (!(p = lock_user_string(arg2)))
8323             return -TARGET_EFAULT;
8324         ret = get_errno(do_openat(cpu_env, arg1, p,
8325                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8326                                   arg4));
8327         fd_trans_unregister(ret);
8328         unlock_user(p, arg2, 0);
8329         return ret;
8330 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8331     case TARGET_NR_name_to_handle_at:
8332         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8333         return ret;
8334 #endif
8335 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8336     case TARGET_NR_open_by_handle_at:
8337         ret = do_open_by_handle_at(arg1, arg2, arg3);
8338         fd_trans_unregister(ret);
8339         return ret;
8340 #endif
8341     case TARGET_NR_close:
8342         fd_trans_unregister(arg1);
8343         return get_errno(close(arg1));
8344 
8345     case TARGET_NR_brk:
8346         return do_brk(arg1);
8347 #ifdef TARGET_NR_fork
8348     case TARGET_NR_fork:
8349         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8350 #endif
8351 #ifdef TARGET_NR_waitpid
8352     case TARGET_NR_waitpid:
8353         {
8354             int status;
8355             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8356             if (!is_error(ret) && arg2 && ret
8357                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8358                 return -TARGET_EFAULT;
8359         }
8360         return ret;
8361 #endif
8362 #ifdef TARGET_NR_waitid
8363     case TARGET_NR_waitid:
8364         {
8365             siginfo_t info;
8366             info.si_pid = 0;
8367             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8368             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8369                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8370                     return -TARGET_EFAULT;
8371                 host_to_target_siginfo(p, &info);
8372                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8373             }
8374         }
8375         return ret;
8376 #endif
8377 #ifdef TARGET_NR_creat /* not on alpha */
8378     case TARGET_NR_creat:
8379         if (!(p = lock_user_string(arg1)))
8380             return -TARGET_EFAULT;
8381         ret = get_errno(creat(p, arg2));
8382         fd_trans_unregister(ret);
8383         unlock_user(p, arg1, 0);
8384         return ret;
8385 #endif
8386 #ifdef TARGET_NR_link
8387     case TARGET_NR_link:
8388         {
8389             void * p2;
8390             p = lock_user_string(arg1);
8391             p2 = lock_user_string(arg2);
8392             if (!p || !p2)
8393                 ret = -TARGET_EFAULT;
8394             else
8395                 ret = get_errno(link(p, p2));
8396             unlock_user(p2, arg2, 0);
8397             unlock_user(p, arg1, 0);
8398         }
8399         return ret;
8400 #endif
8401 #if defined(TARGET_NR_linkat)
8402     case TARGET_NR_linkat:
8403         {
8404             void * p2 = NULL;
8405             if (!arg2 || !arg4)
8406                 return -TARGET_EFAULT;
8407             p  = lock_user_string(arg2);
8408             p2 = lock_user_string(arg4);
8409             if (!p || !p2)
8410                 ret = -TARGET_EFAULT;
8411             else
8412                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8413             unlock_user(p, arg2, 0);
8414             unlock_user(p2, arg4, 0);
8415         }
8416         return ret;
8417 #endif
8418 #ifdef TARGET_NR_unlink
8419     case TARGET_NR_unlink:
8420         if (!(p = lock_user_string(arg1)))
8421             return -TARGET_EFAULT;
8422         ret = get_errno(unlink(p));
8423         unlock_user(p, arg1, 0);
8424         return ret;
8425 #endif
8426 #if defined(TARGET_NR_unlinkat)
8427     case TARGET_NR_unlinkat:
8428         if (!(p = lock_user_string(arg2)))
8429             return -TARGET_EFAULT;
8430         ret = get_errno(unlinkat(arg1, p, arg3));
8431         unlock_user(p, arg2, 0);
8432         return ret;
8433 #endif
8434     case TARGET_NR_execve:
8435         {
8436             char **argp, **envp;
8437             int argc, envc;
8438             abi_ulong gp;
8439             abi_ulong guest_argp;
8440             abi_ulong guest_envp;
8441             abi_ulong addr;
8442             char **q;
8443             int total_size = 0;
8444 
8445             argc = 0;
8446             guest_argp = arg2;
8447             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8448                 if (get_user_ual(addr, gp))
8449                     return -TARGET_EFAULT;
8450                 if (!addr)
8451                     break;
8452                 argc++;
8453             }
8454             envc = 0;
8455             guest_envp = arg3;
8456             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8457                 if (get_user_ual(addr, gp))
8458                     return -TARGET_EFAULT;
8459                 if (!addr)
8460                     break;
8461                 envc++;
8462             }
8463 
8464             argp = g_new0(char *, argc + 1);
8465             envp = g_new0(char *, envc + 1);
8466 
8467             for (gp = guest_argp, q = argp; gp;
8468                   gp += sizeof(abi_ulong), q++) {
8469                 if (get_user_ual(addr, gp))
8470                     goto execve_efault;
8471                 if (!addr)
8472                     break;
8473                 if (!(*q = lock_user_string(addr)))
8474                     goto execve_efault;
8475                 total_size += strlen(*q) + 1;
8476             }
8477             *q = NULL;
8478 
8479             for (gp = guest_envp, q = envp; gp;
8480                   gp += sizeof(abi_ulong), q++) {
8481                 if (get_user_ual(addr, gp))
8482                     goto execve_efault;
8483                 if (!addr)
8484                     break;
8485                 if (!(*q = lock_user_string(addr)))
8486                     goto execve_efault;
8487                 total_size += strlen(*q) + 1;
8488             }
8489             *q = NULL;
8490 
8491             if (!(p = lock_user_string(arg1)))
8492                 goto execve_efault;
8493             /* Although execve() is not an interruptible syscall it is
8494              * a special case where we must use the safe_syscall wrapper:
8495              * if we allow a signal to happen before we make the host
8496              * syscall then we will 'lose' it, because at the point of
8497              * execve the process leaves QEMU's control. So we use the
8498              * safe syscall wrapper to ensure that we either take the
8499              * signal as a guest signal, or else it does not happen
8500              * before the execve completes and makes it the other
8501              * program's problem.
8502              */
8503             ret = get_errno(safe_execve(p, argp, envp));
8504             unlock_user(p, arg1, 0);
8505 
8506             goto execve_end;
8507 
8508         execve_efault:
8509             ret = -TARGET_EFAULT;
8510 
8511         execve_end:
8512             for (gp = guest_argp, q = argp; *q;
8513                   gp += sizeof(abi_ulong), q++) {
8514                 if (get_user_ual(addr, gp)
8515                     || !addr)
8516                     break;
8517                 unlock_user(*q, addr, 0);
8518             }
8519             for (gp = guest_envp, q = envp; *q;
8520                   gp += sizeof(abi_ulong), q++) {
8521                 if (get_user_ual(addr, gp)
8522                     || !addr)
8523                     break;
8524                 unlock_user(*q, addr, 0);
8525             }
8526 
8527             g_free(argp);
8528             g_free(envp);
8529         }
8530         return ret;
8531     case TARGET_NR_chdir:
8532         if (!(p = lock_user_string(arg1)))
8533             return -TARGET_EFAULT;
8534         ret = get_errno(chdir(p));
8535         unlock_user(p, arg1, 0);
8536         return ret;
8537 #ifdef TARGET_NR_time
8538     case TARGET_NR_time:
8539         {
8540             time_t host_time;
8541             ret = get_errno(time(&host_time));
8542             if (!is_error(ret)
8543                 && arg1
8544                 && put_user_sal(host_time, arg1))
8545                 return -TARGET_EFAULT;
8546         }
8547         return ret;
8548 #endif
8549 #ifdef TARGET_NR_mknod
8550     case TARGET_NR_mknod:
8551         if (!(p = lock_user_string(arg1)))
8552             return -TARGET_EFAULT;
8553         ret = get_errno(mknod(p, arg2, arg3));
8554         unlock_user(p, arg1, 0);
8555         return ret;
8556 #endif
8557 #if defined(TARGET_NR_mknodat)
8558     case TARGET_NR_mknodat:
8559         if (!(p = lock_user_string(arg2)))
8560             return -TARGET_EFAULT;
8561         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8562         unlock_user(p, arg2, 0);
8563         return ret;
8564 #endif
8565 #ifdef TARGET_NR_chmod
8566     case TARGET_NR_chmod:
8567         if (!(p = lock_user_string(arg1)))
8568             return -TARGET_EFAULT;
8569         ret = get_errno(chmod(p, arg2));
8570         unlock_user(p, arg1, 0);
8571         return ret;
8572 #endif
8573 #ifdef TARGET_NR_lseek
8574     case TARGET_NR_lseek:
8575         return get_errno(lseek(arg1, arg2, arg3));
8576 #endif
8577 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8578     /* Alpha specific */
8579     case TARGET_NR_getxpid:
8580         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8581         return get_errno(getpid());
8582 #endif
8583 #ifdef TARGET_NR_getpid
8584     case TARGET_NR_getpid:
8585         return get_errno(getpid());
8586 #endif
8587     case TARGET_NR_mount:
8588         {
8589             /* need to look at the data field */
8590             void *p2, *p3;
8591 
8592             if (arg1) {
8593                 p = lock_user_string(arg1);
8594                 if (!p) {
8595                     return -TARGET_EFAULT;
8596                 }
8597             } else {
8598                 p = NULL;
8599             }
8600 
8601             p2 = lock_user_string(arg2);
8602             if (!p2) {
8603                 if (arg1) {
8604                     unlock_user(p, arg1, 0);
8605                 }
8606                 return -TARGET_EFAULT;
8607             }
8608 
8609             if (arg3) {
8610                 p3 = lock_user_string(arg3);
8611                 if (!p3) {
8612                     if (arg1) {
8613                         unlock_user(p, arg1, 0);
8614                     }
8615                     unlock_user(p2, arg2, 0);
8616                     return -TARGET_EFAULT;
8617                 }
8618             } else {
8619                 p3 = NULL;
8620             }
8621 
8622             /* FIXME - arg5 should be locked, but it isn't clear how to
8623              * do that since it's not guaranteed to be a NULL-terminated
8624              * string.
8625              */
8626             if (!arg5) {
8627                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8628             } else {
8629                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8630             }
8631             ret = get_errno(ret);
8632 
8633             if (arg1) {
8634                 unlock_user(p, arg1, 0);
8635             }
8636             unlock_user(p2, arg2, 0);
8637             if (arg3) {
8638                 unlock_user(p3, arg3, 0);
8639             }
8640         }
8641         return ret;
8642 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8643 #if defined(TARGET_NR_umount)
8644     case TARGET_NR_umount:
8645 #endif
8646 #if defined(TARGET_NR_oldumount)
8647     case TARGET_NR_oldumount:
8648 #endif
8649         if (!(p = lock_user_string(arg1)))
8650             return -TARGET_EFAULT;
8651         ret = get_errno(umount(p));
8652         unlock_user(p, arg1, 0);
8653         return ret;
8654 #endif
8655 #ifdef TARGET_NR_stime /* not on alpha */
8656     case TARGET_NR_stime:
8657         {
8658             struct timespec ts;
8659             ts.tv_nsec = 0;
8660             if (get_user_sal(ts.tv_sec, arg1)) {
8661                 return -TARGET_EFAULT;
8662             }
8663             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8664         }
8665 #endif
8666 #ifdef TARGET_NR_alarm /* not on alpha */
8667     case TARGET_NR_alarm:
8668         return alarm(arg1);
8669 #endif
8670 #ifdef TARGET_NR_pause /* not on alpha */
8671     case TARGET_NR_pause:
8672         if (!block_signals()) {
8673             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8674         }
8675         return -TARGET_EINTR;
8676 #endif
8677 #ifdef TARGET_NR_utime
8678     case TARGET_NR_utime:
8679         {
8680             struct utimbuf tbuf, *host_tbuf;
8681             struct target_utimbuf *target_tbuf;
8682             if (arg2) {
8683                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8684                     return -TARGET_EFAULT;
8685                 tbuf.actime = tswapal(target_tbuf->actime);
8686                 tbuf.modtime = tswapal(target_tbuf->modtime);
8687                 unlock_user_struct(target_tbuf, arg2, 0);
8688                 host_tbuf = &tbuf;
8689             } else {
8690                 host_tbuf = NULL;
8691             }
8692             if (!(p = lock_user_string(arg1)))
8693                 return -TARGET_EFAULT;
8694             ret = get_errno(utime(p, host_tbuf));
8695             unlock_user(p, arg1, 0);
8696         }
8697         return ret;
8698 #endif
8699 #ifdef TARGET_NR_utimes
8700     case TARGET_NR_utimes:
8701         {
8702             struct timeval *tvp, tv[2];
8703             if (arg2) {
8704                 if (copy_from_user_timeval(&tv[0], arg2)
8705                     || copy_from_user_timeval(&tv[1],
8706                                               arg2 + sizeof(struct target_timeval)))
8707                     return -TARGET_EFAULT;
8708                 tvp = tv;
8709             } else {
8710                 tvp = NULL;
8711             }
8712             if (!(p = lock_user_string(arg1)))
8713                 return -TARGET_EFAULT;
8714             ret = get_errno(utimes(p, tvp));
8715             unlock_user(p, arg1, 0);
8716         }
8717         return ret;
8718 #endif
8719 #if defined(TARGET_NR_futimesat)
8720     case TARGET_NR_futimesat:
8721         {
8722             struct timeval *tvp, tv[2];
8723             if (arg3) {
8724                 if (copy_from_user_timeval(&tv[0], arg3)
8725                     || copy_from_user_timeval(&tv[1],
8726                                               arg3 + sizeof(struct target_timeval)))
8727                     return -TARGET_EFAULT;
8728                 tvp = tv;
8729             } else {
8730                 tvp = NULL;
8731             }
8732             if (!(p = lock_user_string(arg2))) {
8733                 return -TARGET_EFAULT;
8734             }
8735             ret = get_errno(futimesat(arg1, path(p), tvp));
8736             unlock_user(p, arg2, 0);
8737         }
8738         return ret;
8739 #endif
8740 #ifdef TARGET_NR_access
8741     case TARGET_NR_access:
8742         if (!(p = lock_user_string(arg1))) {
8743             return -TARGET_EFAULT;
8744         }
8745         ret = get_errno(access(path(p), arg2));
8746         unlock_user(p, arg1, 0);
8747         return ret;
8748 #endif
8749 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8750     case TARGET_NR_faccessat:
8751         if (!(p = lock_user_string(arg2))) {
8752             return -TARGET_EFAULT;
8753         }
8754         ret = get_errno(faccessat(arg1, p, arg3, 0));
8755         unlock_user(p, arg2, 0);
8756         return ret;
8757 #endif
8758 #ifdef TARGET_NR_nice /* not on alpha */
8759     case TARGET_NR_nice:
8760         return get_errno(nice(arg1));
8761 #endif
8762     case TARGET_NR_sync:
8763         sync();
8764         return 0;
8765 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8766     case TARGET_NR_syncfs:
8767         return get_errno(syncfs(arg1));
8768 #endif
8769     case TARGET_NR_kill:
8770         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8771 #ifdef TARGET_NR_rename
8772     case TARGET_NR_rename:
8773         {
8774             void *p2;
8775             p = lock_user_string(arg1);
8776             p2 = lock_user_string(arg2);
8777             if (!p || !p2)
8778                 ret = -TARGET_EFAULT;
8779             else
8780                 ret = get_errno(rename(p, p2));
8781             unlock_user(p2, arg2, 0);
8782             unlock_user(p, arg1, 0);
8783         }
8784         return ret;
8785 #endif
8786 #if defined(TARGET_NR_renameat)
8787     case TARGET_NR_renameat:
8788         {
8789             void *p2;
8790             p  = lock_user_string(arg2);
8791             p2 = lock_user_string(arg4);
8792             if (!p || !p2)
8793                 ret = -TARGET_EFAULT;
8794             else
8795                 ret = get_errno(renameat(arg1, p, arg3, p2));
8796             unlock_user(p2, arg4, 0);
8797             unlock_user(p, arg2, 0);
8798         }
8799         return ret;
8800 #endif
8801 #if defined(TARGET_NR_renameat2)
8802     case TARGET_NR_renameat2:
8803         {
8804             void *p2;
8805             p  = lock_user_string(arg2);
8806             p2 = lock_user_string(arg4);
8807             if (!p || !p2) {
8808                 ret = -TARGET_EFAULT;
8809             } else {
8810                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8811             }
8812             unlock_user(p2, arg4, 0);
8813             unlock_user(p, arg2, 0);
8814         }
8815         return ret;
8816 #endif
8817 #ifdef TARGET_NR_mkdir
8818     case TARGET_NR_mkdir:
8819         if (!(p = lock_user_string(arg1)))
8820             return -TARGET_EFAULT;
8821         ret = get_errno(mkdir(p, arg2));
8822         unlock_user(p, arg1, 0);
8823         return ret;
8824 #endif
8825 #if defined(TARGET_NR_mkdirat)
8826     case TARGET_NR_mkdirat:
8827         if (!(p = lock_user_string(arg2)))
8828             return -TARGET_EFAULT;
8829         ret = get_errno(mkdirat(arg1, p, arg3));
8830         unlock_user(p, arg2, 0);
8831         return ret;
8832 #endif
8833 #ifdef TARGET_NR_rmdir
8834     case TARGET_NR_rmdir:
8835         if (!(p = lock_user_string(arg1)))
8836             return -TARGET_EFAULT;
8837         ret = get_errno(rmdir(p));
8838         unlock_user(p, arg1, 0);
8839         return ret;
8840 #endif
8841     case TARGET_NR_dup:
8842         ret = get_errno(dup(arg1));
8843         if (ret >= 0) {
8844             fd_trans_dup(arg1, ret);
8845         }
8846         return ret;
8847 #ifdef TARGET_NR_pipe
8848     case TARGET_NR_pipe:
8849         return do_pipe(cpu_env, arg1, 0, 0);
8850 #endif
8851 #ifdef TARGET_NR_pipe2
8852     case TARGET_NR_pipe2:
8853         return do_pipe(cpu_env, arg1,
8854                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8855 #endif
8856     case TARGET_NR_times:
8857         {
8858             struct target_tms *tmsp;
8859             struct tms tms;
8860             ret = get_errno(times(&tms));
8861             if (arg1) {
8862                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8863                 if (!tmsp)
8864                     return -TARGET_EFAULT;
8865                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8866                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8867                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8868                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8869             }
8870             if (!is_error(ret))
8871                 ret = host_to_target_clock_t(ret);
8872         }
8873         return ret;
8874     case TARGET_NR_acct:
8875         if (arg1 == 0) {
8876             ret = get_errno(acct(NULL));
8877         } else {
8878             if (!(p = lock_user_string(arg1))) {
8879                 return -TARGET_EFAULT;
8880             }
8881             ret = get_errno(acct(path(p)));
8882             unlock_user(p, arg1, 0);
8883         }
8884         return ret;
8885 #ifdef TARGET_NR_umount2
8886     case TARGET_NR_umount2:
8887         if (!(p = lock_user_string(arg1)))
8888             return -TARGET_EFAULT;
8889         ret = get_errno(umount2(p, arg2));
8890         unlock_user(p, arg1, 0);
8891         return ret;
8892 #endif
8893     case TARGET_NR_ioctl:
8894         return do_ioctl(arg1, arg2, arg3);
8895 #ifdef TARGET_NR_fcntl
8896     case TARGET_NR_fcntl:
8897         return do_fcntl(arg1, arg2, arg3);
8898 #endif
8899     case TARGET_NR_setpgid:
8900         return get_errno(setpgid(arg1, arg2));
8901     case TARGET_NR_umask:
8902         return get_errno(umask(arg1));
8903     case TARGET_NR_chroot:
8904         if (!(p = lock_user_string(arg1)))
8905             return -TARGET_EFAULT;
8906         ret = get_errno(chroot(p));
8907         unlock_user(p, arg1, 0);
8908         return ret;
8909 #ifdef TARGET_NR_dup2
8910     case TARGET_NR_dup2:
8911         ret = get_errno(dup2(arg1, arg2));
8912         if (ret >= 0) {
8913             fd_trans_dup(arg1, arg2);
8914         }
8915         return ret;
8916 #endif
8917 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8918     case TARGET_NR_dup3:
8919     {
8920         int host_flags;
8921 
8922         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8923             return -EINVAL;
8924         }
8925         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8926         ret = get_errno(dup3(arg1, arg2, host_flags));
8927         if (ret >= 0) {
8928             fd_trans_dup(arg1, arg2);
8929         }
8930         return ret;
8931     }
8932 #endif
8933 #ifdef TARGET_NR_getppid /* not on alpha */
8934     case TARGET_NR_getppid:
8935         return get_errno(getppid());
8936 #endif
8937 #ifdef TARGET_NR_getpgrp
8938     case TARGET_NR_getpgrp:
8939         return get_errno(getpgrp());
8940 #endif
8941     case TARGET_NR_setsid:
8942         return get_errno(setsid());
8943 #ifdef TARGET_NR_sigaction
8944     case TARGET_NR_sigaction:
8945         {
8946 #if defined(TARGET_ALPHA)
8947             struct target_sigaction act, oact, *pact = 0;
8948             struct target_old_sigaction *old_act;
8949             if (arg2) {
8950                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8951                     return -TARGET_EFAULT;
8952                 act._sa_handler = old_act->_sa_handler;
8953                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8954                 act.sa_flags = old_act->sa_flags;
8955                 act.sa_restorer = 0;
8956                 unlock_user_struct(old_act, arg2, 0);
8957                 pact = &act;
8958             }
8959             ret = get_errno(do_sigaction(arg1, pact, &oact));
8960             if (!is_error(ret) && arg3) {
8961                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8962                     return -TARGET_EFAULT;
8963                 old_act->_sa_handler = oact._sa_handler;
8964                 old_act->sa_mask = oact.sa_mask.sig[0];
8965                 old_act->sa_flags = oact.sa_flags;
8966                 unlock_user_struct(old_act, arg3, 1);
8967             }
8968 #elif defined(TARGET_MIPS)
8969             struct target_sigaction act, oact, *pact, *old_act;
8970 
8971             if (arg2) {
8972                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8973                     return -TARGET_EFAULT;
8974                 act._sa_handler = old_act->_sa_handler;
8975                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8976                 act.sa_flags = old_act->sa_flags;
8977                 unlock_user_struct(old_act, arg2, 0);
8978                 pact = &act;
8979             } else {
8980                 pact = NULL;
8981             }
8982 
8983             ret = get_errno(do_sigaction(arg1, pact, &oact));
8984 
8985             if (!is_error(ret) && arg3) {
8986                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8987                     return -TARGET_EFAULT;
8988                 old_act->_sa_handler = oact._sa_handler;
8989                 old_act->sa_flags = oact.sa_flags;
8990                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8991                 old_act->sa_mask.sig[1] = 0;
8992                 old_act->sa_mask.sig[2] = 0;
8993                 old_act->sa_mask.sig[3] = 0;
8994                 unlock_user_struct(old_act, arg3, 1);
8995             }
8996 #else
8997             struct target_old_sigaction *old_act;
8998             struct target_sigaction act, oact, *pact;
8999             if (arg2) {
9000                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9001                     return -TARGET_EFAULT;
9002                 act._sa_handler = old_act->_sa_handler;
9003                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9004                 act.sa_flags = old_act->sa_flags;
9005                 act.sa_restorer = old_act->sa_restorer;
9006 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9007                 act.ka_restorer = 0;
9008 #endif
9009                 unlock_user_struct(old_act, arg2, 0);
9010                 pact = &act;
9011             } else {
9012                 pact = NULL;
9013             }
9014             ret = get_errno(do_sigaction(arg1, pact, &oact));
9015             if (!is_error(ret) && arg3) {
9016                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9017                     return -TARGET_EFAULT;
9018                 old_act->_sa_handler = oact._sa_handler;
9019                 old_act->sa_mask = oact.sa_mask.sig[0];
9020                 old_act->sa_flags = oact.sa_flags;
9021                 old_act->sa_restorer = oact.sa_restorer;
9022                 unlock_user_struct(old_act, arg3, 1);
9023             }
9024 #endif
9025         }
9026         return ret;
9027 #endif
9028     case TARGET_NR_rt_sigaction:
9029         {
9030 #if defined(TARGET_ALPHA)
9031             /* For Alpha and SPARC this is a 5-argument syscall, with
9032              * a 'restorer' parameter which must be copied into the
9033              * sa_restorer field of the sigaction struct.
9034              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9035              * and arg5 is the sigsetsize.
9036              * Alpha also has a separate rt_sigaction struct that it uses
9037              * here; SPARC uses the usual sigaction struct.
9038              */
9039             struct target_rt_sigaction *rt_act;
9040             struct target_sigaction act, oact, *pact = 0;
9041 
9042             if (arg4 != sizeof(target_sigset_t)) {
9043                 return -TARGET_EINVAL;
9044             }
9045             if (arg2) {
9046                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
9047                     return -TARGET_EFAULT;
9048                 act._sa_handler = rt_act->_sa_handler;
9049                 act.sa_mask = rt_act->sa_mask;
9050                 act.sa_flags = rt_act->sa_flags;
9051                 act.sa_restorer = arg5;
9052                 unlock_user_struct(rt_act, arg2, 0);
9053                 pact = &act;
9054             }
9055             ret = get_errno(do_sigaction(arg1, pact, &oact));
9056             if (!is_error(ret) && arg3) {
9057                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9058                     return -TARGET_EFAULT;
9059                 rt_act->_sa_handler = oact._sa_handler;
9060                 rt_act->sa_mask = oact.sa_mask;
9061                 rt_act->sa_flags = oact.sa_flags;
9062                 unlock_user_struct(rt_act, arg3, 1);
9063             }
9064 #else
9065 #ifdef TARGET_SPARC
9066             target_ulong restorer = arg4;
9067             target_ulong sigsetsize = arg5;
9068 #else
9069             target_ulong sigsetsize = arg4;
9070 #endif
9071             struct target_sigaction *act;
9072             struct target_sigaction *oact;
9073 
9074             if (sigsetsize != sizeof(target_sigset_t)) {
9075                 return -TARGET_EINVAL;
9076             }
9077             if (arg2) {
9078                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9079                     return -TARGET_EFAULT;
9080                 }
9081 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9082                 act->ka_restorer = restorer;
9083 #endif
9084             } else {
9085                 act = NULL;
9086             }
9087             if (arg3) {
9088                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9089                     ret = -TARGET_EFAULT;
9090                     goto rt_sigaction_fail;
9091                 }
9092             } else
9093                 oact = NULL;
9094             ret = get_errno(do_sigaction(arg1, act, oact));
9095         rt_sigaction_fail:
9096             if (act)
9097                 unlock_user_struct(act, arg2, 0);
9098             if (oact)
9099                 unlock_user_struct(oact, arg3, 1);
9100 #endif
9101         }
9102         return ret;
9103 #ifdef TARGET_NR_sgetmask /* not on alpha */
9104     case TARGET_NR_sgetmask:
9105         {
9106             sigset_t cur_set;
9107             abi_ulong target_set;
9108             ret = do_sigprocmask(0, NULL, &cur_set);
9109             if (!ret) {
9110                 host_to_target_old_sigset(&target_set, &cur_set);
9111                 ret = target_set;
9112             }
9113         }
9114         return ret;
9115 #endif
9116 #ifdef TARGET_NR_ssetmask /* not on alpha */
9117     case TARGET_NR_ssetmask:
9118         {
9119             sigset_t set, oset;
9120             abi_ulong target_set = arg1;
9121             target_to_host_old_sigset(&set, &target_set);
9122             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9123             if (!ret) {
9124                 host_to_target_old_sigset(&target_set, &oset);
9125                 ret = target_set;
9126             }
9127         }
9128         return ret;
9129 #endif
9130 #ifdef TARGET_NR_sigprocmask
9131     case TARGET_NR_sigprocmask:
9132         {
9133 #if defined(TARGET_ALPHA)
9134             sigset_t set, oldset;
9135             abi_ulong mask;
9136             int how;
9137 
9138             switch (arg1) {
9139             case TARGET_SIG_BLOCK:
9140                 how = SIG_BLOCK;
9141                 break;
9142             case TARGET_SIG_UNBLOCK:
9143                 how = SIG_UNBLOCK;
9144                 break;
9145             case TARGET_SIG_SETMASK:
9146                 how = SIG_SETMASK;
9147                 break;
9148             default:
9149                 return -TARGET_EINVAL;
9150             }
9151             mask = arg2;
9152             target_to_host_old_sigset(&set, &mask);
9153 
9154             ret = do_sigprocmask(how, &set, &oldset);
9155             if (!is_error(ret)) {
9156                 host_to_target_old_sigset(&mask, &oldset);
9157                 ret = mask;
9158                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9159             }
9160 #else
9161             sigset_t set, oldset, *set_ptr;
9162             int how;
9163 
9164             if (arg2) {
9165                 switch (arg1) {
9166                 case TARGET_SIG_BLOCK:
9167                     how = SIG_BLOCK;
9168                     break;
9169                 case TARGET_SIG_UNBLOCK:
9170                     how = SIG_UNBLOCK;
9171                     break;
9172                 case TARGET_SIG_SETMASK:
9173                     how = SIG_SETMASK;
9174                     break;
9175                 default:
9176                     return -TARGET_EINVAL;
9177                 }
9178                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9179                     return -TARGET_EFAULT;
9180                 target_to_host_old_sigset(&set, p);
9181                 unlock_user(p, arg2, 0);
9182                 set_ptr = &set;
9183             } else {
9184                 how = 0;
9185                 set_ptr = NULL;
9186             }
9187             ret = do_sigprocmask(how, set_ptr, &oldset);
9188             if (!is_error(ret) && arg3) {
9189                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9190                     return -TARGET_EFAULT;
9191                 host_to_target_old_sigset(p, &oldset);
9192                 unlock_user(p, arg3, sizeof(target_sigset_t));
9193             }
9194 #endif
9195         }
9196         return ret;
9197 #endif
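    /*
     * rt_sigprocmask() additionally carries the sigset size in arg4; any
     * size other than the target's sigset_t is rejected with EINVAL,
     * matching the kernel.  A guest libc typically implements
     * sigprocmask() on top of this, e.g.:
     *
     *     sigset_t set;
     *     sigemptyset(&set);
     *     sigaddset(&set, SIGINT);
     *     sigprocmask(SIG_BLOCK, &set, NULL);
     */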
9198     case TARGET_NR_rt_sigprocmask:
9199         {
9200             int how = arg1;
9201             sigset_t set, oldset, *set_ptr;
9202 
9203             if (arg4 != sizeof(target_sigset_t)) {
9204                 return -TARGET_EINVAL;
9205             }
9206 
9207             if (arg2) {
9208                 switch(how) {
9209                 case TARGET_SIG_BLOCK:
9210                     how = SIG_BLOCK;
9211                     break;
9212                 case TARGET_SIG_UNBLOCK:
9213                     how = SIG_UNBLOCK;
9214                     break;
9215                 case TARGET_SIG_SETMASK:
9216                     how = SIG_SETMASK;
9217                     break;
9218                 default:
9219                     return -TARGET_EINVAL;
9220                 }
9221                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9222                     return -TARGET_EFAULT;
9223                 target_to_host_sigset(&set, p);
9224                 unlock_user(p, arg2, 0);
9225                 set_ptr = &set;
9226             } else {
9227                 how = 0;
9228                 set_ptr = NULL;
9229             }
9230             ret = do_sigprocmask(how, set_ptr, &oldset);
9231             if (!is_error(ret) && arg3) {
9232                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9233                     return -TARGET_EFAULT;
9234                 host_to_target_sigset(p, &oldset);
9235                 unlock_user(p, arg3, sizeof(target_sigset_t));
9236             }
9237         }
9238         return ret;
9239 #ifdef TARGET_NR_sigpending
9240     case TARGET_NR_sigpending:
9241         {
9242             sigset_t set;
9243             ret = get_errno(sigpending(&set));
9244             if (!is_error(ret)) {
9245                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9246                     return -TARGET_EFAULT;
9247                 host_to_target_old_sigset(p, &set);
9248                 unlock_user(p, arg1, sizeof(target_sigset_t));
9249             }
9250         }
9251         return ret;
9252 #endif
9253     case TARGET_NR_rt_sigpending:
9254         {
9255             sigset_t set;
9256 
9257             /* Yes, this check is >, not != like most others. We follow the
9258              * kernel's logic: it implements NR_sigpending through the same
9259              * code path, and in that case the old_sigset_t is smaller, so
9260              * any size up to sizeof(sigset_t) must be accepted.
9261              */
9262             if (arg2 > sizeof(target_sigset_t)) {
9263                 return -TARGET_EINVAL;
9264             }
9265 
9266             ret = get_errno(sigpending(&set));
9267             if (!is_error(ret)) {
9268                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9269                     return -TARGET_EFAULT;
9270                 host_to_target_sigset(p, &set);
9271                 unlock_user(p, arg1, sizeof(target_sigset_t));
9272             }
9273         }
9274         return ret;
9275 #ifdef TARGET_NR_sigsuspend
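    /*
     * sigsuspend() atomically installs the given mask and sleeps until a
     * signal arrives.  The requested mask is recorded in the TaskState so
     * the signal handling code knows a sigsuspend is in progress.
     */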
9276     case TARGET_NR_sigsuspend:
9277         {
9278             TaskState *ts = cpu->opaque;
9279 #if defined(TARGET_ALPHA)
9280             abi_ulong mask = arg1;
9281             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9282 #else
9283             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9284                 return -TARGET_EFAULT;
9285             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9286             unlock_user(p, arg1, 0);
9287 #endif
9288             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9289                                                SIGSET_T_SIZE));
9290             if (ret != -TARGET_ERESTARTSYS) {
9291                 ts->in_sigsuspend = 1;
9292             }
9293         }
9294         return ret;
9295 #endif
9296     case TARGET_NR_rt_sigsuspend:
9297         {
9298             TaskState *ts = cpu->opaque;
9299 
9300             if (arg2 != sizeof(target_sigset_t)) {
9301                 return -TARGET_EINVAL;
9302             }
9303             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9304                 return -TARGET_EFAULT;
9305             target_to_host_sigset(&ts->sigsuspend_mask, p);
9306             unlock_user(p, arg1, 0);
9307             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9308                                                SIGSET_T_SIZE));
9309             if (ret != -TARGET_ERESTARTSYS) {
9310                 ts->in_sigsuspend = 1;
9311             }
9312         }
9313         return ret;
9314 #ifdef TARGET_NR_rt_sigtimedwait
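    /*
     * rt_sigtimedwait() waits for one of the signals in 'set', optionally
     * bounded by a timeout.  On success the siginfo is copied back to the
     * guest (if a buffer was supplied) and the host signal number is
     * translated to the target's numbering.
     */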
9315     case TARGET_NR_rt_sigtimedwait:
9316         {
9317             sigset_t set;
9318             struct timespec uts, *puts;
9319             siginfo_t uinfo;
9320 
9321             if (arg4 != sizeof(target_sigset_t)) {
9322                 return -TARGET_EINVAL;
9323             }
9324 
9325             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9326                 return -TARGET_EFAULT;
9327             target_to_host_sigset(&set, p);
9328             unlock_user(p, arg1, 0);
9329             if (arg3) {
9330                 puts = &uts;
9331                 if (target_to_host_timespec(puts, arg3)) {
9332                     return -TARGET_EFAULT;
9333                 }
9334             } else {
9335                 puts = NULL;
9336             }
9337             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9338                                                  SIGSET_T_SIZE));
9339             if (!is_error(ret)) {
9340                 if (arg2) {
9341                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9342                                   0);
9343                     if (!p) {
9344                         return -TARGET_EFAULT;
9345                     }
9346                     host_to_target_siginfo(p, &uinfo);
9347                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9348                 }
9349                 ret = host_to_target_signal(ret);
9350             }
9351         }
9352         return ret;
9353 #endif
9354 #ifdef TARGET_NR_rt_sigtimedwait_time64
9355     case TARGET_NR_rt_sigtimedwait_time64:
9356         {
9357             sigset_t set;
9358             struct timespec uts, *puts;
9359             siginfo_t uinfo;
9360 
9361             if (arg4 != sizeof(target_sigset_t)) {
9362                 return -TARGET_EINVAL;
9363             }
9364 
9365             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9366             if (!p) {
9367                 return -TARGET_EFAULT;
9368             }
9369             target_to_host_sigset(&set, p);
9370             unlock_user(p, arg1, 0);
9371             if (arg3) {
9372                 puts = &uts;
9373                 if (target_to_host_timespec64(puts, arg3)) {
9374                     return -TARGET_EFAULT;
9375                 }
9376             } else {
9377                 puts = NULL;
9378             }
9379             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9380                                                  SIGSET_T_SIZE));
9381             if (!is_error(ret)) {
9382                 if (arg2) {
9383                     p = lock_user(VERIFY_WRITE, arg2,
9384                                   sizeof(target_siginfo_t), 0);
9385                     if (!p) {
9386                         return -TARGET_EFAULT;
9387                     }
9388                     host_to_target_siginfo(p, &uinfo);
9389                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9390                 }
9391                 ret = host_to_target_signal(ret);
9392             }
9393         }
9394         return ret;
9395 #endif
9396     case TARGET_NR_rt_sigqueueinfo:
9397         {
9398             siginfo_t uinfo;
9399 
9400             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9401             if (!p) {
9402                 return -TARGET_EFAULT;
9403             }
9404             target_to_host_siginfo(&uinfo, p);
9405             unlock_user(p, arg3, 0);
9406             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9407         }
9408         return ret;
9409     case TARGET_NR_rt_tgsigqueueinfo:
9410         {
9411             siginfo_t uinfo;
9412 
9413             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9414             if (!p) {
9415                 return -TARGET_EFAULT;
9416             }
9417             target_to_host_siginfo(&uinfo, p);
9418             unlock_user(p, arg4, 0);
9419             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9420         }
9421         return ret;
9422 #ifdef TARGET_NR_sigreturn
9423     case TARGET_NR_sigreturn:
9424         if (block_signals()) {
9425             return -TARGET_ERESTARTSYS;
9426         }
9427         return do_sigreturn(cpu_env);
9428 #endif
9429     case TARGET_NR_rt_sigreturn:
9430         if (block_signals()) {
9431             return -TARGET_ERESTARTSYS;
9432         }
9433         return do_rt_sigreturn(cpu_env);
9434     case TARGET_NR_sethostname:
9435         if (!(p = lock_user_string(arg1)))
9436             return -TARGET_EFAULT;
9437         ret = get_errno(sethostname(p, arg2));
9438         unlock_user(p, arg1, 0);
9439         return ret;
9440 #ifdef TARGET_NR_setrlimit
9441     case TARGET_NR_setrlimit:
9442         {
9443             int resource = target_to_host_resource(arg1);
9444             struct target_rlimit *target_rlim;
9445             struct rlimit rlim;
9446             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9447                 return -TARGET_EFAULT;
9448             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9449             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9450             unlock_user_struct(target_rlim, arg2, 0);
9451             /*
9452              * If we just passed through resource limit settings for memory then
9453              * they would also apply to QEMU's own allocations, and QEMU will
9454              * crash or hang or die if its allocations fail. Ideally we would
9455              * track the guest allocations in QEMU and apply the limits ourselves.
9456              * For now, just tell the guest the call succeeded but don't actually
9457              * limit anything.
9458              */
9459             if (resource != RLIMIT_AS &&
9460                 resource != RLIMIT_DATA &&
9461                 resource != RLIMIT_STACK) {
9462                 return get_errno(setrlimit(resource, &rlim));
9463             } else {
9464                 return 0;
9465             }
9466         }
9467 #endif
9468 #ifdef TARGET_NR_getrlimit
9469     case TARGET_NR_getrlimit:
9470         {
9471             int resource = target_to_host_resource(arg1);
9472             struct target_rlimit *target_rlim;
9473             struct rlimit rlim;
9474 
9475             ret = get_errno(getrlimit(resource, &rlim));
9476             if (!is_error(ret)) {
9477                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9478                     return -TARGET_EFAULT;
9479                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9480                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9481                 unlock_user_struct(target_rlim, arg2, 1);
9482             }
9483         }
9484         return ret;
9485 #endif
9486     case TARGET_NR_getrusage:
9487         {
9488             struct rusage rusage;
9489             ret = get_errno(getrusage(arg1, &rusage));
9490             if (!is_error(ret)) {
9491                 ret = host_to_target_rusage(arg2, &rusage);
9492             }
9493         }
9494         return ret;
9495 #if defined(TARGET_NR_gettimeofday)
9496     case TARGET_NR_gettimeofday:
9497         {
9498             struct timeval tv;
9499             struct timezone tz;
9500 
9501             ret = get_errno(gettimeofday(&tv, &tz));
9502             if (!is_error(ret)) {
9503                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9504                     return -TARGET_EFAULT;
9505                 }
9506                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9507                     return -TARGET_EFAULT;
9508                 }
9509             }
9510         }
9511         return ret;
9512 #endif
9513 #if defined(TARGET_NR_settimeofday)
9514     case TARGET_NR_settimeofday:
9515         {
9516             struct timeval tv, *ptv = NULL;
9517             struct timezone tz, *ptz = NULL;
9518 
9519             if (arg1) {
9520                 if (copy_from_user_timeval(&tv, arg1)) {
9521                     return -TARGET_EFAULT;
9522                 }
9523                 ptv = &tv;
9524             }
9525 
9526             if (arg2) {
9527                 if (copy_from_user_timezone(&tz, arg2)) {
9528                     return -TARGET_EFAULT;
9529                 }
9530                 ptz = &tz;
9531             }
9532 
9533             return get_errno(settimeofday(ptv, ptz));
9534         }
9535 #endif
9536 #if defined(TARGET_NR_select)
9537     case TARGET_NR_select:
9538 #if defined(TARGET_WANT_NI_OLD_SELECT)
9539         /* some architectures used to have old_select here
9540          * but now return ENOSYS for it.
9541          */
9542         ret = -TARGET_ENOSYS;
9543 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9544         ret = do_old_select(arg1);
9545 #else
9546         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9547 #endif
9548         return ret;
9549 #endif
9550 #ifdef TARGET_NR_pselect6
9551     case TARGET_NR_pselect6:
9552         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9553 #endif
9554 #ifdef TARGET_NR_pselect6_time64
9555     case TARGET_NR_pselect6_time64:
9556         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9557 #endif
9558 #ifdef TARGET_NR_symlink
9559     case TARGET_NR_symlink:
9560         {
9561             void *p2;
9562             p = lock_user_string(arg1);
9563             p2 = lock_user_string(arg2);
9564             if (!p || !p2)
9565                 ret = -TARGET_EFAULT;
9566             else
9567                 ret = get_errno(symlink(p, p2));
9568             unlock_user(p2, arg2, 0);
9569             unlock_user(p, arg1, 0);
9570         }
9571         return ret;
9572 #endif
9573 #if defined(TARGET_NR_symlinkat)
9574     case TARGET_NR_symlinkat:
9575         {
9576             void *p2;
9577             p  = lock_user_string(arg1);
9578             p2 = lock_user_string(arg3);
9579             if (!p || !p2)
9580                 ret = -TARGET_EFAULT;
9581             else
9582                 ret = get_errno(symlinkat(p, arg2, p2));
9583             unlock_user(p2, arg3, 0);
9584             unlock_user(p, arg1, 0);
9585         }
9586         return ret;
9587 #endif
9588 #ifdef TARGET_NR_readlink
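    /*
     * readlink() is intercepted for the guest's own /proc/self/exe so that
     * it resolves to the emulated binary rather than to QEMU itself.
     */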
9589     case TARGET_NR_readlink:
9590         {
9591             void *p2;
9592             p = lock_user_string(arg1);
9593             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9594             if (!p || !p2) {
9595                 ret = -TARGET_EFAULT;
9596             } else if (!arg3) {
9597                 /* Short circuit this for the magic exe check. */
9598                 ret = -TARGET_EINVAL;
9599             } else if (is_proc_myself((const char *)p, "exe")) {
9600                 char real[PATH_MAX], *temp;
9601                 temp = realpath(exec_path, real);
9602                 /* Return value is # of bytes that we wrote to the buffer. */
9603                 if (temp == NULL) {
9604                     ret = get_errno(-1);
9605                 } else {
9606                     /* Don't worry about sign mismatch as earlier mapping
9607                      * logic would have thrown a bad address error. */
9608                     ret = MIN(strlen(real), arg3);
9609                     /* We cannot NUL terminate the string. */
9610                     memcpy(p2, real, ret);
9611                 }
9612             } else {
9613                 ret = get_errno(readlink(path(p), p2, arg3));
9614             }
9615             unlock_user(p2, arg2, ret);
9616             unlock_user(p, arg1, 0);
9617         }
9618         return ret;
9619 #endif
9620 #if defined(TARGET_NR_readlinkat)
9621     case TARGET_NR_readlinkat:
9622         {
9623             void *p2;
9624             p  = lock_user_string(arg2);
9625             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9626             if (!p || !p2) {
9627                 ret = -TARGET_EFAULT;
9628             } else if (is_proc_myself((const char *)p, "exe")) {
9629                 char real[PATH_MAX], *temp;
9630                 temp = realpath(exec_path, real);
9631                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9632                 snprintf((char *)p2, arg4, "%s", real);
9633             } else {
9634                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9635             }
9636             unlock_user(p2, arg3, ret);
9637             unlock_user(p, arg2, 0);
9638         }
9639         return ret;
9640 #endif
9641 #ifdef TARGET_NR_swapon
9642     case TARGET_NR_swapon:
9643         if (!(p = lock_user_string(arg1)))
9644             return -TARGET_EFAULT;
9645         ret = get_errno(swapon(p, arg2));
9646         unlock_user(p, arg1, 0);
9647         return ret;
9648 #endif
9649     case TARGET_NR_reboot:
9650         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9651             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 */
9652             p = lock_user_string(arg4);
9653             if (!p) {
9654                 return -TARGET_EFAULT;
9655             }
9656             ret = get_errno(reboot(arg1, arg2, arg3, p));
9657             unlock_user(p, arg4, 0);
9658         } else {
9659             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9660         }
9661         return ret;
9662 #ifdef TARGET_NR_mmap
9663     case TARGET_NR_mmap:
9664 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9665     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9666     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9667     || defined(TARGET_S390X)
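        /*
         * On these targets the old mmap() syscall takes a single pointer
         * to a block of six arguments in guest memory instead of passing
         * them in registers.
         */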
9668         {
9669             abi_ulong *v;
9670             abi_ulong v1, v2, v3, v4, v5, v6;
9671             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9672                 return -TARGET_EFAULT;
9673             v1 = tswapal(v[0]);
9674             v2 = tswapal(v[1]);
9675             v3 = tswapal(v[2]);
9676             v4 = tswapal(v[3]);
9677             v5 = tswapal(v[4]);
9678             v6 = tswapal(v[5]);
9679             unlock_user(v, arg1, 0);
9680             ret = get_errno(target_mmap(v1, v2, v3,
9681                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9682                                         v5, v6));
9683         }
9684 #else
9685         ret = get_errno(target_mmap(arg1, arg2, arg3,
9686                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9687                                     arg5,
9688                                     arg6));
9689 #endif
9690         return ret;
9691 #endif
9692 #ifdef TARGET_NR_mmap2
9693     case TARGET_NR_mmap2:
9694 #ifndef MMAP_SHIFT
9695 #define MMAP_SHIFT 12
9696 #endif
9697         ret = target_mmap(arg1, arg2, arg3,
9698                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9699                           arg5, arg6 << MMAP_SHIFT);
9700         return get_errno(ret);
9701 #endif
9702     case TARGET_NR_munmap:
9703         return get_errno(target_munmap(arg1, arg2));
9704     case TARGET_NR_mprotect:
9705         {
9706             TaskState *ts = cpu->opaque;
9707             /* Special hack to detect libc making the stack executable.  */
9708             if ((arg3 & PROT_GROWSDOWN)
9709                 && arg1 >= ts->info->stack_limit
9710                 && arg1 <= ts->info->start_stack) {
9711                 arg3 &= ~PROT_GROWSDOWN;
9712                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9713                 arg1 = ts->info->stack_limit;
9714             }
9715         }
9716         return get_errno(target_mprotect(arg1, arg2, arg3));
9717 #ifdef TARGET_NR_mremap
9718     case TARGET_NR_mremap:
9719         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9720 #endif
9721         /* ??? msync/mlock/munlock are broken for softmmu.  */
9722 #ifdef TARGET_NR_msync
9723     case TARGET_NR_msync:
9724         return get_errno(msync(g2h(arg1), arg2, arg3));
9725 #endif
9726 #ifdef TARGET_NR_mlock
9727     case TARGET_NR_mlock:
9728         return get_errno(mlock(g2h(arg1), arg2));
9729 #endif
9730 #ifdef TARGET_NR_munlock
9731     case TARGET_NR_munlock:
9732         return get_errno(munlock(g2h(arg1), arg2));
9733 #endif
9734 #ifdef TARGET_NR_mlockall
9735     case TARGET_NR_mlockall:
9736         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9737 #endif
9738 #ifdef TARGET_NR_munlockall
9739     case TARGET_NR_munlockall:
9740         return get_errno(munlockall());
9741 #endif
9742 #ifdef TARGET_NR_truncate
9743     case TARGET_NR_truncate:
9744         if (!(p = lock_user_string(arg1)))
9745             return -TARGET_EFAULT;
9746         ret = get_errno(truncate(p, arg2));
9747         unlock_user(p, arg1, 0);
9748         return ret;
9749 #endif
9750 #ifdef TARGET_NR_ftruncate
9751     case TARGET_NR_ftruncate:
9752         return get_errno(ftruncate(arg1, arg2));
9753 #endif
9754     case TARGET_NR_fchmod:
9755         return get_errno(fchmod(arg1, arg2));
9756 #if defined(TARGET_NR_fchmodat)
9757     case TARGET_NR_fchmodat:
9758         if (!(p = lock_user_string(arg2)))
9759             return -TARGET_EFAULT;
9760         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9761         unlock_user(p, arg2, 0);
9762         return ret;
9763 #endif
9764     case TARGET_NR_getpriority:
9765         /* Note that negative values are valid for getpriority, so we must
9766            differentiate based on errno settings.  */
9767         errno = 0;
9768         ret = getpriority(arg1, arg2);
9769         if (ret == -1 && errno != 0) {
9770             return -host_to_target_errno(errno);
9771         }
9772 #ifdef TARGET_ALPHA
9773         /* Return value is the unbiased priority.  Signal no error.  */
9774         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9775 #else
9776         /* Return value is a biased priority to avoid negative numbers.  */
9777         ret = 20 - ret;
9778 #endif
9779         return ret;
9780     case TARGET_NR_setpriority:
9781         return get_errno(setpriority(arg1, arg2, arg3));
9782 #ifdef TARGET_NR_statfs
9783     case TARGET_NR_statfs:
9784         if (!(p = lock_user_string(arg1))) {
9785             return -TARGET_EFAULT;
9786         }
9787         ret = get_errno(statfs(path(p), &stfs));
9788         unlock_user(p, arg1, 0);
9789     convert_statfs:
9790         if (!is_error(ret)) {
9791             struct target_statfs *target_stfs;
9792 
9793             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9794                 return -TARGET_EFAULT;
9795             __put_user(stfs.f_type, &target_stfs->f_type);
9796             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9797             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9798             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9799             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9800             __put_user(stfs.f_files, &target_stfs->f_files);
9801             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9802             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9803             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9804             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9805             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9806 #ifdef _STATFS_F_FLAGS
9807             __put_user(stfs.f_flags, &target_stfs->f_flags);
9808 #else
9809             __put_user(0, &target_stfs->f_flags);
9810 #endif
9811             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9812             unlock_user_struct(target_stfs, arg2, 1);
9813         }
9814         return ret;
9815 #endif
9816 #ifdef TARGET_NR_fstatfs
9817     case TARGET_NR_fstatfs:
9818         ret = get_errno(fstatfs(arg1, &stfs));
9819         goto convert_statfs;
9820 #endif
9821 #ifdef TARGET_NR_statfs64
9822     case TARGET_NR_statfs64:
9823         if (!(p = lock_user_string(arg1))) {
9824             return -TARGET_EFAULT;
9825         }
9826         ret = get_errno(statfs(path(p), &stfs));
9827         unlock_user(p, arg1, 0);
9828     convert_statfs64:
9829         if (!is_error(ret)) {
9830             struct target_statfs64 *target_stfs;
9831 
9832             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9833                 return -TARGET_EFAULT;
9834             __put_user(stfs.f_type, &target_stfs->f_type);
9835             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9836             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9837             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9838             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9839             __put_user(stfs.f_files, &target_stfs->f_files);
9840             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9841             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9842             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9843             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9844             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9845 #ifdef _STATFS_F_FLAGS
9846             __put_user(stfs.f_flags, &target_stfs->f_flags);
9847 #else
9848             __put_user(0, &target_stfs->f_flags);
9849 #endif
9850             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9851             unlock_user_struct(target_stfs, arg3, 1);
9852         }
9853         return ret;
9854     case TARGET_NR_fstatfs64:
9855         ret = get_errno(fstatfs(arg1, &stfs));
9856         goto convert_statfs64;
9857 #endif
9858 #ifdef TARGET_NR_socketcall
9859     case TARGET_NR_socketcall:
9860         return do_socketcall(arg1, arg2);
9861 #endif
9862 #ifdef TARGET_NR_accept
9863     case TARGET_NR_accept:
9864         return do_accept4(arg1, arg2, arg3, 0);
9865 #endif
9866 #ifdef TARGET_NR_accept4
9867     case TARGET_NR_accept4:
9868         return do_accept4(arg1, arg2, arg3, arg4);
9869 #endif
9870 #ifdef TARGET_NR_bind
9871     case TARGET_NR_bind:
9872         return do_bind(arg1, arg2, arg3);
9873 #endif
9874 #ifdef TARGET_NR_connect
9875     case TARGET_NR_connect:
9876         return do_connect(arg1, arg2, arg3);
9877 #endif
9878 #ifdef TARGET_NR_getpeername
9879     case TARGET_NR_getpeername:
9880         return do_getpeername(arg1, arg2, arg3);
9881 #endif
9882 #ifdef TARGET_NR_getsockname
9883     case TARGET_NR_getsockname:
9884         return do_getsockname(arg1, arg2, arg3);
9885 #endif
9886 #ifdef TARGET_NR_getsockopt
9887     case TARGET_NR_getsockopt:
9888         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9889 #endif
9890 #ifdef TARGET_NR_listen
9891     case TARGET_NR_listen:
9892         return get_errno(listen(arg1, arg2));
9893 #endif
9894 #ifdef TARGET_NR_recv
9895     case TARGET_NR_recv:
9896         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9897 #endif
9898 #ifdef TARGET_NR_recvfrom
9899     case TARGET_NR_recvfrom:
9900         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9901 #endif
9902 #ifdef TARGET_NR_recvmsg
9903     case TARGET_NR_recvmsg:
9904         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9905 #endif
9906 #ifdef TARGET_NR_send
9907     case TARGET_NR_send:
9908         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9909 #endif
9910 #ifdef TARGET_NR_sendmsg
9911     case TARGET_NR_sendmsg:
9912         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9913 #endif
9914 #ifdef TARGET_NR_sendmmsg
9915     case TARGET_NR_sendmmsg:
9916         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9917 #endif
9918 #ifdef TARGET_NR_recvmmsg
9919     case TARGET_NR_recvmmsg:
9920         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9921 #endif
9922 #ifdef TARGET_NR_sendto
9923     case TARGET_NR_sendto:
9924         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9925 #endif
9926 #ifdef TARGET_NR_shutdown
9927     case TARGET_NR_shutdown:
9928         return get_errno(shutdown(arg1, arg2));
9929 #endif
9930 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9931     case TARGET_NR_getrandom:
9932         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9933         if (!p) {
9934             return -TARGET_EFAULT;
9935         }
9936         ret = get_errno(getrandom(p, arg2, arg3));
9937         unlock_user(p, arg1, ret);
9938         return ret;
9939 #endif
9940 #ifdef TARGET_NR_socket
9941     case TARGET_NR_socket:
9942         return do_socket(arg1, arg2, arg3);
9943 #endif
9944 #ifdef TARGET_NR_socketpair
9945     case TARGET_NR_socketpair:
9946         return do_socketpair(arg1, arg2, arg3, arg4);
9947 #endif
9948 #ifdef TARGET_NR_setsockopt
9949     case TARGET_NR_setsockopt:
9950         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9951 #endif
9952 #if defined(TARGET_NR_syslog)
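    /*
     * syslog(): actions that do not use the guest buffer are passed
     * straight through; the READ* actions need the guest buffer mapped
     * for writing first.
     */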
9953     case TARGET_NR_syslog:
9954         {
9955             int len = arg2;
9956 
9957             switch (arg1) {
9958             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9959             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9960             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9961             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9962             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9963             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9964             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9965             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9966                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9967             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9968             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9969             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9970                 {
9971                     if (len < 0) {
9972                         return -TARGET_EINVAL;
9973                     }
9974                     if (len == 0) {
9975                         return 0;
9976                     }
9977                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9978                     if (!p) {
9979                         return -TARGET_EFAULT;
9980                     }
9981                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9982                     unlock_user(p, arg2, arg3);
9983                 }
9984                 return ret;
9985             default:
9986                 return -TARGET_EINVAL;
9987             }
9988         }
9989         break;
9990 #endif
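    /*
     * setitimer()/getitimer() exchange a struct itimerval, i.e. two
     * target_timevals back to back, so each half is converted separately.
     */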
9991     case TARGET_NR_setitimer:
9992         {
9993             struct itimerval value, ovalue, *pvalue;
9994 
9995             if (arg2) {
9996                 pvalue = &value;
9997                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9998                     || copy_from_user_timeval(&pvalue->it_value,
9999                                               arg2 + sizeof(struct target_timeval)))
10000                     return -TARGET_EFAULT;
10001             } else {
10002                 pvalue = NULL;
10003             }
10004             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10005             if (!is_error(ret) && arg3) {
10006                 if (copy_to_user_timeval(arg3,
10007                                          &ovalue.it_interval)
10008                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10009                                             &ovalue.it_value))
10010                     return -TARGET_EFAULT;
10011             }
10012         }
10013         return ret;
10014     case TARGET_NR_getitimer:
10015         {
10016             struct itimerval value;
10017 
10018             ret = get_errno(getitimer(arg1, &value));
10019             if (!is_error(ret) && arg2) {
10020                 if (copy_to_user_timeval(arg2,
10021                                          &value.it_interval)
10022                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10023                                             &value.it_value))
10024                     return -TARGET_EFAULT;
10025             }
10026         }
10027         return ret;
10028 #ifdef TARGET_NR_stat
10029     case TARGET_NR_stat:
10030         if (!(p = lock_user_string(arg1))) {
10031             return -TARGET_EFAULT;
10032         }
10033         ret = get_errno(stat(path(p), &st));
10034         unlock_user(p, arg1, 0);
10035         goto do_stat;
10036 #endif
10037 #ifdef TARGET_NR_lstat
10038     case TARGET_NR_lstat:
10039         if (!(p = lock_user_string(arg1))) {
10040             return -TARGET_EFAULT;
10041         }
10042         ret = get_errno(lstat(path(p), &st));
10043         unlock_user(p, arg1, 0);
10044         goto do_stat;
10045 #endif
10046 #ifdef TARGET_NR_fstat
10047     case TARGET_NR_fstat:
10048         {
10049             ret = get_errno(fstat(arg1, &st));
10050 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10051         do_stat:
10052 #endif
10053             if (!is_error(ret)) {
10054                 struct target_stat *target_st;
10055 
10056                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10057                     return -TARGET_EFAULT;
10058                 memset(target_st, 0, sizeof(*target_st));
10059                 __put_user(st.st_dev, &target_st->st_dev);
10060                 __put_user(st.st_ino, &target_st->st_ino);
10061                 __put_user(st.st_mode, &target_st->st_mode);
10062                 __put_user(st.st_uid, &target_st->st_uid);
10063                 __put_user(st.st_gid, &target_st->st_gid);
10064                 __put_user(st.st_nlink, &target_st->st_nlink);
10065                 __put_user(st.st_rdev, &target_st->st_rdev);
10066                 __put_user(st.st_size, &target_st->st_size);
10067                 __put_user(st.st_blksize, &target_st->st_blksize);
10068                 __put_user(st.st_blocks, &target_st->st_blocks);
10069                 __put_user(st.st_atime, &target_st->target_st_atime);
10070                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10071                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10072 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10073     defined(TARGET_STAT_HAVE_NSEC)
10074                 __put_user(st.st_atim.tv_nsec,
10075                            &target_st->target_st_atime_nsec);
10076                 __put_user(st.st_mtim.tv_nsec,
10077                            &target_st->target_st_mtime_nsec);
10078                 __put_user(st.st_ctim.tv_nsec,
10079                            &target_st->target_st_ctime_nsec);
10080 #endif
10081                 unlock_user_struct(target_st, arg2, 1);
10082             }
10083         }
10084         return ret;
10085 #endif
10086     case TARGET_NR_vhangup:
10087         return get_errno(vhangup());
10088 #ifdef TARGET_NR_syscall
10089     case TARGET_NR_syscall:
10090         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10091                           arg6, arg7, arg8, 0);
10092 #endif
10093 #if defined(TARGET_NR_wait4)
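    /*
     * wait4(): the exit status is copied back only if the guest supplied a
     * status pointer and a child was actually reaped; converting the
     * rusage can itself fail with EFAULT.
     */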
10094     case TARGET_NR_wait4:
10095         {
10096             int status;
10097             abi_long status_ptr = arg2;
10098             struct rusage rusage, *rusage_ptr;
10099             abi_ulong target_rusage = arg4;
10100             abi_long rusage_err;
10101             if (target_rusage)
10102                 rusage_ptr = &rusage;
10103             else
10104                 rusage_ptr = NULL;
10105             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10106             if (!is_error(ret)) {
10107                 if (status_ptr && ret) {
10108                     status = host_to_target_waitstatus(status);
10109                     if (put_user_s32(status, status_ptr))
10110                         return -TARGET_EFAULT;
10111                 }
10112                 if (target_rusage) {
10113                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10114                     if (rusage_err) {
10115                         ret = rusage_err;
10116                     }
10117                 }
10118             }
10119         }
10120         return ret;
10121 #endif
10122 #ifdef TARGET_NR_swapoff
10123     case TARGET_NR_swapoff:
10124         if (!(p = lock_user_string(arg1)))
10125             return -TARGET_EFAULT;
10126         ret = get_errno(swapoff(p));
10127         unlock_user(p, arg1, 0);
10128         return ret;
10129 #endif
10130     case TARGET_NR_sysinfo:
10131         {
10132             struct target_sysinfo *target_value;
10133             struct sysinfo value;
10134             ret = get_errno(sysinfo(&value));
10135             if (!is_error(ret) && arg1)
10136             {
10137                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10138                     return -TARGET_EFAULT;
10139                 __put_user(value.uptime, &target_value->uptime);
10140                 __put_user(value.loads[0], &target_value->loads[0]);
10141                 __put_user(value.loads[1], &target_value->loads[1]);
10142                 __put_user(value.loads[2], &target_value->loads[2]);
10143                 __put_user(value.totalram, &target_value->totalram);
10144                 __put_user(value.freeram, &target_value->freeram);
10145                 __put_user(value.sharedram, &target_value->sharedram);
10146                 __put_user(value.bufferram, &target_value->bufferram);
10147                 __put_user(value.totalswap, &target_value->totalswap);
10148                 __put_user(value.freeswap, &target_value->freeswap);
10149                 __put_user(value.procs, &target_value->procs);
10150                 __put_user(value.totalhigh, &target_value->totalhigh);
10151                 __put_user(value.freehigh, &target_value->freehigh);
10152                 __put_user(value.mem_unit, &target_value->mem_unit);
10153                 unlock_user_struct(target_value, arg1, 1);
10154             }
10155         }
10156         return ret;
10157 #ifdef TARGET_NR_ipc
10158     case TARGET_NR_ipc:
10159         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10160 #endif
10161 #ifdef TARGET_NR_semget
10162     case TARGET_NR_semget:
10163         return get_errno(semget(arg1, arg2, arg3));
10164 #endif
10165 #ifdef TARGET_NR_semop
10166     case TARGET_NR_semop:
10167         return do_semtimedop(arg1, arg2, arg3, 0, false);
10168 #endif
10169 #ifdef TARGET_NR_semtimedop
10170     case TARGET_NR_semtimedop:
10171         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10172 #endif
10173 #ifdef TARGET_NR_semtimedop_time64
10174     case TARGET_NR_semtimedop_time64:
10175         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10176 #endif
10177 #ifdef TARGET_NR_semctl
10178     case TARGET_NR_semctl:
10179         return do_semctl(arg1, arg2, arg3, arg4);
10180 #endif
10181 #ifdef TARGET_NR_msgctl
10182     case TARGET_NR_msgctl:
10183         return do_msgctl(arg1, arg2, arg3);
10184 #endif
10185 #ifdef TARGET_NR_msgget
10186     case TARGET_NR_msgget:
10187         return get_errno(msgget(arg1, arg2));
10188 #endif
10189 #ifdef TARGET_NR_msgrcv
10190     case TARGET_NR_msgrcv:
10191         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10192 #endif
10193 #ifdef TARGET_NR_msgsnd
10194     case TARGET_NR_msgsnd:
10195         return do_msgsnd(arg1, arg2, arg3, arg4);
10196 #endif
10197 #ifdef TARGET_NR_shmget
10198     case TARGET_NR_shmget:
10199         return get_errno(shmget(arg1, arg2, arg3));
10200 #endif
10201 #ifdef TARGET_NR_shmctl
10202     case TARGET_NR_shmctl:
10203         return do_shmctl(arg1, arg2, arg3);
10204 #endif
10205 #ifdef TARGET_NR_shmat
10206     case TARGET_NR_shmat:
10207         return do_shmat(cpu_env, arg1, arg2, arg3);
10208 #endif
10209 #ifdef TARGET_NR_shmdt
10210     case TARGET_NR_shmdt:
10211         return do_shmdt(arg1);
10212 #endif
10213     case TARGET_NR_fsync:
10214         return get_errno(fsync(arg1));
10215     case TARGET_NR_clone:
10216         /* Linux manages to have three different orderings for its
10217          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10218          * match the kernel's CONFIG_CLONE_* settings.
10219          * Microblaze is further special in that it uses a sixth
10220          * implicit argument to clone for the TLS pointer.
10221          */
10222 #if defined(TARGET_MICROBLAZE)
10223         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10224 #elif defined(TARGET_CLONE_BACKWARDS)
10225         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10226 #elif defined(TARGET_CLONE_BACKWARDS2)
10227         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10228 #else
10229         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10230 #endif
10231         return ret;
10232 #ifdef __NR_exit_group
10233         /* new thread calls */
10234     case TARGET_NR_exit_group:
10235         preexit_cleanup(cpu_env, arg1);
10236         return get_errno(exit_group(arg1));
10237 #endif
10238     case TARGET_NR_setdomainname:
10239         if (!(p = lock_user_string(arg1)))
10240             return -TARGET_EFAULT;
10241         ret = get_errno(setdomainname(p, arg2));
10242         unlock_user(p, arg1, 0);
10243         return ret;
10244     case TARGET_NR_uname:
10245         /* no need to transcode because we use the linux syscall */
10246         {
10247             struct new_utsname * buf;
10248 
10249             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10250                 return -TARGET_EFAULT;
10251             ret = get_errno(sys_uname(buf));
10252             if (!is_error(ret)) {
10253                 /* Overwrite the native machine name with whatever is being
10254                    emulated. */
10255                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10256                           sizeof(buf->machine));
10257                 /* Allow the user to override the reported release.  */
10258                 if (qemu_uname_release && *qemu_uname_release) {
10259                     g_strlcpy(buf->release, qemu_uname_release,
10260                               sizeof(buf->release));
10261                 }
10262             }
10263             unlock_user_struct(buf, arg1, 1);
10264         }
10265         return ret;
10266 #ifdef TARGET_I386
10267     case TARGET_NR_modify_ldt:
10268         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10269 #if !defined(TARGET_X86_64)
10270     case TARGET_NR_vm86:
10271         return do_vm86(cpu_env, arg1, arg2);
10272 #endif
10273 #endif
10274 #if defined(TARGET_NR_adjtimex)
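    /*
     * adjtimex(): convert the guest's struct timex to the host layout, let
     * the host kernel act on it, then copy the updated structure back.
     */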
10275     case TARGET_NR_adjtimex:
10276         {
10277             struct timex host_buf;
10278 
10279             if (target_to_host_timex(&host_buf, arg1) != 0) {
10280                 return -TARGET_EFAULT;
10281             }
10282             ret = get_errno(adjtimex(&host_buf));
10283             if (!is_error(ret)) {
10284                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10285                     return -TARGET_EFAULT;
10286                 }
10287             }
10288         }
10289         return ret;
10290 #endif
10291 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10292     case TARGET_NR_clock_adjtime:
10293         {
10294             struct timex htx, *phtx = &htx;
10295 
10296             if (target_to_host_timex(phtx, arg2) != 0) {
10297                 return -TARGET_EFAULT;
10298             }
10299             ret = get_errno(clock_adjtime(arg1, phtx));
10300             if (!is_error(ret) && phtx) {
10301                 if (host_to_target_timex(arg2, phtx) != 0) {
10302                     return -TARGET_EFAULT;
10303                 }
10304             }
10305         }
10306         return ret;
10307 #endif
10308 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10309     case TARGET_NR_clock_adjtime64:
10310         {
10311             struct timex htx;
10312 
10313             if (target_to_host_timex64(&htx, arg2) != 0) {
10314                 return -TARGET_EFAULT;
10315             }
10316             ret = get_errno(clock_adjtime(arg1, &htx));
10317             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10318                     return -TARGET_EFAULT;
10319             }
10320         }
10321         return ret;
10322 #endif
10323     case TARGET_NR_getpgid:
10324         return get_errno(getpgid(arg1));
10325     case TARGET_NR_fchdir:
10326         return get_errno(fchdir(arg1));
10327     case TARGET_NR_personality:
10328         return get_errno(personality(arg1));
10329 #ifdef TARGET_NR__llseek /* Not on alpha */
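    /*
     * _llseek() splits a 64-bit offset across two 32-bit arguments, e.g.
     * offset_high = 1, offset_low = 0 seeks to byte 1ULL << 32.  If the
     * host lacks _llseek, the halves are combined for a plain lseek();
     * either way the 64-bit result is written back to the guest.
     */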
10330     case TARGET_NR__llseek:
10331         {
10332             int64_t res;
10333 #if !defined(__NR_llseek)
10334             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10335             if (res == -1) {
10336                 ret = get_errno(res);
10337             } else {
10338                 ret = 0;
10339             }
10340 #else
10341             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10342 #endif
10343             if ((ret == 0) && put_user_s64(res, arg4)) {
10344                 return -TARGET_EFAULT;
10345             }
10346         }
10347         return ret;
10348 #endif
10349 #ifdef TARGET_NR_getdents
10350     case TARGET_NR_getdents:
10351 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10352 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
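        /*
         * The host dirent layout is wider than the target's here, so read
         * into a temporary host buffer and repack each record into the
         * guest buffer with the target's field sizes and record lengths.
         */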
10353         {
10354             struct target_dirent *target_dirp;
10355             struct linux_dirent *dirp;
10356             abi_long count = arg3;
10357 
10358             dirp = g_try_malloc(count);
10359             if (!dirp) {
10360                 return -TARGET_ENOMEM;
10361             }
10362 
10363             ret = get_errno(sys_getdents(arg1, dirp, count));
10364             if (!is_error(ret)) {
10365                 struct linux_dirent *de;
9366                 struct target_dirent *tde;
10367                 int len = ret;
10368                 int reclen, treclen;
9369                 int count1, tnamelen;
9370 
9371                 count1 = 0;
10372                 de = dirp;
10373                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10374                     return -TARGET_EFAULT;
9375                 tde = target_dirp;
10376                 while (len > 0) {
10377                     reclen = de->d_reclen;
10378                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10379                     assert(tnamelen >= 0);
10380                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10381                     assert(count1 + treclen <= count);
10382                     tde->d_reclen = tswap16(treclen);
10383                     tde->d_ino = tswapal(de->d_ino);
10384                     tde->d_off = tswapal(de->d_off);
10385                     memcpy(tde->d_name, de->d_name, tnamelen);
10386                     de = (struct linux_dirent *)((char *)de + reclen);
10387                     len -= reclen;
10388                     tde = (struct target_dirent *)((char *)tde + treclen);
9389                     count1 += treclen;
9390                 }
9391                 ret = count1;
10392                 unlock_user(target_dirp, arg2, ret);
10393             }
10394             g_free(dirp);
10395         }
10396 #else
10397         {
10398             struct linux_dirent *dirp;
10399             abi_long count = arg3;
10400 
10401             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10402                 return -TARGET_EFAULT;
10403             ret = get_errno(sys_getdents(arg1, dirp, count));
10404             if (!is_error(ret)) {
10405                 struct linux_dirent *de;
10406                 int len = ret;
10407                 int reclen;
10408                 de = dirp;
10409                 while (len > 0) {
10410                     reclen = de->d_reclen;
10411                     if (reclen > len)
10412                         break;
10413                     de->d_reclen = tswap16(reclen);
10414                     tswapls(&de->d_ino);
10415                     tswapls(&de->d_off);
10416                     de = (struct linux_dirent *)((char *)de + reclen);
10417                     len -= reclen;
10418                 }
10419             }
10420             unlock_user(dirp, arg2, ret);
10421         }
10422 #endif
10423 #else
10424         /* Implement getdents in terms of getdents64 */
10425         {
10426             struct linux_dirent64 *dirp;
10427             abi_long count = arg3;
10428 
10429             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10430             if (!dirp) {
10431                 return -TARGET_EFAULT;
10432             }
10433             ret = get_errno(sys_getdents64(arg1, dirp, count));
10434             if (!is_error(ret)) {
10435                 /* Convert the dirent64 structs to target dirent.  We do this
10436                  * in-place, since we can guarantee that a target_dirent is no
10437                  * larger than a dirent64; however this means we have to be
10438                  * careful to read everything before writing in the new format.
10439                  */
10440                 struct linux_dirent64 *de;
10441                 struct target_dirent *tde;
10442                 int len = ret;
10443                 int tlen = 0;
10444 
10445                 de = dirp;
10446                 tde = (struct target_dirent *)dirp;
10447                 while (len > 0) {
10448                     int namelen, treclen;
10449                     int reclen = de->d_reclen;
10450                     uint64_t ino = de->d_ino;
10451                     int64_t off = de->d_off;
10452                     uint8_t type = de->d_type;
10453 
10454                     namelen = strlen(de->d_name);
10455                     treclen = offsetof(struct target_dirent, d_name)
10456                         + namelen + 2;
10457                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10458 
10459                     memmove(tde->d_name, de->d_name, namelen + 1);
10460                     tde->d_ino = tswapal(ino);
10461                     tde->d_off = tswapal(off);
10462                     tde->d_reclen = tswap16(treclen);
10463                     /* The target_dirent type is in what was formerly a padding
10464                      * byte at the end of the structure:
10465                      */
10466                     *(((char *)tde) + treclen - 1) = type;
10467 
10468                     de = (struct linux_dirent64 *)((char *)de + reclen);
10469                     tde = (struct target_dirent *)((char *)tde + treclen);
10470                     len -= reclen;
10471                     tlen += treclen;
10472                 }
10473                 ret = tlen;
10474             }
10475             unlock_user(dirp, arg2, ret);
10476         }
10477 #endif
10478         return ret;
10479 #endif /* TARGET_NR_getdents */
10480 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10481     case TARGET_NR_getdents64:
10482         {
10483             struct linux_dirent64 *dirp;
10484             abi_long count = arg3;
10485             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10486                 return -TARGET_EFAULT;
10487             ret = get_errno(sys_getdents64(arg1, dirp, count));
10488             if (!is_error(ret)) {
10489                 struct linux_dirent64 *de;
10490                 int len = ret;
10491                 int reclen;
10492                 de = dirp;
10493                 while (len > 0) {
10494                     reclen = de->d_reclen;
10495                     if (reclen > len)
10496                         break;
10497                     de->d_reclen = tswap16(reclen);
10498                     tswap64s((uint64_t *)&de->d_ino);
10499                     tswap64s((uint64_t *)&de->d_off);
10500                     de = (struct linux_dirent64 *)((char *)de + reclen);
10501                     len -= reclen;
10502                 }
10503             }
10504             unlock_user(dirp, arg2, ret);
10505         }
10506         return ret;
10507 #endif /* TARGET_NR_getdents64 */
10508 #if defined(TARGET_NR__newselect)
10509     case TARGET_NR__newselect:
10510         return do_select(arg1, arg2, arg3, arg4, arg5);
10511 #endif
10512 #ifdef TARGET_NR_poll
10513     case TARGET_NR_poll:
10514         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10515 #endif
10516 #ifdef TARGET_NR_ppoll
10517     case TARGET_NR_ppoll:
10518         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10519 #endif
10520 #ifdef TARGET_NR_ppoll_time64
10521     case TARGET_NR_ppoll_time64:
10522         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10523 #endif
10524     case TARGET_NR_flock:
10525         /* NOTE: the flock constants seem to be the same on every
10526            Linux platform */
10527         return get_errno(safe_flock(arg1, arg2));
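    /*
     * readv()/writev(): lock_iovec() validates and maps every iovec entry
     * from guest memory; if that fails, errno is translated for the guest.
     */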
10528     case TARGET_NR_readv:
10529         {
10530             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10531             if (vec != NULL) {
10532                 ret = get_errno(safe_readv(arg1, vec, arg3));
10533                 unlock_iovec(vec, arg2, arg3, 1);
10534             } else {
10535                 ret = -host_to_target_errno(errno);
10536             }
10537         }
10538         return ret;
10539     case TARGET_NR_writev:
10540         {
10541             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10542             if (vec != NULL) {
10543                 ret = get_errno(safe_writev(arg1, vec, arg3));
10544                 unlock_iovec(vec, arg2, arg3, 0);
10545             } else {
10546                 ret = -host_to_target_errno(errno);
10547             }
10548         }
10549         return ret;
10550 #if defined(TARGET_NR_preadv)
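    /*
     * preadv()/pwritev() pass the 64-bit file offset as low and high
     * halves; target_to_host_low_high() converts them from the target's
     * convention to what the host syscall expects.
     */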
10551     case TARGET_NR_preadv:
10552         {
10553             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10554             if (vec != NULL) {
10555                 unsigned long low, high;
10556 
10557                 target_to_host_low_high(arg4, arg5, &low, &high);
10558                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10559                 unlock_iovec(vec, arg2, arg3, 1);
10560             } else {
10561                 ret = -host_to_target_errno(errno);
10562            }
10563         }
10564         return ret;
10565 #endif
10566 #if defined(TARGET_NR_pwritev)
10567     case TARGET_NR_pwritev:
10568         {
10569             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10570             if (vec != NULL) {
10571                 unsigned long low, high;
10572 
10573                 target_to_host_low_high(arg4, arg5, &low, &high);
10574                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10575                 unlock_iovec(vec, arg2, arg3, 0);
10576             } else {
10577                 ret = -host_to_target_errno(errno);
10578            }
10579         }
10580         return ret;
10581 #endif
10582     case TARGET_NR_getsid:
10583         return get_errno(getsid(arg1));
10584 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10585     case TARGET_NR_fdatasync:
10586         return get_errno(fdatasync(arg1));
10587 #endif
10588     case TARGET_NR_sched_getaffinity:
10589         {
10590             unsigned int mask_size;
10591             unsigned long *mask;
10592 
10593             /*
10594              * sched_getaffinity needs multiples of ulong, so we need to take
10595              * care of mismatches between target ulong and host ulong sizes.
10596              */
10597             if (arg2 & (sizeof(abi_ulong) - 1)) {
10598                 return -TARGET_EINVAL;
10599             }
10600             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
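                  /*
                   * Example: a 32-bit guest passing arg2 == 4 on a 64-bit
                   * host gets mask_size rounded up to 8, the host's
                   * sizeof(unsigned long).
                   */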
10601 
10602             mask = alloca(mask_size);
10603             memset(mask, 0, mask_size);
10604             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10605 
10606             if (!is_error(ret)) {
10607                 if (ret > arg2) {
10608                     /* More data was returned than will fit in the caller's buffer.
10609                      * This only happens if sizeof(abi_long) < sizeof(long)
10610                      * and the caller passed us a buffer holding an odd number
10611                      * of abi_longs. If the host kernel is actually using the
10612                      * extra 4 bytes then fail EINVAL; otherwise we can just
10613                      * ignore them and only copy the interesting part.
10614                      */
10615                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10616                     if (numcpus > arg2 * 8) {
10617                         return -TARGET_EINVAL;
10618                     }
10619                     ret = arg2;
10620                 }
10621 
10622                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10623                     return -TARGET_EFAULT;
10624                 }
10625             }
10626         }
10627         return ret;
10628     case TARGET_NR_sched_setaffinity:
10629         {
10630             unsigned int mask_size;
10631             unsigned long *mask;
10632 
10633             /*
10634              * sched_setaffinity needs multiples of ulong, so we need to take
10635              * care of mismatches between target ulong and host ulong sizes.
10636              */
10637             if (arg2 & (sizeof(abi_ulong) - 1)) {
10638                 return -TARGET_EINVAL;
10639             }
10640             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10641             mask = alloca(mask_size);
10642 
10643             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10644             if (ret) {
10645                 return ret;
10646             }
10647 
10648             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10649         }
10650     case TARGET_NR_getcpu:
10651         {
10652             unsigned cpu, node;
10653             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10654                                        arg2 ? &node : NULL,
10655                                        NULL));
10656             if (is_error(ret)) {
10657                 return ret;
10658             }
10659             if (arg1 && put_user_u32(cpu, arg1)) {
10660                 return -TARGET_EFAULT;
10661             }
10662             if (arg2 && put_user_u32(node, arg2)) {
10663                 return -TARGET_EFAULT;
10664             }
10665         }
10666         return ret;
10667     case TARGET_NR_sched_setparam:
10668         {
10669             struct sched_param *target_schp;
10670             struct sched_param schp;
10671 
10672             if (arg2 == 0) {
10673                 return -TARGET_EINVAL;
10674             }
10675             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10676                 return -TARGET_EFAULT;
10677             schp.sched_priority = tswap32(target_schp->sched_priority);
10678             unlock_user_struct(target_schp, arg2, 0);
10679             return get_errno(sched_setparam(arg1, &schp));
10680         }
10681     case TARGET_NR_sched_getparam:
10682         {
10683             struct sched_param *target_schp;
10684             struct sched_param schp;
10685 
10686             if (arg2 == 0) {
10687                 return -TARGET_EINVAL;
10688             }
10689             ret = get_errno(sched_getparam(arg1, &schp));
10690             if (!is_error(ret)) {
10691                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10692                     return -TARGET_EFAULT;
10693                 target_schp->sched_priority = tswap32(schp.sched_priority);
10694                 unlock_user_struct(target_schp, arg2, 1);
10695             }
10696         }
10697         return ret;
10698     case TARGET_NR_sched_setscheduler:
10699         {
10700             struct sched_param *target_schp;
10701             struct sched_param schp;
10702             if (arg3 == 0) {
10703                 return -TARGET_EINVAL;
10704             }
10705             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10706                 return -TARGET_EFAULT;
10707             schp.sched_priority = tswap32(target_schp->sched_priority);
10708             unlock_user_struct(target_schp, arg3, 0);
10709             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10710         }
10711     case TARGET_NR_sched_getscheduler:
10712         return get_errno(sched_getscheduler(arg1));
10713     case TARGET_NR_sched_yield:
10714         return get_errno(sched_yield());
10715     case TARGET_NR_sched_get_priority_max:
10716         return get_errno(sched_get_priority_max(arg1));
10717     case TARGET_NR_sched_get_priority_min:
10718         return get_errno(sched_get_priority_min(arg1));
10719 #ifdef TARGET_NR_sched_rr_get_interval
10720     case TARGET_NR_sched_rr_get_interval:
10721         {
10722             struct timespec ts;
10723             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10724             if (!is_error(ret)) {
10725                 ret = host_to_target_timespec(arg2, &ts);
10726             }
10727         }
10728         return ret;
10729 #endif
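          /*
           * Same as sched_rr_get_interval above, but the result is written
           * back in the 64-bit time_t timespec layout used by the
           * *_time64 syscalls on 32-bit guests.
           */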
10730 #ifdef TARGET_NR_sched_rr_get_interval_time64
10731     case TARGET_NR_sched_rr_get_interval_time64:
10732         {
10733             struct timespec ts;
10734             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10735             if (!is_error(ret)) {
10736                 ret = host_to_target_timespec64(arg2, &ts);
10737             }
10738         }
10739         return ret;
10740 #endif
10741 #if defined(TARGET_NR_nanosleep)
10742     case TARGET_NR_nanosleep:
10743         {
10744             struct timespec req, rem;
10745             target_to_host_timespec(&req, arg1);
10746             ret = get_errno(safe_nanosleep(&req, &rem));
10747             if (is_error(ret) && arg2) {
10748                 host_to_target_timespec(arg2, &rem);
10749             }
10750         }
10751         return ret;
10752 #endif
10753     case TARGET_NR_prctl:
10754         switch (arg1) {
10755         case PR_GET_PDEATHSIG:
10756         {
10757             int deathsig;
10758             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10759             if (!is_error(ret) && arg2
10760                 && put_user_s32(deathsig, arg2)) {
10761                 return -TARGET_EFAULT;
10762             }
10763             return ret;
10764         }
10765 #ifdef PR_GET_NAME
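              /*
               * PR_GET_NAME/PR_SET_NAME operate on a fixed 16-byte buffer
               * (the kernel's TASK_COMM_LEN), including the trailing NUL.
               */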
10766         case PR_GET_NAME:
10767         {
10768             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10769             if (!name) {
10770                 return -TARGET_EFAULT;
10771             }
10772             ret = get_errno(prctl(arg1, (unsigned long)name,
10773                                   arg3, arg4, arg5));
10774             unlock_user(name, arg2, 16);
10775             return ret;
10776         }
10777         case PR_SET_NAME:
10778         {
10779             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10780             if (!name) {
10781                 return -TARGET_EFAULT;
10782             }
10783             ret = get_errno(prctl(arg1, (unsigned long)name,
10784                                   arg3, arg4, arg5));
10785             unlock_user(name, arg2, 0);
10786             return ret;
10787         }
10788 #endif
10789 #ifdef TARGET_MIPS
10790         case TARGET_PR_GET_FP_MODE:
10791         {
10792             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10793             ret = 0;
10794             if (env->CP0_Status & (1 << CP0St_FR)) {
10795                 ret |= TARGET_PR_FP_MODE_FR;
10796             }
10797             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10798                 ret |= TARGET_PR_FP_MODE_FRE;
10799             }
10800             return ret;
10801         }
10802         case TARGET_PR_SET_FP_MODE:
10803         {
10804             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10805             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10806             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10807             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10808             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10809 
10810             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10811                                             TARGET_PR_FP_MODE_FRE;
10812 
10813             /* If nothing to change, return right away, successfully.  */
10814             if (old_fr == new_fr && old_fre == new_fre) {
10815                 return 0;
10816             }
10817             /* Check the value is valid */
10818             if (arg2 & ~known_bits) {
10819                 return -TARGET_EOPNOTSUPP;
10820             }
10821             /* Setting FRE without FR is not supported.  */
10822             if (new_fre && !new_fr) {
10823                 return -TARGET_EOPNOTSUPP;
10824             }
10825             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10826                 /* FR1 is not supported */
10827                 return -TARGET_EOPNOTSUPP;
10828             }
10829             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10830                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10831                 /* cannot set FR=0 */
10832                 return -TARGET_EOPNOTSUPP;
10833             }
10834             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10835                 /* Cannot set FRE=1 */
10836                 return -TARGET_EOPNOTSUPP;
10837             }
10838 
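                  /*
                   * Changing FR also changes where the upper halves of
                   * 64-bit FP values live: with FR=0 they sit in the odd
                   * register of an even/odd pair, with FR=1 in the upper
                   * word of the even register itself.  Repack the register
                   * file so the guest's data stays where it expects it.
                   */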
10839             int i;
10840             fpr_t *fpr = env->active_fpu.fpr;
10841             for (i = 0; i < 32 ; i += 2) {
10842                 if (!old_fr && new_fr) {
10843                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10844                 } else if (old_fr && !new_fr) {
10845                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10846                 }
10847             }
10848 
10849             if (new_fr) {
10850                 env->CP0_Status |= (1 << CP0St_FR);
10851                 env->hflags |= MIPS_HFLAG_F64;
10852             } else {
10853                 env->CP0_Status &= ~(1 << CP0St_FR);
10854                 env->hflags &= ~MIPS_HFLAG_F64;
10855             }
10856             if (new_fre) {
10857                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10858                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10859                     env->hflags |= MIPS_HFLAG_FRE;
10860                 }
10861             } else {
10862                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10863                 env->hflags &= ~MIPS_HFLAG_FRE;
10864             }
10865 
10866             return 0;
10867         }
10868 #endif /* MIPS */
10869 #ifdef TARGET_AARCH64
10870         case TARGET_PR_SVE_SET_VL:
10871             /*
10872              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10873              * PR_SVE_VL_INHERIT.  Note the kernel definition
10874              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10875              * even though the current architectural maximum is VQ=16.
10876              */
10877             ret = -TARGET_EINVAL;
10878             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10879                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10880                 CPUARMState *env = cpu_env;
10881                 ARMCPU *cpu = env_archcpu(env);
10882                 uint32_t vq, old_vq;
10883 
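                      /*
                       * ZCR_EL1.LEN encodes the vector length as VQ - 1,
                       * where one VQ is 16 bytes, so the byte length in
                       * arg2 maps to vq = arg2 / 16, clamped to the CPU's
                       * sve_max_vq.
                       */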
10884                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10885                 vq = MAX(arg2 / 16, 1);
10886                 vq = MIN(vq, cpu->sve_max_vq);
10887 
10888                 if (vq < old_vq) {
10889                     aarch64_sve_narrow_vq(env, vq);
10890                 }
10891                 env->vfp.zcr_el[1] = vq - 1;
10892                 arm_rebuild_hflags(env);
10893                 ret = vq * 16;
10894             }
10895             return ret;
10896         case TARGET_PR_SVE_GET_VL:
10897             ret = -TARGET_EINVAL;
10898             {
10899                 ARMCPU *cpu = env_archcpu(cpu_env);
10900                 if (cpu_isar_feature(aa64_sve, cpu)) {
10901                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10902                 }
10903             }
10904             return ret;
10905         case TARGET_PR_PAC_RESET_KEYS:
10906             {
10907                 CPUARMState *env = cpu_env;
10908                 ARMCPU *cpu = env_archcpu(env);
10909 
10910                 if (arg3 || arg4 || arg5) {
10911                     return -TARGET_EINVAL;
10912                 }
10913                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10914                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10915                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10916                                TARGET_PR_PAC_APGAKEY);
10917                     int ret = 0;
10918                     Error *err = NULL;
10919 
10920                     if (arg2 == 0) {
10921                         arg2 = all;
10922                     } else if (arg2 & ~all) {
10923                         return -TARGET_EINVAL;
10924                     }
10925                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10926                         ret |= qemu_guest_getrandom(&env->keys.apia,
10927                                                     sizeof(ARMPACKey), &err);
10928                     }
10929                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10930                         ret |= qemu_guest_getrandom(&env->keys.apib,
10931                                                     sizeof(ARMPACKey), &err);
10932                     }
10933                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10934                         ret |= qemu_guest_getrandom(&env->keys.apda,
10935                                                     sizeof(ARMPACKey), &err);
10936                     }
10937                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10938                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10939                                                     sizeof(ARMPACKey), &err);
10940                     }
10941                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10942                         ret |= qemu_guest_getrandom(&env->keys.apga,
10943                                                     sizeof(ARMPACKey), &err);
10944                     }
10945                     if (ret != 0) {
10946                         /*
10947                          * Some unknown failure in the crypto.  The best
10948                          * we can do is log it and fail the syscall.
10949                          * The real syscall cannot fail this way.
10950                          */
10951                         qemu_log_mask(LOG_UNIMP,
10952                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10953                                       error_get_pretty(err));
10954                         error_free(err);
10955                         return -TARGET_EIO;
10956                     }
10957                     return 0;
10958                 }
10959             }
10960             return -TARGET_EINVAL;
10961 #endif /* AARCH64 */
10962         case PR_GET_SECCOMP:
10963         case PR_SET_SECCOMP:
10964             /* Disable seccomp to prevent the target from disabling
10965              * syscalls that we need. */
10966             return -TARGET_EINVAL;
10967         default:
10968             /* Most prctl options have no pointer arguments */
10969             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10970         }
10971         break;
10972 #ifdef TARGET_NR_arch_prctl
10973     case TARGET_NR_arch_prctl:
10974         return do_arch_prctl(cpu_env, arg1, arg2);
10975 #endif
10976 #ifdef TARGET_NR_pread64
10977     case TARGET_NR_pread64:
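              /*
               * Some 32-bit ABIs want 64-bit syscall arguments in an
               * aligned register pair, which inserts a pad register; in
               * that case the offset halves arrive in (arg5, arg6) rather
               * than (arg4, arg5).
               */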
10978         if (regpairs_aligned(cpu_env, num)) {
10979             arg4 = arg5;
10980             arg5 = arg6;
10981         }
10982         if (arg2 == 0 && arg3 == 0) {
10983             /* Special-case NULL buffer and zero length, which should succeed */
10984             p = 0;
10985         } else {
10986             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10987             if (!p) {
10988                 return -TARGET_EFAULT;
10989             }
10990         }
10991         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10992         unlock_user(p, arg2, ret);
10993         return ret;
10994     case TARGET_NR_pwrite64:
10995         if (regpairs_aligned(cpu_env, num)) {
10996             arg4 = arg5;
10997             arg5 = arg6;
10998         }
10999         if (arg2 == 0 && arg3 == 0) {
11000             /* Special-case NULL buffer and zero length, which should succeed */
11001             p = 0;
11002         } else {
11003             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11004             if (!p) {
11005                 return -TARGET_EFAULT;
11006             }
11007         }
11008         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11009         unlock_user(p, arg2, 0);
11010         return ret;
11011 #endif
11012     case TARGET_NR_getcwd:
11013         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11014             return -TARGET_EFAULT;
11015         ret = get_errno(sys_getcwd1(p, arg2));
11016         unlock_user(p, arg1, ret);
11017         return ret;
11018     case TARGET_NR_capget:
11019     case TARGET_NR_capset:
11020     {
11021         struct target_user_cap_header *target_header;
11022         struct target_user_cap_data *target_data = NULL;
11023         struct __user_cap_header_struct header;
11024         struct __user_cap_data_struct data[2];
11025         struct __user_cap_data_struct *dataptr = NULL;
11026         int i, target_datalen;
11027         int data_items = 1;
11028 
11029         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11030             return -TARGET_EFAULT;
11031         }
11032         header.version = tswap32(target_header->version);
11033         header.pid = tswap32(target_header->pid);
11034 
11035         if (header.version != _LINUX_CAPABILITY_VERSION) {
11036             /* Version 2 and up takes pointer to two user_data structs */
11037             /* Versions 2 and up take a pointer to two user_data structs */
11038         }
11039 
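              /*
               * Each __user_cap_data_struct holds 32 capability bits, so
               * the v2/v3 interface needs two of them per set to cover a
               * full 64-bit capability set.
               */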
11040         target_datalen = sizeof(*target_data) * data_items;
11041 
11042         if (arg2) {
11043             if (num == TARGET_NR_capget) {
11044                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11045             } else {
11046                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11047             }
11048             if (!target_data) {
11049                 unlock_user_struct(target_header, arg1, 0);
11050                 return -TARGET_EFAULT;
11051             }
11052 
11053             if (num == TARGET_NR_capset) {
11054                 for (i = 0; i < data_items; i++) {
11055                     data[i].effective = tswap32(target_data[i].effective);
11056                     data[i].permitted = tswap32(target_data[i].permitted);
11057                     data[i].inheritable = tswap32(target_data[i].inheritable);
11058                 }
11059             }
11060 
11061             dataptr = data;
11062         }
11063 
11064         if (num == TARGET_NR_capget) {
11065             ret = get_errno(capget(&header, dataptr));
11066         } else {
11067             ret = get_errno(capset(&header, dataptr));
11068         }
11069 
11070         /* The kernel always updates version for both capget and capset */
11071         target_header->version = tswap32(header.version);
11072         unlock_user_struct(target_header, arg1, 1);
11073 
11074         if (arg2) {
11075             if (num == TARGET_NR_capget) {
11076                 for (i = 0; i < data_items; i++) {
11077                     target_data[i].effective = tswap32(data[i].effective);
11078                     target_data[i].permitted = tswap32(data[i].permitted);
11079                     target_data[i].inheritable = tswap32(data[i].inheritable);
11080                 }
11081                 unlock_user(target_data, arg2, target_datalen);
11082             } else {
11083                 unlock_user(target_data, arg2, 0);
11084             }
11085         }
11086         return ret;
11087     }
11088     case TARGET_NR_sigaltstack:
11089         return do_sigaltstack(arg1, arg2,
11090                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11091 
11092 #ifdef CONFIG_SENDFILE
11093 #ifdef TARGET_NR_sendfile
11094     case TARGET_NR_sendfile:
11095     {
11096         off_t *offp = NULL;
11097         off_t off;
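              /*
               * If the guest supplied an offset pointer, copy the offset
               * in, let the host sendfile() advance it, and write the
               * updated value back so the guest sees the new position.
               */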
11098         if (arg3) {
11099             ret = get_user_sal(off, arg3);
11100             if (is_error(ret)) {
11101                 return ret;
11102             }
11103             offp = &off;
11104         }
11105         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11106         if (!is_error(ret) && arg3) {
11107             abi_long ret2 = put_user_sal(off, arg3);
11108             if (is_error(ret2)) {
11109                 ret = ret2;
11110             }
11111         }
11112         return ret;
11113     }
11114 #endif
11115 #ifdef TARGET_NR_sendfile64
11116     case TARGET_NR_sendfile64:
11117     {
11118         off_t *offp = NULL;
11119         off_t off;
11120         if (arg3) {
11121             ret = get_user_s64(off, arg3);
11122             if (is_error(ret)) {
11123                 return ret;
11124             }
11125             offp = &off;
11126         }
11127         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11128         if (!is_error(ret) && arg3) {
11129             abi_long ret2 = put_user_s64(off, arg3);
11130             if (is_error(ret2)) {
11131                 ret = ret2;
11132             }
11133         }
11134         return ret;
11135     }
11136 #endif
11137 #endif
11138 #ifdef TARGET_NR_vfork
11139     case TARGET_NR_vfork:
11140         return get_errno(do_fork(cpu_env,
11141                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11142                          0, 0, 0, 0));
11143 #endif
11144 #ifdef TARGET_NR_ugetrlimit
11145     case TARGET_NR_ugetrlimit:
11146     {
11147         struct rlimit rlim;
11148         int resource = target_to_host_resource(arg1);
11149         ret = get_errno(getrlimit(resource, &rlim));
11150         if (!is_error(ret)) {
11151             struct target_rlimit *target_rlim;
11152             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11153                 return -TARGET_EFAULT;
11154             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11155             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11156             unlock_user_struct(target_rlim, arg2, 1);
11157         }
11158         return ret;
11159     }
11160 #endif
11161 #ifdef TARGET_NR_truncate64
11162     case TARGET_NR_truncate64:
11163         if (!(p = lock_user_string(arg1)))
11164             return -TARGET_EFAULT;
11165         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11166         unlock_user(p, arg1, 0);
11167         return ret;
11168 #endif
11169 #ifdef TARGET_NR_ftruncate64
11170     case TARGET_NR_ftruncate64:
11171         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11172 #endif
11173 #ifdef TARGET_NR_stat64
11174     case TARGET_NR_stat64:
11175         if (!(p = lock_user_string(arg1))) {
11176             return -TARGET_EFAULT;
11177         }
11178         ret = get_errno(stat(path(p), &st));
11179         unlock_user(p, arg1, 0);
11180         if (!is_error(ret))
11181             ret = host_to_target_stat64(cpu_env, arg2, &st);
11182         return ret;
11183 #endif
11184 #ifdef TARGET_NR_lstat64
11185     case TARGET_NR_lstat64:
11186         if (!(p = lock_user_string(arg1))) {
11187             return -TARGET_EFAULT;
11188         }
11189         ret = get_errno(lstat(path(p), &st));
11190         unlock_user(p, arg1, 0);
11191         if (!is_error(ret))
11192             ret = host_to_target_stat64(cpu_env, arg2, &st);
11193         return ret;
11194 #endif
11195 #ifdef TARGET_NR_fstat64
11196     case TARGET_NR_fstat64:
11197         ret = get_errno(fstat(arg1, &st));
11198         if (!is_error(ret))
11199             ret = host_to_target_stat64(cpu_env, arg2, &st);
11200         return ret;
11201 #endif
11202 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11203 #ifdef TARGET_NR_fstatat64
11204     case TARGET_NR_fstatat64:
11205 #endif
11206 #ifdef TARGET_NR_newfstatat
11207     case TARGET_NR_newfstatat:
11208 #endif
11209         if (!(p = lock_user_string(arg2))) {
11210             return -TARGET_EFAULT;
11211         }
11212         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11213         unlock_user(p, arg2, 0);
11214         if (!is_error(ret))
11215             ret = host_to_target_stat64(cpu_env, arg3, &st);
11216         return ret;
11217 #endif
11218 #if defined(TARGET_NR_statx)
11219     case TARGET_NR_statx:
11220         {
11221             struct target_statx *target_stx;
11222             int dirfd = arg1;
11223             int flags = arg3;
11224 
11225             p = lock_user_string(arg2);
11226             if (p == NULL) {
11227                 return -TARGET_EFAULT;
11228             }
11229 #if defined(__NR_statx)
11230             {
11231                 /*
11232                  * It is assumed that struct statx is architecture independent.
11233                  */
11234                 struct target_statx host_stx;
11235                 int mask = arg4;
11236 
11237                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11238                 if (!is_error(ret)) {
11239                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11240                         unlock_user(p, arg2, 0);
11241                         return -TARGET_EFAULT;
11242                     }
11243                 }
11244 
11245                 if (ret != -TARGET_ENOSYS) {
11246                     unlock_user(p, arg2, 0);
11247                     return ret;
11248                 }
11249             }
11250 #endif
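                  /*
                   * No usable host statx(): emulate it with fstatat() and
                   * fill in only the fields that a plain struct stat can
                   * provide; everything else stays zero from the memset
                   * below.
                   */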
11251             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11252             unlock_user(p, arg2, 0);
11253 
11254             if (!is_error(ret)) {
11255                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11256                     return -TARGET_EFAULT;
11257                 }
11258                 memset(target_stx, 0, sizeof(*target_stx));
11259                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11260                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11261                 __put_user(st.st_ino, &target_stx->stx_ino);
11262                 __put_user(st.st_mode, &target_stx->stx_mode);
11263                 __put_user(st.st_uid, &target_stx->stx_uid);
11264                 __put_user(st.st_gid, &target_stx->stx_gid);
11265                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11266                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11267                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11268                 __put_user(st.st_size, &target_stx->stx_size);
11269                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11270                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11271                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11272                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11273                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11274                 unlock_user_struct(target_stx, arg5, 1);
11275             }
11276         }
11277         return ret;
11278 #endif
11279 #ifdef TARGET_NR_lchown
11280     case TARGET_NR_lchown:
11281         if (!(p = lock_user_string(arg1)))
11282             return -TARGET_EFAULT;
11283         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11284         unlock_user(p, arg1, 0);
11285         return ret;
11286 #endif
11287 #ifdef TARGET_NR_getuid
11288     case TARGET_NR_getuid:
11289         return get_errno(high2lowuid(getuid()));
11290 #endif
11291 #ifdef TARGET_NR_getgid
11292     case TARGET_NR_getgid:
11293         return get_errno(high2lowgid(getgid()));
11294 #endif
11295 #ifdef TARGET_NR_geteuid
11296     case TARGET_NR_geteuid:
11297         return get_errno(high2lowuid(geteuid()));
11298 #endif
11299 #ifdef TARGET_NR_getegid
11300     case TARGET_NR_getegid:
11301         return get_errno(high2lowgid(getegid()));
11302 #endif
11303     case TARGET_NR_setreuid:
11304         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11305     case TARGET_NR_setregid:
11306         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
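          /*
           * target_id matches the guest's legacy ID width (16-bit on ABIs
           * that also have the separate *32 syscalls further down, 32-bit
           * otherwise); tswapid() byte-swaps it and high2lowgid() squashes
           * host ids that do not fit into the narrow type.
           */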
11307     case TARGET_NR_getgroups:
11308         {
11309             int gidsetsize = arg1;
11310             target_id *target_grouplist;
11311             gid_t *grouplist;
11312             int i;
11313 
11314             grouplist = alloca(gidsetsize * sizeof(gid_t));
11315             ret = get_errno(getgroups(gidsetsize, grouplist));
11316             if (gidsetsize == 0)
11317                 return ret;
11318             if (!is_error(ret)) {
11319                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11320                 if (!target_grouplist)
11321                     return -TARGET_EFAULT;
11322                 for (i = 0; i < ret; i++)
11323                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11324                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11325             }
11326         }
11327         return ret;
11328     case TARGET_NR_setgroups:
11329         {
11330             int gidsetsize = arg1;
11331             target_id *target_grouplist;
11332             gid_t *grouplist = NULL;
11333             int i;
11334             if (gidsetsize) {
11335                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11336                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11337                 if (!target_grouplist) {
11338                     return -TARGET_EFAULT;
11339                 }
11340                 for (i = 0; i < gidsetsize; i++) {
11341                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11342                 }
11343                 unlock_user(target_grouplist, arg2, 0);
11344             }
11345             return get_errno(setgroups(gidsetsize, grouplist));
11346         }
11347     case TARGET_NR_fchown:
11348         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11349 #if defined(TARGET_NR_fchownat)
11350     case TARGET_NR_fchownat:
11351         if (!(p = lock_user_string(arg2)))
11352             return -TARGET_EFAULT;
11353         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11354                                  low2highgid(arg4), arg5));
11355         unlock_user(p, arg2, 0);
11356         return ret;
11357 #endif
11358 #ifdef TARGET_NR_setresuid
11359     case TARGET_NR_setresuid:
11360         return get_errno(sys_setresuid(low2highuid(arg1),
11361                                        low2highuid(arg2),
11362                                        low2highuid(arg3)));
11363 #endif
11364 #ifdef TARGET_NR_getresuid
11365     case TARGET_NR_getresuid:
11366         {
11367             uid_t ruid, euid, suid;
11368             ret = get_errno(getresuid(&ruid, &euid, &suid));
11369             if (!is_error(ret)) {
11370                 if (put_user_id(high2lowuid(ruid), arg1)
11371                     || put_user_id(high2lowuid(euid), arg2)
11372                     || put_user_id(high2lowuid(suid), arg3))
11373                     return -TARGET_EFAULT;
11374             }
11375         }
11376         return ret;
11377 #endif
11378 #ifdef TARGET_NR_getresgid
11379     case TARGET_NR_setresgid:
11380         return get_errno(sys_setresgid(low2highgid(arg1),
11381                                        low2highgid(arg2),
11382                                        low2highgid(arg3)));
11383 #endif
11384 #ifdef TARGET_NR_getresgid
11385     case TARGET_NR_getresgid:
11386         {
11387             gid_t rgid, egid, sgid;
11388             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11389             if (!is_error(ret)) {
11390                 if (put_user_id(high2lowgid(rgid), arg1)
11391                     || put_user_id(high2lowgid(egid), arg2)
11392                     || put_user_id(high2lowgid(sgid), arg3))
11393                     return -TARGET_EFAULT;
11394             }
11395         }
11396         return ret;
11397 #endif
11398 #ifdef TARGET_NR_chown
11399     case TARGET_NR_chown:
11400         if (!(p = lock_user_string(arg1)))
11401             return -TARGET_EFAULT;
11402         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11403         unlock_user(p, arg1, 0);
11404         return ret;
11405 #endif
11406     case TARGET_NR_setuid:
11407         return get_errno(sys_setuid(low2highuid(arg1)));
11408     case TARGET_NR_setgid:
11409         return get_errno(sys_setgid(low2highgid(arg1)));
11410     case TARGET_NR_setfsuid:
11411         return get_errno(setfsuid(arg1));
11412     case TARGET_NR_setfsgid:
11413         return get_errno(setfsgid(arg1));
11414 
11415 #ifdef TARGET_NR_lchown32
11416     case TARGET_NR_lchown32:
11417         if (!(p = lock_user_string(arg1)))
11418             return -TARGET_EFAULT;
11419         ret = get_errno(lchown(p, arg2, arg3));
11420         unlock_user(p, arg1, 0);
11421         return ret;
11422 #endif
11423 #ifdef TARGET_NR_getuid32
11424     case TARGET_NR_getuid32:
11425         return get_errno(getuid());
11426 #endif
11427 
11428 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11429     /* Alpha specific: returns ruid, and also stores euid in a4.  */
11430     case TARGET_NR_getxuid:
11431         {
11432             uid_t euid;
11433             euid = geteuid();
11434             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11435         }
11436         return get_errno(getuid());
11437 #endif
11438 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11439     /* Alpha specific: returns rgid, and also stores egid in a4.  */
11440     case TARGET_NR_getxgid:
11441         {
11442             gid_t egid;
11443             egid = getegid();
11444             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11445         }
11446         return get_errno(getgid());
11447 #endif
11448 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11449     /* Alpha specific */
11450     case TARGET_NR_osf_getsysinfo:
11451         ret = -TARGET_EOPNOTSUPP;
11452         switch (arg1) {
11453           case TARGET_GSI_IEEE_FP_CONTROL:
11454             {
11455                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11456                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11457 
11458                 swcr &= ~SWCR_STATUS_MASK;
11459                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11460 
11461                 if (put_user_u64(swcr, arg2))
11462                     return -TARGET_EFAULT;
11463                 ret = 0;
11464             }
11465             break;
11466 
11467           /* case GSI_IEEE_STATE_AT_SIGNAL:
11468              -- Not implemented in linux kernel.
11469              case GSI_UACPROC:
11470              -- Retrieves current unaligned access state; not much used.
11471              case GSI_PROC_TYPE:
11472              -- Retrieves implver information; surely not used.
11473              case GSI_GET_HWRPB:
11474              -- Grabs a copy of the HWRPB; surely not used.
11475           */
11476         }
11477         return ret;
11478 #endif
11479 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11480     /* Alpha specific */
11481     case TARGET_NR_osf_setsysinfo:
11482         ret = -TARGET_EOPNOTSUPP;
11483         switch (arg1) {
11484           case TARGET_SSI_IEEE_FP_CONTROL:
11485             {
11486                 uint64_t swcr, fpcr;
11487 
11488                 if (get_user_u64(swcr, arg2)) {
11489                     return -TARGET_EFAULT;
11490                 }
11491 
11492                 /*
11493                  * The kernel calls swcr_update_status to update the
11494                  * status bits from the fpcr at every point that it
11495                  * could be queried.  Therefore, we store the status
11496                  * bits only in FPCR.
11497                  */
11498                 ((CPUAlphaState *)cpu_env)->swcr
11499                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11500 
11501                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11502                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11503                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11504                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11505                 ret = 0;
11506             }
11507             break;
11508 
11509           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11510             {
11511                 uint64_t exc, fpcr, fex;
11512 
11513                 if (get_user_u64(exc, arg2)) {
11514                     return -TARGET_EFAULT;
11515                 }
11516                 exc &= SWCR_STATUS_MASK;
11517                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11518 
11519                 /* Old exceptions are not signaled.  */
11520                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11521                 fex = exc & ~fex;
11522                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11523                 fex &= ((CPUArchState *)cpu_env)->swcr;
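                      /*
                       * fex is now the set of newly raised exceptions whose
                       * trap-enable bits are set in the saved swcr; only
                       * those produce a SIGFPE below.
                       */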
11524 
11525                 /* Update the hardware fpcr.  */
11526                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11527                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11528 
11529                 if (fex) {
11530                     int si_code = TARGET_FPE_FLTUNK;
11531                     target_siginfo_t info;
11532 
11533                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11534                         si_code = TARGET_FPE_FLTUND;
11535                     }
11536                     if (fex & SWCR_TRAP_ENABLE_INE) {
11537                         si_code = TARGET_FPE_FLTRES;
11538                     }
11539                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11540                         si_code = TARGET_FPE_FLTUND;
11541                     }
11542                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11543                         si_code = TARGET_FPE_FLTOVF;
11544                     }
11545                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11546                         si_code = TARGET_FPE_FLTDIV;
11547                     }
11548                     if (fex & SWCR_TRAP_ENABLE_INV) {
11549                         si_code = TARGET_FPE_FLTINV;
11550                     }
11551 
11552                     info.si_signo = SIGFPE;
11553                     info.si_errno = 0;
11554                     info.si_code = si_code;
11555                     info._sifields._sigfault._addr
11556                         = ((CPUArchState *)cpu_env)->pc;
11557                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11558                                  QEMU_SI_FAULT, &info);
11559                 }
11560                 ret = 0;
11561             }
11562             break;
11563 
11564           /* case SSI_NVPAIRS:
11565              -- Used with SSIN_UACPROC to enable unaligned accesses.
11566              case SSI_IEEE_STATE_AT_SIGNAL:
11567              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11568              -- Not implemented in linux kernel
11569           */
11570         }
11571         return ret;
11572 #endif
11573 #ifdef TARGET_NR_osf_sigprocmask
11574     /* Alpha specific.  */
11575     case TARGET_NR_osf_sigprocmask:
11576         {
11577             abi_ulong mask;
11578             int how;
11579             sigset_t set, oldset;
11580 
11581             switch (arg1) {
11582             case TARGET_SIG_BLOCK:
11583                 how = SIG_BLOCK;
11584                 break;
11585             case TARGET_SIG_UNBLOCK:
11586                 how = SIG_UNBLOCK;
11587                 break;
11588             case TARGET_SIG_SETMASK:
11589                 how = SIG_SETMASK;
11590                 break;
11591             default:
11592                 return -TARGET_EINVAL;
11593             }
11594             mask = arg2;
11595             target_to_host_old_sigset(&set, &mask);
11596             ret = do_sigprocmask(how, &set, &oldset);
11597             if (!ret) {
11598                 host_to_target_old_sigset(&mask, &oldset);
11599                 ret = mask;
11600             }
11601         }
11602         return ret;
11603 #endif
11604 
11605 #ifdef TARGET_NR_getgid32
11606     case TARGET_NR_getgid32:
11607         return get_errno(getgid());
11608 #endif
11609 #ifdef TARGET_NR_geteuid32
11610     case TARGET_NR_geteuid32:
11611         return get_errno(geteuid());
11612 #endif
11613 #ifdef TARGET_NR_getegid32
11614     case TARGET_NR_getegid32:
11615         return get_errno(getegid());
11616 #endif
11617 #ifdef TARGET_NR_setreuid32
11618     case TARGET_NR_setreuid32:
11619         return get_errno(setreuid(arg1, arg2));
11620 #endif
11621 #ifdef TARGET_NR_setregid32
11622     case TARGET_NR_setregid32:
11623         return get_errno(setregid(arg1, arg2));
11624 #endif
11625 #ifdef TARGET_NR_getgroups32
11626     case TARGET_NR_getgroups32:
11627         {
11628             int gidsetsize = arg1;
11629             uint32_t *target_grouplist;
11630             gid_t *grouplist;
11631             int i;
11632 
11633             grouplist = alloca(gidsetsize * sizeof(gid_t));
11634             ret = get_errno(getgroups(gidsetsize, grouplist));
11635             if (gidsetsize == 0)
11636                 return ret;
11637             if (!is_error(ret)) {
11638                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11639                 if (!target_grouplist) {
11640                     return -TARGET_EFAULT;
11641                 }
11642                 for (i = 0; i < ret; i++)
11643                     target_grouplist[i] = tswap32(grouplist[i]);
11644                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11645             }
11646         }
11647         return ret;
11648 #endif
11649 #ifdef TARGET_NR_setgroups32
11650     case TARGET_NR_setgroups32:
11651         {
11652             int gidsetsize = arg1;
11653             uint32_t *target_grouplist;
11654             gid_t *grouplist;
11655             int i;
11656 
11657             grouplist = alloca(gidsetsize * sizeof(gid_t));
11658             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11659             if (!target_grouplist) {
11660                 return -TARGET_EFAULT;
11661             }
11662             for (i = 0; i < gidsetsize; i++)
11663                 grouplist[i] = tswap32(target_grouplist[i]);
11664             unlock_user(target_grouplist, arg2, 0);
11665             return get_errno(setgroups(gidsetsize, grouplist));
11666         }
11667 #endif
11668 #ifdef TARGET_NR_fchown32
11669     case TARGET_NR_fchown32:
11670         return get_errno(fchown(arg1, arg2, arg3));
11671 #endif
11672 #ifdef TARGET_NR_setresuid32
11673     case TARGET_NR_setresuid32:
11674         return get_errno(sys_setresuid(arg1, arg2, arg3));
11675 #endif
11676 #ifdef TARGET_NR_getresuid32
11677     case TARGET_NR_getresuid32:
11678         {
11679             uid_t ruid, euid, suid;
11680             ret = get_errno(getresuid(&ruid, &euid, &suid));
11681             if (!is_error(ret)) {
11682                 if (put_user_u32(ruid, arg1)
11683                     || put_user_u32(euid, arg2)
11684                     || put_user_u32(suid, arg3))
11685                     return -TARGET_EFAULT;
11686             }
11687         }
11688         return ret;
11689 #endif
11690 #ifdef TARGET_NR_setresgid32
11691     case TARGET_NR_setresgid32:
11692         return get_errno(sys_setresgid(arg1, arg2, arg3));
11693 #endif
11694 #ifdef TARGET_NR_getresgid32
11695     case TARGET_NR_getresgid32:
11696         {
11697             gid_t rgid, egid, sgid;
11698             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11699             if (!is_error(ret)) {
11700                 if (put_user_u32(rgid, arg1)
11701                     || put_user_u32(egid, arg2)
11702                     || put_user_u32(sgid, arg3))
11703                     return -TARGET_EFAULT;
11704             }
11705         }
11706         return ret;
11707 #endif
11708 #ifdef TARGET_NR_chown32
11709     case TARGET_NR_chown32:
11710         if (!(p = lock_user_string(arg1)))
11711             return -TARGET_EFAULT;
11712         ret = get_errno(chown(p, arg2, arg3));
11713         unlock_user(p, arg1, 0);
11714         return ret;
11715 #endif
11716 #ifdef TARGET_NR_setuid32
11717     case TARGET_NR_setuid32:
11718         return get_errno(sys_setuid(arg1));
11719 #endif
11720 #ifdef TARGET_NR_setgid32
11721     case TARGET_NR_setgid32:
11722         return get_errno(sys_setgid(arg1));
11723 #endif
11724 #ifdef TARGET_NR_setfsuid32
11725     case TARGET_NR_setfsuid32:
11726         return get_errno(setfsuid(arg1));
11727 #endif
11728 #ifdef TARGET_NR_setfsgid32
11729     case TARGET_NR_setfsgid32:
11730         return get_errno(setfsgid(arg1));
11731 #endif
11732 #ifdef TARGET_NR_mincore
11733     case TARGET_NR_mincore:
11734         {
11735             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11736             if (!a) {
11737                 return -TARGET_ENOMEM;
11738             }
11739             p = lock_user_string(arg3);
11740             if (!p) {
11741                 ret = -TARGET_EFAULT;
11742             } else {
11743                 ret = get_errno(mincore(a, arg2, p));
11744                 unlock_user(p, arg3, ret);
11745             }
11746             unlock_user(a, arg1, 0);
11747         }
11748         return ret;
11749 #endif
11750 #ifdef TARGET_NR_arm_fadvise64_64
11751     case TARGET_NR_arm_fadvise64_64:
11752         /* arm_fadvise64_64 looks like fadvise64_64 but
11753          * with different argument order: fd, advice, offset, len
11754          * rather than the usual fd, offset, len, advice.
11755          * Note that offset and len are both 64-bit so appear as
11756          * pairs of 32-bit registers.
11757          */
11758         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11759                             target_offset64(arg5, arg6), arg2);
11760         return -host_to_target_errno(ret);
11761 #endif
11762 
11763 #if TARGET_ABI_BITS == 32
11764 
11765 #ifdef TARGET_NR_fadvise64_64
11766     case TARGET_NR_fadvise64_64:
11767 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11768         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11769         ret = arg2;
11770         arg2 = arg3;
11771         arg3 = arg4;
11772         arg4 = arg5;
11773         arg5 = arg6;
11774         arg6 = ret;
11775 #else
11776         /* 6 args: fd, offset (high, low), len (high, low), advice */
11777         if (regpairs_aligned(cpu_env, num)) {
11778             /* offset is in (3,4), len in (5,6) and advice in 7 */
11779             arg2 = arg3;
11780             arg3 = arg4;
11781             arg4 = arg5;
11782             arg5 = arg6;
11783             arg6 = arg7;
11784         }
11785 #endif
11786         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11787                             target_offset64(arg4, arg5), arg6);
11788         return -host_to_target_errno(ret);
11789 #endif
11790 
11791 #ifdef TARGET_NR_fadvise64
11792     case TARGET_NR_fadvise64:
11793         /* 5 args: fd, offset (high, low), len, advice */
11794         if (regpairs_aligned(cpu_env, num)) {
11795             /* offset is in (3,4), len in 5 and advice in 6 */
11796             arg2 = arg3;
11797             arg3 = arg4;
11798             arg4 = arg5;
11799             arg5 = arg6;
11800         }
11801         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11802         return -host_to_target_errno(ret);
11803 #endif
11804 
11805 #else /* not a 32-bit ABI */
11806 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11807 #ifdef TARGET_NR_fadvise64_64
11808     case TARGET_NR_fadvise64_64:
11809 #endif
11810 #ifdef TARGET_NR_fadvise64
11811     case TARGET_NR_fadvise64:
11812 #endif
11813 #ifdef TARGET_S390X
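              /*
               * s390x numbers POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather
               * than the generic 4/5, so remap the guest values to the
               * host constants and turn the generic 4/5 into deliberately
               * invalid values.
               */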
11814         switch (arg4) {
11815         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11816         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11817         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11818         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11819         default: break;
11820         }
11821 #endif
11822         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11823 #endif
11824 #endif /* end of 64-bit ABI fadvise handling */
11825 
11826 #ifdef TARGET_NR_madvise
11827     case TARGET_NR_madvise:
11828         /* A straight passthrough may not be safe because qemu sometimes
11829            turns private file-backed mappings into anonymous mappings.
11830            This will break MADV_DONTNEED.
11831            This is a hint, so ignoring and returning success is ok.  */
11832         return 0;
11833 #endif
11834 #ifdef TARGET_NR_fcntl64
11835     case TARGET_NR_fcntl64:
11836     {
11837         int cmd;
11838         struct flock64 fl;
11839         from_flock64_fn *copyfrom = copy_from_user_flock64;
11840         to_flock64_fn *copyto = copy_to_user_flock64;
11841 
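              /*
               * The old ARM OABI packs struct flock64 without the padding
               * that EABI inserts before the 64-bit l_start field, so it
               * needs its own copy helpers.
               */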
11842 #ifdef TARGET_ARM
11843         if (!((CPUARMState *)cpu_env)->eabi) {
11844             copyfrom = copy_from_user_oabi_flock64;
11845             copyto = copy_to_user_oabi_flock64;
11846         }
11847 #endif
11848 
11849         cmd = target_to_host_fcntl_cmd(arg2);
11850         if (cmd == -TARGET_EINVAL) {
11851             return cmd;
11852         }
11853 
11854         switch (arg2) {
11855         case TARGET_F_GETLK64:
11856             ret = copyfrom(&fl, arg3);
11857             if (ret) {
11858                 break;
11859             }
11860             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11861             if (ret == 0) {
11862                 ret = copyto(arg3, &fl);
11863             }
11864             break;
11865 
11866         case TARGET_F_SETLK64:
11867         case TARGET_F_SETLKW64:
11868             ret = copyfrom(&fl, arg3);
11869             if (ret) {
11870                 break;
11871             }
11872             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11873             break;
11874         default:
11875             ret = do_fcntl(arg1, arg2, arg3);
11876             break;
11877         }
11878         return ret;
11879     }
11880 #endif
11881 #ifdef TARGET_NR_cacheflush
11882     case TARGET_NR_cacheflush:
11883         /* self-modifying code is handled automatically, so nothing needed */
11884         return 0;
11885 #endif
11886 #ifdef TARGET_NR_getpagesize
11887     case TARGET_NR_getpagesize:
11888         return TARGET_PAGE_SIZE;
11889 #endif
11890     case TARGET_NR_gettid:
11891         return get_errno(sys_gettid());
11892 #ifdef TARGET_NR_readahead
11893     case TARGET_NR_readahead:
11894 #if TARGET_ABI_BITS == 32
11895         if (regpairs_aligned(cpu_env, num)) {
11896             arg2 = arg3;
11897             arg3 = arg4;
11898             arg4 = arg5;
11899         }
11900         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11901 #else
11902         ret = get_errno(readahead(arg1, arg2, arg3));
11903 #endif
11904         return ret;
11905 #endif
11906 #ifdef CONFIG_ATTR
11907 #ifdef TARGET_NR_setxattr
11908     case TARGET_NR_listxattr:
11909     case TARGET_NR_llistxattr:
11910     {
11911         void *p, *b = 0;
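              /*
               * A NULL buffer (arg2 == 0) is the usual "report the needed
               * size" query: the host call is made with a NULL pointer and
               * returns the length the guest must allocate.
               */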
11912         if (arg2) {
11913             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11914             if (!b) {
11915                 return -TARGET_EFAULT;
11916             }
11917         }
11918         p = lock_user_string(arg1);
11919         if (p) {
11920             if (num == TARGET_NR_listxattr) {
11921                 ret = get_errno(listxattr(p, b, arg3));
11922             } else {
11923                 ret = get_errno(llistxattr(p, b, arg3));
11924             }
11925         } else {
11926             ret = -TARGET_EFAULT;
11927         }
11928         unlock_user(p, arg1, 0);
11929         unlock_user(b, arg2, arg3);
11930         return ret;
11931     }
11932     case TARGET_NR_flistxattr:
11933     {
11934         void *b = 0;
11935         if (arg2) {
11936             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11937             if (!b) {
11938                 return -TARGET_EFAULT;
11939             }
11940         }
11941         ret = get_errno(flistxattr(arg1, b, arg3));
11942         unlock_user(b, arg2, arg3);
11943         return ret;
11944     }
11945     case TARGET_NR_setxattr:
11946     case TARGET_NR_lsetxattr:
11947         {
11948             void *p, *n, *v = 0;
11949             if (arg3) {
11950                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11951                 if (!v) {
11952                     return -TARGET_EFAULT;
11953                 }
11954             }
11955             p = lock_user_string(arg1);
11956             n = lock_user_string(arg2);
11957             if (p && n) {
11958                 if (num == TARGET_NR_setxattr) {
11959                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11960                 } else {
11961                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11962                 }
11963             } else {
11964                 ret = -TARGET_EFAULT;
11965             }
11966             unlock_user(p, arg1, 0);
11967             unlock_user(n, arg2, 0);
11968             unlock_user(v, arg3, 0);
11969         }
11970         return ret;
11971     case TARGET_NR_fsetxattr:
11972         {
11973             void *n, *v = 0;
11974             if (arg3) {
11975                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11976                 if (!v) {
11977                     return -TARGET_EFAULT;
11978                 }
11979             }
11980             n = lock_user_string(arg2);
11981             if (n) {
11982                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11983             } else {
11984                 ret = -TARGET_EFAULT;
11985             }
11986             unlock_user(n, arg2, 0);
11987             unlock_user(v, arg3, 0);
11988         }
11989         return ret;
11990     case TARGET_NR_getxattr:
11991     case TARGET_NR_lgetxattr:
11992         {
11993             void *p, *n, *v = 0;
11994             if (arg3) {
11995                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11996                 if (!v) {
11997                     return -TARGET_EFAULT;
11998                 }
11999             }
12000             p = lock_user_string(arg1);
12001             n = lock_user_string(arg2);
12002             if (p && n) {
12003                 if (num == TARGET_NR_getxattr) {
12004                     ret = get_errno(getxattr(p, n, v, arg4));
12005                 } else {
12006                     ret = get_errno(lgetxattr(p, n, v, arg4));
12007                 }
12008             } else {
12009                 ret = -TARGET_EFAULT;
12010             }
12011             unlock_user(p, arg1, 0);
12012             unlock_user(n, arg2, 0);
12013             unlock_user(v, arg3, arg4);
12014         }
12015         return ret;
12016     case TARGET_NR_fgetxattr:
12017         {
12018             void *n, *v = 0;
12019             if (arg3) {
12020                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12021                 if (!v) {
12022                     return -TARGET_EFAULT;
12023                 }
12024             }
12025             n = lock_user_string(arg2);
12026             if (n) {
12027                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12028             } else {
12029                 ret = -TARGET_EFAULT;
12030             }
12031             unlock_user(n, arg2, 0);
12032             unlock_user(v, arg3, arg4);
12033         }
12034         return ret;
12035     case TARGET_NR_removexattr:
12036     case TARGET_NR_lremovexattr:
12037         {
12038             void *p, *n;
12039             p = lock_user_string(arg1);
12040             n = lock_user_string(arg2);
12041             if (p && n) {
12042                 if (num == TARGET_NR_removexattr) {
12043                     ret = get_errno(removexattr(p, n));
12044                 } else {
12045                     ret = get_errno(lremovexattr(p, n));
12046                 }
12047             } else {
12048                 ret = -TARGET_EFAULT;
12049             }
12050             unlock_user(p, arg1, 0);
12051             unlock_user(n, arg2, 0);
12052         }
12053         return ret;
12054     case TARGET_NR_fremovexattr:
12055         {
12056             void *n;
12057             n = lock_user_string(arg2);
12058             if (n) {
12059                 ret = get_errno(fremovexattr(arg1, n));
12060             } else {
12061                 ret = -TARGET_EFAULT;
12062             }
12063             unlock_user(n, arg2, 0);
12064         }
12065         return ret;
12066 #endif
12067 #endif /* CONFIG_ATTR */
12068 #ifdef TARGET_NR_set_thread_area
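          /*
           * set_thread_area is inherently per-architecture: most targets
           * simply store the TLS pointer in the relevant CPU state or
           * TaskState field, and targets with no such concept get ENOSYS.
           */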
12069     case TARGET_NR_set_thread_area:
12070 #if defined(TARGET_MIPS)
12071       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12072       return 0;
12073 #elif defined(TARGET_CRIS)
12074       if (arg1 & 0xff) {
12075           ret = -TARGET_EINVAL;
12076       } else {
12077           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12078           ret = 0;
12079       }
12080       return ret;
12081 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12082       return do_set_thread_area(cpu_env, arg1);
12083 #elif defined(TARGET_M68K)
12084       {
12085           TaskState *ts = cpu->opaque;
12086           ts->tp_value = arg1;
12087           return 0;
12088       }
12089 #else
12090       return -TARGET_ENOSYS;
12091 #endif
12092 #endif
12093 #ifdef TARGET_NR_get_thread_area
12094     case TARGET_NR_get_thread_area:
12095 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12096         return do_get_thread_area(cpu_env, arg1);
12097 #elif defined(TARGET_M68K)
12098         {
12099             TaskState *ts = cpu->opaque;
12100             return ts->tp_value;
12101         }
12102 #else
12103         return -TARGET_ENOSYS;
12104 #endif
12105 #endif
12106 #ifdef TARGET_NR_getdomainname
12107     case TARGET_NR_getdomainname:
12108         return -TARGET_ENOSYS;
12109 #endif
12110 
12111 #ifdef TARGET_NR_clock_settime
12112     case TARGET_NR_clock_settime:
12113     {
12114         struct timespec ts;
12115 
12116         ret = target_to_host_timespec(&ts, arg2);
12117         if (!is_error(ret)) {
12118             ret = get_errno(clock_settime(arg1, &ts));
12119         }
12120         return ret;
12121     }
12122 #endif
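          /*
           * The *_time64 syscall variants below differ from their plain
           * counterparts only in using the 64-bit target time conversion
           * helpers, so that 32-bit guests can pass post-2038 times.
           */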
12123 #ifdef TARGET_NR_clock_settime64
12124     case TARGET_NR_clock_settime64:
12125     {
12126         struct timespec ts;
12127 
12128         ret = target_to_host_timespec64(&ts, arg2);
12129         if (!is_error(ret)) {
12130             ret = get_errno(clock_settime(arg1, &ts));
12131         }
12132         return ret;
12133     }
12134 #endif
12135 #ifdef TARGET_NR_clock_gettime
12136     case TARGET_NR_clock_gettime:
12137     {
12138         struct timespec ts;
12139         ret = get_errno(clock_gettime(arg1, &ts));
12140         if (!is_error(ret)) {
12141             ret = host_to_target_timespec(arg2, &ts);
12142         }
12143         return ret;
12144     }
12145 #endif
12146 #ifdef TARGET_NR_clock_gettime64
12147     case TARGET_NR_clock_gettime64:
12148     {
12149         struct timespec ts;
12150         ret = get_errno(clock_gettime(arg1, &ts));
12151         if (!is_error(ret)) {
12152             ret = host_to_target_timespec64(arg2, &ts);
12153         }
12154         return ret;
12155     }
12156 #endif
12157 #ifdef TARGET_NR_clock_getres
12158     case TARGET_NR_clock_getres:
12159     {
12160         struct timespec ts;
12161         ret = get_errno(clock_getres(arg1, &ts));
12162         if (!is_error(ret)) {
12163             host_to_target_timespec(arg2, &ts);
12164         }
12165         return ret;
12166     }
12167 #endif
12168 #ifdef TARGET_NR_clock_getres_time64
12169     case TARGET_NR_clock_getres_time64:
12170     {
12171         struct timespec ts;
12172         ret = get_errno(clock_getres(arg1, &ts));
12173         if (!is_error(ret)) {
12174             host_to_target_timespec64(arg2, &ts);
12175         }
12176         return ret;
12177     }
12178 #endif
12179 #ifdef TARGET_NR_clock_nanosleep
12180     case TARGET_NR_clock_nanosleep:
12181     {
12182         struct timespec ts;
12183         if (target_to_host_timespec(&ts, arg3)) {
12184             return -TARGET_EFAULT;
12185         }
12186         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12187                                              &ts, arg4 ? &ts : NULL));
12188         /*
12189          * If the call is interrupted by a signal handler, it fails with
12190          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12191          * TIMER_ABSTIME, the remaining unslept time is reported back in arg4.
12192          */
12193         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12194             host_to_target_timespec(arg4, &ts)) {
12195               return -TARGET_EFAULT;
12196         }
12197 
12198         return ret;
12199     }
12200 #endif
12201 #ifdef TARGET_NR_clock_nanosleep_time64
12202     case TARGET_NR_clock_nanosleep_time64:
12203     {
12204         struct timespec ts;
12205 
12206         if (target_to_host_timespec64(&ts, arg3)) {
12207             return -TARGET_EFAULT;
12208         }
12209 
12210         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12211                                              &ts, arg4 ? &ts : NULL));
12212 
12213         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12214             host_to_target_timespec64(arg4, &ts)) {
12215             return -TARGET_EFAULT;
12216         }
12217         return ret;
12218     }
12219 #endif
12220 
12221 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
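          /*
           * The kernel only records this address (to clear it and do a
           * futex wake when the thread exits), so handing it the host
           * view of the guest pointer via g2h() is sufficient.
           */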
12222     case TARGET_NR_set_tid_address:
12223         return get_errno(set_tid_address((int *)g2h(arg1)));
12224 #endif
12225 
12226     case TARGET_NR_tkill:
12227         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12228 
12229     case TARGET_NR_tgkill:
12230         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12231                          target_to_host_signal(arg3)));
12232 
12233 #ifdef TARGET_NR_set_robust_list
12234     case TARGET_NR_set_robust_list:
12235     case TARGET_NR_get_robust_list:
12236         /* The ABI for supporting robust futexes has userspace pass
12237          * the kernel a pointer to a linked list which is updated by
12238          * userspace after the syscall; the list is walked by the kernel
12239          * when the thread exits. Since the linked list in QEMU guest
12240          * memory isn't a valid linked list for the host and we have
12241          * no way to reliably intercept the thread-death event, we can't
12242          * support these. Silently return ENOSYS so that guest userspace
12243          * falls back to a non-robust futex implementation (which should
12244          * be OK except in the corner case of the guest crashing while
12245          * holding a mutex that is shared with another process via
12246          * shared memory).
12247          */
12248         return -TARGET_ENOSYS;
12249 #endif
12250 
12251 #if defined(TARGET_NR_utimensat)
12252     case TARGET_NR_utimensat:
12253         {
12254             struct timespec *tsp, ts[2];
12255             if (!arg3) {
12256                 tsp = NULL;
12257             } else {
12258                 if (target_to_host_timespec(ts, arg3)) {
12259                     return -TARGET_EFAULT;
12260                 }
12261                 if (target_to_host_timespec(ts + 1, arg3 +
12262                                             sizeof(struct target_timespec))) {
12263                     return -TARGET_EFAULT;
12264                 }
12265                 tsp = ts;
12266             }
12267             if (!arg2) {
12268                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12269             } else {
12270                 if (!(p = lock_user_string(arg2))) {
12271                     return -TARGET_EFAULT;
12272                 }
12273                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12274                 unlock_user(p, arg2, 0);
12275             }
12276         }
12277         return ret;
12278 #endif
12279 #ifdef TARGET_NR_utimensat_time64
12280     case TARGET_NR_utimensat_time64:
12281         {
12282             struct timespec *tsp, ts[2];
12283             if (!arg3) {
12284                 tsp = NULL;
12285             } else {
12286                 if (target_to_host_timespec64(ts, arg3)) {
12287                     return -TARGET_EFAULT;
12288                 }
12289                 if (target_to_host_timespec64(ts + 1, arg3 +
12290                                      sizeof(struct target__kernel_timespec))) {
12291                     return -TARGET_EFAULT;
12292                 }
12293                 tsp = ts;
12294             }
12295             if (!arg2) {
12296                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12297             } else {
12298                 p = lock_user_string(arg2);
12299                 if (!p) {
12300                     return -TARGET_EFAULT;
12301                 }
12302                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12303                 unlock_user(p, arg2, 0);
12304             }
12305         }
12306         return ret;
12307 #endif
12308 #ifdef TARGET_NR_futex
12309     case TARGET_NR_futex:
12310         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12311 #endif
12312 #ifdef TARGET_NR_futex_time64
12313     case TARGET_NR_futex_time64:
12314         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12315 #endif
12316 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
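          /*
           * fd_trans_register() attaches a translator to the new
           * descriptor so that inotify events later read from it are
           * converted to the layout the guest expects.
           */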
12317     case TARGET_NR_inotify_init:
12318         ret = get_errno(sys_inotify_init());
12319         if (ret >= 0) {
12320             fd_trans_register(ret, &target_inotify_trans);
12321         }
12322         return ret;
12323 #endif
12324 #ifdef CONFIG_INOTIFY1
12325 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12326     case TARGET_NR_inotify_init1:
12327         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12328                                           fcntl_flags_tbl)));
12329         if (ret >= 0) {
12330             fd_trans_register(ret, &target_inotify_trans);
12331         }
12332         return ret;
12333 #endif
12334 #endif
12335 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12336     case TARGET_NR_inotify_add_watch:
12337         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12338         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12339         unlock_user(p, arg2, 0);
12340         return ret;
12341 #endif
12342 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12343     case TARGET_NR_inotify_rm_watch:
12344         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12345 #endif
12346 
12347 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12348     case TARGET_NR_mq_open:
12349         {
12350             struct mq_attr posix_mq_attr;
12351             struct mq_attr *pposix_mq_attr;
12352             int host_flags;
12353 
12354             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12355             pposix_mq_attr = NULL;
12356             if (arg4) {
12357                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12358                     return -TARGET_EFAULT;
12359                 }
12360                 pposix_mq_attr = &posix_mq_attr;
12361             }
12362             p = lock_user_string(arg1 - 1);
12363             if (!p) {
12364                 return -TARGET_EFAULT;
12365             }
12366             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12367             unlock_user(p, arg1, 0);
12368         }
12369         return ret;
12370 
12371     case TARGET_NR_mq_unlink:
12372         p = lock_user_string(arg1 - 1);
12373         if (!p) {
12374             return -TARGET_EFAULT;
12375         }
12376         ret = get_errno(mq_unlink(p));
12377         unlock_user(p, arg1, 0);
12378         return ret;
12379 
12380 #ifdef TARGET_NR_mq_timedsend
12381     case TARGET_NR_mq_timedsend:
12382         {
12383             struct timespec ts;
12384 
12385             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12386             if (arg5 != 0) {
12387                 if (target_to_host_timespec(&ts, arg5)) {
12388                     return -TARGET_EFAULT;
12389                 }
12390                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12391                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12392                     return -TARGET_EFAULT;
12393                 }
12394             } else {
12395                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12396             }
12397             unlock_user(p, arg2, arg3);
12398         }
12399         return ret;
12400 #endif
12401 #ifdef TARGET_NR_mq_timedsend_time64
12402     case TARGET_NR_mq_timedsend_time64:
12403         {
12404             struct timespec ts;
12405 
12406             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12407             if (arg5 != 0) {
12408                 if (target_to_host_timespec64(&ts, arg5)) {
12409                     return -TARGET_EFAULT;
12410                 }
12411                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12412                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12413                     return -TARGET_EFAULT;
12414                 }
12415             } else {
12416                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12417             }
12418             unlock_user(p, arg2, arg3);
12419         }
12420         return ret;
12421 #endif
12422 
12423 #ifdef TARGET_NR_mq_timedreceive
12424     case TARGET_NR_mq_timedreceive:
12425         {
12426             struct timespec ts;
12427             unsigned int prio;
12428 
12429             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12430             if (arg5 != 0) {
12431                 if (target_to_host_timespec(&ts, arg5)) {
12432                     return -TARGET_EFAULT;
12433                 }
12434                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12435                                                      &prio, &ts));
12436                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12437                     return -TARGET_EFAULT;
12438                 }
12439             } else {
12440                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12441                                                      &prio, NULL));
12442             }
12443             unlock_user(p, arg2, arg3);
12444             if (arg4 != 0) {
12445                 put_user_u32(prio, arg4);
                  }
12446         }
12447         return ret;
12448 #endif
12449 #ifdef TARGET_NR_mq_timedreceive_time64
12450     case TARGET_NR_mq_timedreceive_time64:
12451         {
12452             struct timespec ts;
12453             unsigned int prio;
12454 
12455             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12456             if (arg5 != 0) {
12457                 if (target_to_host_timespec64(&ts, arg5)) {
12458                     return -TARGET_EFAULT;
12459                 }
12460                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12461                                                      &prio, &ts));
12462                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12463                     return -TARGET_EFAULT;
12464                 }
12465             } else {
12466                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12467                                                      &prio, NULL));
12468             }
12469             unlock_user(p, arg2, arg3);
12470             if (arg4 != 0) {
12471                 put_user_u32(prio, arg4);
12472             }
12473         }
12474         return ret;
12475 #endif
12476 
12477     /* Not implemented for now... */
12478 /*     case TARGET_NR_mq_notify: */
12479 /*         break; */
12480 
12481     case TARGET_NR_mq_getsetattr:
12482         {
12483             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12484             ret = 0;
12485             if (arg2 != 0) {
12486                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12487                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12488                                            &posix_mq_attr_out));
12489             } else if (arg3 != 0) {
12490                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12491             }
12492             if (ret == 0 && arg3 != 0) {
12493                 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                          return -TARGET_EFAULT;
                      }
12494             }
12495         }
12496         return ret;
12497 #endif
12498 
12499 #ifdef CONFIG_SPLICE
12500 #ifdef TARGET_NR_tee
12501     case TARGET_NR_tee:
12502         {
12503             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12504         }
12505         return ret;
12506 #endif
12507 #ifdef TARGET_NR_splice
12508     case TARGET_NR_splice:
12509         {
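                  /*
                   * The optional in/out offsets are read from guest memory,
                   * passed to the host by pointer, and written back
                   * afterwards.
                   */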
12510             loff_t loff_in, loff_out;
12511             loff_t *ploff_in = NULL, *ploff_out = NULL;
12512             if (arg2) {
12513                 if (get_user_u64(loff_in, arg2)) {
12514                     return -TARGET_EFAULT;
12515                 }
12516                 ploff_in = &loff_in;
12517             }
12518             if (arg4) {
12519                 if (get_user_u64(loff_out, arg4)) {
12520                     return -TARGET_EFAULT;
12521                 }
12522                 ploff_out = &loff_out;
12523             }
12524             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12525             if (arg2) {
12526                 if (put_user_u64(loff_in, arg2)) {
12527                     return -TARGET_EFAULT;
12528                 }
12529             }
12530             if (arg4) {
12531                 if (put_user_u64(loff_out, arg4)) {
12532                     return -TARGET_EFAULT;
12533                 }
12534             }
12535         }
12536         return ret;
12537 #endif
12538 #ifdef TARGET_NR_vmsplice
12539     case TARGET_NR_vmsplice:
12540         {
12541             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12542             if (vec != NULL) {
12543                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12544                 unlock_iovec(vec, arg2, arg3, 0);
12545             } else {
12546                 ret = -host_to_target_errno(errno);
12547             }
12548         }
12549         return ret;
12550 #endif
12551 #endif /* CONFIG_SPLICE */
12552 #ifdef CONFIG_EVENTFD
12553 #if defined(TARGET_NR_eventfd)
12554     case TARGET_NR_eventfd:
12555         ret = get_errno(eventfd(arg1, 0));
12556         if (ret >= 0) {
12557             fd_trans_register(ret, &target_eventfd_trans);
12558         }
12559         return ret;
12560 #endif
12561 #if defined(TARGET_NR_eventfd2)
12562     case TARGET_NR_eventfd2:
12563     {
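              /*
               * Only the O_NONBLOCK and O_CLOEXEC bits (shared with
               * EFD_NONBLOCK and EFD_CLOEXEC) need translating; any other
               * bits are passed through to the host unchanged.
               */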
12564         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12565         if (arg2 & TARGET_O_NONBLOCK) {
12566             host_flags |= O_NONBLOCK;
12567         }
12568         if (arg2 & TARGET_O_CLOEXEC) {
12569             host_flags |= O_CLOEXEC;
12570         }
12571         ret = get_errno(eventfd(arg1, host_flags));
12572         if (ret >= 0) {
12573             fd_trans_register(ret, &target_eventfd_trans);
12574         }
12575         return ret;
12576     }
12577 #endif
12578 #endif /* CONFIG_EVENTFD  */
12579 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12580     case TARGET_NR_fallocate:
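              /*
               * On 32-bit ABIs the 64-bit offset and length each arrive
               * split across two registers and are reassembled with
               * target_offset64().
               */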
12581 #if TARGET_ABI_BITS == 32
12582         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12583                                   target_offset64(arg5, arg6)));
12584 #else
12585         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12586 #endif
12587         return ret;
12588 #endif
12589 #if defined(CONFIG_SYNC_FILE_RANGE)
12590 #if defined(TARGET_NR_sync_file_range)
12591     case TARGET_NR_sync_file_range:
12592 #if TARGET_ABI_BITS == 32
12593 #if defined(TARGET_MIPS)
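              /*
               * 32-bit MIPS passes 64-bit arguments in aligned register
               * pairs, so a padding slot follows the fd and the offsets
               * start at arg3.
               */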
12594         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12595                                         target_offset64(arg5, arg6), arg7));
12596 #else
12597         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12598                                         target_offset64(arg4, arg5), arg6));
12599 #endif /* !TARGET_MIPS */
12600 #else
12601         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12602 #endif
12603         return ret;
12604 #endif
12605 #if defined(TARGET_NR_sync_file_range2) || \
12606     defined(TARGET_NR_arm_sync_file_range)
12607 #if defined(TARGET_NR_sync_file_range2)
12608     case TARGET_NR_sync_file_range2:
12609 #endif
12610 #if defined(TARGET_NR_arm_sync_file_range)
12611     case TARGET_NR_arm_sync_file_range:
12612 #endif
12613         /* This is like sync_file_range but the arguments are reordered */
12614 #if TARGET_ABI_BITS == 32
12615         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12616                                         target_offset64(arg5, arg6), arg2));
12617 #else
12618         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12619 #endif
12620         return ret;
12621 #endif
12622 #endif
12623 #if defined(TARGET_NR_signalfd4)
12624     case TARGET_NR_signalfd4:
12625         return do_signalfd4(arg1, arg2, arg4);
12626 #endif
12627 #if defined(TARGET_NR_signalfd)
12628     case TARGET_NR_signalfd:
12629         return do_signalfd4(arg1, arg2, 0);
12630 #endif
12631 #if defined(CONFIG_EPOLL)
12632 #if defined(TARGET_NR_epoll_create)
12633     case TARGET_NR_epoll_create:
12634         return get_errno(epoll_create(arg1));
12635 #endif
12636 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12637     case TARGET_NR_epoll_create1:
12638         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12639 #endif
12640 #if defined(TARGET_NR_epoll_ctl)
12641     case TARGET_NR_epoll_ctl:
12642     {
12643         struct epoll_event ep;
12644         struct epoll_event *epp = 0;
12645         if (arg4) {
12646             if (arg2 != EPOLL_CTL_DEL) {
12647                 struct target_epoll_event *target_ep;
12648                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12649                     return -TARGET_EFAULT;
12650                 }
12651                 ep.events = tswap32(target_ep->events);
12652                 /*
12653                  * The epoll_data_t union is just opaque data to the kernel,
12654                  * so we transfer all 64 bits across and need not worry what
12655                  * actual data type it is.
12656                  */
12657                 ep.data.u64 = tswap64(target_ep->data.u64);
12658                 unlock_user_struct(target_ep, arg4, 0);
12659             }
12660             /*
12661              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12662              * non-null pointer, even though this argument is ignored, so
12663              * a valid host pointer is passed for EPOLL_CTL_DEL as well.
12664              */
12665             epp = &ep;
12666         }
12667         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12668     }
12669 #endif
12670 
12671 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
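          /*
           * epoll_wait and epoll_pwait share one implementation: both are
           * funnelled through safe_epoll_pwait(), with a NULL signal mask
           * for plain epoll_wait, and the resulting events are byte-swapped
           * back into the guest's event array.
           */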
12672 #if defined(TARGET_NR_epoll_wait)
12673     case TARGET_NR_epoll_wait:
12674 #endif
12675 #if defined(TARGET_NR_epoll_pwait)
12676     case TARGET_NR_epoll_pwait:
12677 #endif
12678     {
12679         struct target_epoll_event *target_ep;
12680         struct epoll_event *ep;
12681         int epfd = arg1;
12682         int maxevents = arg3;
12683         int timeout = arg4;
12684 
12685         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12686             return -TARGET_EINVAL;
12687         }
12688 
12689         target_ep = lock_user(VERIFY_WRITE, arg2,
12690                               maxevents * sizeof(struct target_epoll_event), 1);
12691         if (!target_ep) {
12692             return -TARGET_EFAULT;
12693         }
12694 
12695         ep = g_try_new(struct epoll_event, maxevents);
12696         if (!ep) {
12697             unlock_user(target_ep, arg2, 0);
12698             return -TARGET_ENOMEM;
12699         }
12700 
12701         switch (num) {
12702 #if defined(TARGET_NR_epoll_pwait)
12703         case TARGET_NR_epoll_pwait:
12704         {
12705             target_sigset_t *target_set;
12706             sigset_t _set, *set = &_set;
12707 
12708             if (arg5) {
12709                 if (arg6 != sizeof(target_sigset_t)) {
12710                     ret = -TARGET_EINVAL;
12711                     break;
12712                 }
12713 
12714                 target_set = lock_user(VERIFY_READ, arg5,
12715                                        sizeof(target_sigset_t), 1);
12716                 if (!target_set) {
12717                     ret = -TARGET_EFAULT;
12718                     break;
12719                 }
12720                 target_to_host_sigset(set, target_set);
12721                 unlock_user(target_set, arg5, 0);
12722             } else {
12723                 set = NULL;
12724             }
12725 
12726             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12727                                              set, SIGSET_T_SIZE));
12728             break;
12729         }
12730 #endif
12731 #if defined(TARGET_NR_epoll_wait)
12732         case TARGET_NR_epoll_wait:
12733             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12734                                              NULL, 0));
12735             break;
12736 #endif
12737         default:
12738             ret = -TARGET_ENOSYS;
12739         }
12740         if (!is_error(ret)) {
12741             int i;
12742             for (i = 0; i < ret; i++) {
12743                 target_ep[i].events = tswap32(ep[i].events);
12744                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12745             }
12746             unlock_user(target_ep, arg2,
12747                         ret * sizeof(struct target_epoll_event));
12748         } else {
12749             unlock_user(target_ep, arg2, 0);
12750         }
12751         g_free(ep);
12752         return ret;
12753     }
12754 #endif
12755 #endif
12756 #ifdef TARGET_NR_prlimit64
12757     case TARGET_NR_prlimit64:
12758     {
12759         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12760         struct target_rlimit64 *target_rnew, *target_rold;
12761         struct host_rlimit64 rnew, rold, *rnewp = 0;
12762         int resource = target_to_host_resource(arg2);
12763 
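              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
               * deliberately not forwarded to the host, presumably because
               * they would constrain the QEMU process itself rather than
               * just the emulated guest.
               */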
12764         if (arg3 && (resource != RLIMIT_AS &&
12765                      resource != RLIMIT_DATA &&
12766                      resource != RLIMIT_STACK)) {
12767             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12768                 return -TARGET_EFAULT;
12769             }
12770             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12771             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12772             unlock_user_struct(target_rnew, arg3, 0);
12773             rnewp = &rnew;
12774         }
12775 
12776         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12777         if (!is_error(ret) && arg4) {
12778             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12779                 return -TARGET_EFAULT;
12780             }
12781             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12782             target_rold->rlim_max = tswap64(rold.rlim_max);
12783             unlock_user_struct(target_rold, arg4, 1);
12784         }
12785         return ret;
12786     }
12787 #endif
12788 #ifdef TARGET_NR_gethostname
12789     case TARGET_NR_gethostname:
12790     {
12791         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12792         if (name) {
12793             ret = get_errno(gethostname(name, arg2));
12794             unlock_user(name, arg1, arg2);
12795         } else {
12796             ret = -TARGET_EFAULT;
12797         }
12798         return ret;
12799     }
12800 #endif
12801 #ifdef TARGET_NR_atomic_cmpxchg_32
12802     case TARGET_NR_atomic_cmpxchg_32:
12803     {
12804         /* should use start_exclusive from main.c */
12805         abi_ulong mem_value;
12806         if (get_user_u32(mem_value, arg6)) {
12807             target_siginfo_t info;
12808             info.si_signo = SIGSEGV;
12809             info.si_errno = 0;
12810             info.si_code = TARGET_SEGV_MAPERR;
12811             info._sifields._sigfault._addr = arg6;
12812             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12813                          QEMU_SI_FAULT, &info);
12814             ret = 0xdeadbeef;
12815 
12816         }
12817         if (mem_value == arg2) {
12818             put_user_u32(arg1, arg6);
              }
12819         return mem_value;
12820     }
12821 #endif
12822 #ifdef TARGET_NR_atomic_barrier
12823     case TARGET_NR_atomic_barrier:
12824         /* Like the kernel implementation and the
12825            QEMU ARM barrier, treat this as a no-op. */
12826         return 0;
12827 #endif
12828 
12829 #ifdef TARGET_NR_timer_create
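          /*
           * POSIX timers: the host timer_t handles live in g_posix_timers[];
           * the guest-visible timer id is TIMER_MAGIC | index, which
           * get_timer_id() validates and decodes in the other timer_*
           * syscalls below.
           */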
12830     case TARGET_NR_timer_create:
12831     {
12832         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12833 
12834         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12835 
12836         int clkid = arg1;
12837         int timer_index = next_free_host_timer();
12838 
12839         if (timer_index < 0) {
12840             ret = -TARGET_EAGAIN;
12841         } else {
12842             timer_t *phtimer = g_posix_timers + timer_index;
12843 
12844             if (arg2) {
12845                 phost_sevp = &host_sevp;
12846                 ret = target_to_host_sigevent(phost_sevp, arg2);
12847                 if (ret != 0) {
12848                     return ret;
12849                 }
12850             }
12851 
12852             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12853             if (ret) {
12854                 phtimer = NULL;
12855             } else {
12856                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12857                     return -TARGET_EFAULT;
12858                 }
12859             }
12860         }
12861         return ret;
12862     }
12863 #endif
12864 
12865 #ifdef TARGET_NR_timer_settime
12866     case TARGET_NR_timer_settime:
12867     {
12868         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12869          * struct itimerspec * old_value */
12870         target_timer_t timerid = get_timer_id(arg1);
12871 
12872         if (timerid < 0) {
12873             ret = timerid;
12874         } else if (arg3 == 0) {
12875             ret = -TARGET_EINVAL;
12876         } else {
12877             timer_t htimer = g_posix_timers[timerid];
12878             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12879 
12880             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12881                 return -TARGET_EFAULT;
12882             }
12883             ret = get_errno(
12884                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12885             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12886                 return -TARGET_EFAULT;
12887             }
12888         }
12889         return ret;
12890     }
12891 #endif
12892 
12893 #ifdef TARGET_NR_timer_settime64
12894     case TARGET_NR_timer_settime64:
12895     {
12896         target_timer_t timerid = get_timer_id(arg1);
12897 
12898         if (timerid < 0) {
12899             ret = timerid;
12900         } else if (arg3 == 0) {
12901             ret = -TARGET_EINVAL;
12902         } else {
12903             timer_t htimer = g_posix_timers[timerid];
12904             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12905 
12906             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12907                 return -TARGET_EFAULT;
12908             }
12909             ret = get_errno(
12910                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12911             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12912                 return -TARGET_EFAULT;
12913             }
12914         }
12915         return ret;
12916     }
12917 #endif
12918 
12919 #ifdef TARGET_NR_timer_gettime
12920     case TARGET_NR_timer_gettime:
12921     {
12922         /* args: timer_t timerid, struct itimerspec *curr_value */
12923         target_timer_t timerid = get_timer_id(arg1);
12924 
12925         if (timerid < 0) {
12926             ret = timerid;
12927         } else if (!arg2) {
12928             ret = -TARGET_EFAULT;
12929         } else {
12930             timer_t htimer = g_posix_timers[timerid];
12931             struct itimerspec hspec;
12932             ret = get_errno(timer_gettime(htimer, &hspec));
12933 
12934             if (host_to_target_itimerspec(arg2, &hspec)) {
12935                 ret = -TARGET_EFAULT;
12936             }
12937         }
12938         return ret;
12939     }
12940 #endif
12941 
12942 #ifdef TARGET_NR_timer_gettime64
12943     case TARGET_NR_timer_gettime64:
12944     {
12945         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12946         target_timer_t timerid = get_timer_id(arg1);
12947 
12948         if (timerid < 0) {
12949             ret = timerid;
12950         } else if (!arg2) {
12951             ret = -TARGET_EFAULT;
12952         } else {
12953             timer_t htimer = g_posix_timers[timerid];
12954             struct itimerspec hspec;
12955             ret = get_errno(timer_gettime(htimer, &hspec));
12956 
12957             if (host_to_target_itimerspec64(arg2, &hspec)) {
12958                 ret = -TARGET_EFAULT;
12959             }
12960         }
12961         return ret;
12962     }
12963 #endif
12964 
12965 #ifdef TARGET_NR_timer_getoverrun
12966     case TARGET_NR_timer_getoverrun:
12967     {
12968         /* args: timer_t timerid */
12969         target_timer_t timerid = get_timer_id(arg1);
12970 
12971         if (timerid < 0) {
12972             ret = timerid;
12973         } else {
12974             timer_t htimer = g_posix_timers[timerid];
12975             ret = get_errno(timer_getoverrun(htimer));
12976         }
12977         return ret;
12978     }
12979 #endif
12980 
12981 #ifdef TARGET_NR_timer_delete
12982     case TARGET_NR_timer_delete:
12983     {
12984         /* args: timer_t timerid */
12985         target_timer_t timerid = get_timer_id(arg1);
12986 
12987         if (timerid < 0) {
12988             ret = timerid;
12989         } else {
12990             timer_t htimer = g_posix_timers[timerid];
12991             ret = get_errno(timer_delete(htimer));
12992             g_posix_timers[timerid] = 0;
12993         }
12994         return ret;
12995     }
12996 #endif
12997 
12998 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12999     case TARGET_NR_timerfd_create:
13000         return get_errno(timerfd_create(arg1,
13001                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13002 #endif
13003 
13004 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13005     case TARGET_NR_timerfd_gettime:
13006         {
13007             struct itimerspec its_curr;
13008 
13009             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13010 
13011             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13012                 return -TARGET_EFAULT;
13013             }
13014         }
13015         return ret;
13016 #endif
13017 
13018 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13019     case TARGET_NR_timerfd_gettime64:
13020         {
13021             struct itimerspec its_curr;
13022 
13023             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13024 
13025             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13026                 return -TARGET_EFAULT;
13027             }
13028         }
13029         return ret;
13030 #endif
13031 
13032 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13033     case TARGET_NR_timerfd_settime:
13034         {
13035             struct itimerspec its_new, its_old, *p_new;
13036 
13037             if (arg3) {
13038                 if (target_to_host_itimerspec(&its_new, arg3)) {
13039                     return -TARGET_EFAULT;
13040                 }
13041                 p_new = &its_new;
13042             } else {
13043                 p_new = NULL;
13044             }
13045 
13046             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13047 
13048             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13049                 return -TARGET_EFAULT;
13050             }
13051         }
13052         return ret;
13053 #endif
13054 
13055 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13056     case TARGET_NR_timerfd_settime64:
13057         {
13058             struct itimerspec its_new, its_old, *p_new;
13059 
13060             if (arg3) {
13061                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13062                     return -TARGET_EFAULT;
13063                 }
13064                 p_new = &its_new;
13065             } else {
13066                 p_new = NULL;
13067             }
13068 
13069             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13070 
13071             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13072                 return -TARGET_EFAULT;
13073             }
13074         }
13075         return ret;
13076 #endif
13077 
13078 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13079     case TARGET_NR_ioprio_get:
13080         return get_errno(ioprio_get(arg1, arg2));
13081 #endif
13082 
13083 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13084     case TARGET_NR_ioprio_set:
13085         return get_errno(ioprio_set(arg1, arg2, arg3));
13086 #endif
13087 
13088 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13089     case TARGET_NR_setns:
13090         return get_errno(setns(arg1, arg2));
13091 #endif
13092 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13093     case TARGET_NR_unshare:
13094         return get_errno(unshare(arg1));
13095 #endif
13096 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13097     case TARGET_NR_kcmp:
13098         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13099 #endif
13100 #ifdef TARGET_NR_swapcontext
13101     case TARGET_NR_swapcontext:
13102         /* PowerPC specific.  */
13103         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13104 #endif
13105 #ifdef TARGET_NR_memfd_create
13106     case TARGET_NR_memfd_create:
13107         p = lock_user_string(arg1);
13108         if (!p) {
13109             return -TARGET_EFAULT;
13110         }
13111         ret = get_errno(memfd_create(p, arg2));
13112         fd_trans_unregister(ret);
13113         unlock_user(p, arg1, 0);
13114         return ret;
13115 #endif
13116 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13117     case TARGET_NR_membarrier:
13118         return get_errno(membarrier(arg1, arg2));
13119 #endif
13120 
13121 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13122     case TARGET_NR_copy_file_range:
13123         {
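                  /*
                   * The optional in/out offsets are read from guest memory
                   * and written back only if the call actually made
                   * progress (ret > 0).
                   */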
13124             loff_t inoff, outoff;
13125             loff_t *pinoff = NULL, *poutoff = NULL;
13126 
13127             if (arg2) {
13128                 if (get_user_u64(inoff, arg2)) {
13129                     return -TARGET_EFAULT;
13130                 }
13131                 pinoff = &inoff;
13132             }
13133             if (arg4) {
13134                 if (get_user_u64(outoff, arg4)) {
13135                     return -TARGET_EFAULT;
13136                 }
13137                 poutoff = &outoff;
13138             }
13139             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13140                                                  arg5, arg6));
13141             if (!is_error(ret) && ret > 0) {
13142                 if (arg2) {
13143                     if (put_user_u64(inoff, arg2)) {
13144                         return -TARGET_EFAULT;
13145                     }
13146                 }
13147                 if (arg4) {
13148                     if (put_user_u64(outoff, arg4)) {
13149                         return -TARGET_EFAULT;
13150                     }
13151                 }
13152             }
13153         }
13154         return ret;
13155 #endif
13156 
13157     default:
13158         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13159         return -TARGET_ENOSYS;
13160     }
13161     return ret;
13162 }
13163 
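      /*
       * Public syscall entry point: wraps do_syscall1() with the optional
       * ERESTARTSYS debug path, the record_syscall_start/return hooks and
       * -strace logging of the call and its return value.
       */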
13164 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13165                     abi_long arg2, abi_long arg3, abi_long arg4,
13166                     abi_long arg5, abi_long arg6, abi_long arg7,
13167                     abi_long arg8)
13168 {
13169     CPUState *cpu = env_cpu(cpu_env);
13170     abi_long ret;
13171 
13172 #ifdef DEBUG_ERESTARTSYS
13173     /* Debug-only code for exercising the syscall-restart code paths
13174      * in the per-architecture cpu main loops: restart every syscall
13175      * the guest makes once before letting it through.
13176      */
13177     {
13178         static bool flag;
13179         flag = !flag;
13180         if (flag) {
13181             return -TARGET_ERESTARTSYS;
13182         }
13183     }
13184 #endif
13185 
13186     record_syscall_start(cpu, num, arg1,
13187                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13188 
13189     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13190         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13191     }
13192 
13193     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13194                       arg5, arg6, arg7, arg8);
13195 
13196     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13197         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13198                           arg3, arg4, arg5, arg6);
13199     }
13200 
13201     record_syscall_return(cpu, num, ret);
13202     return ret;
13203 }
13204