xref: /openbmc/qemu/linux-user/syscall.c (revision 9a4e273ddec3927920c5958d2226c6b38b543336)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/mmap-lock.h"
30 #include "exec/tb-flush.h"
31 #include "exec/translation-block.h"
32 #include <elf.h>
33 #include <endian.h>
34 #include <grp.h>
35 #include <sys/ipc.h>
36 #include <sys/msg.h>
37 #include <sys/wait.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/swap.h>
45 #include <linux/capability.h>
46 #include <sched.h>
47 #include <sys/timex.h>
48 #include <sys/socket.h>
49 #include <linux/sockios.h>
50 #include <sys/un.h>
51 #include <sys/uio.h>
52 #include <poll.h>
53 #include <sys/times.h>
54 #include <sys/shm.h>
55 #include <sys/sem.h>
56 #include <sys/statfs.h>
57 #include <utime.h>
58 #include <sys/sysinfo.h>
59 #include <sys/signalfd.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 #include <linux/wireless.h>
65 #include <linux/icmp.h>
66 #include <linux/icmpv6.h>
67 #include <linux/if_tun.h>
68 #include <linux/in6.h>
69 #include <linux/errqueue.h>
70 #include <linux/random.h>
71 #ifdef CONFIG_TIMERFD
72 #include <sys/timerfd.h>
73 #endif
74 #ifdef CONFIG_EVENTFD
75 #include <sys/eventfd.h>
76 #endif
77 #ifdef CONFIG_EPOLL
78 #include <sys/epoll.h>
79 #endif
80 #ifdef CONFIG_ATTR
81 #include "qemu/xattr.h"
82 #endif
83 #ifdef CONFIG_SENDFILE
84 #include <sys/sendfile.h>
85 #endif
86 #ifdef HAVE_SYS_KCOV_H
87 #include <sys/kcov.h>
88 #endif
89 
90 #define termios host_termios
91 #define winsize host_winsize
92 #define termio host_termio
93 #define sgttyb host_sgttyb /* same as target */
94 #define tchars host_tchars /* same as target */
95 #define ltchars host_ltchars /* same as target */
96 
97 #include <linux/termios.h>
98 #include <linux/unistd.h>
99 #include <linux/cdrom.h>
100 #include <linux/hdreg.h>
101 #include <linux/soundcard.h>
102 #include <linux/kd.h>
103 #include <linux/mtio.h>
104 #include <linux/fs.h>
105 #include <linux/fd.h>
106 #if defined(CONFIG_FIEMAP)
107 #include <linux/fiemap.h>
108 #endif
109 #include <linux/fb.h>
110 #if defined(CONFIG_USBFS)
111 #include <linux/usbdevice_fs.h>
112 #include <linux/usb/ch9.h>
113 #endif
114 #include <linux/vt.h>
115 #include <linux/dm-ioctl.h>
116 #include <linux/reboot.h>
117 #include <linux/route.h>
118 #include <linux/filter.h>
119 #include <linux/blkpg.h>
120 #include <netpacket/packet.h>
121 #include <linux/netlink.h>
122 #include <linux/if_alg.h>
123 #include <linux/rtc.h>
124 #include <sound/asound.h>
125 #ifdef HAVE_BTRFS_H
126 #include <linux/btrfs.h>
127 #endif
128 #ifdef HAVE_DRM_H
129 #include <libdrm/drm.h>
130 #include <libdrm/i915_drm.h>
131 #endif
132 #include "linux_loop.h"
133 #include "uname.h"
134 
135 #include "qemu.h"
136 #include "user-internals.h"
137 #include "strace.h"
138 #include "signal-common.h"
139 #include "loader.h"
140 #include "user-mmap.h"
141 #include "user/page-protection.h"
142 #include "user/safe-syscall.h"
143 #include "user/signal.h"
144 #include "qemu/guest-random.h"
145 #include "qemu/selfmap.h"
146 #include "user/syscall-trace.h"
147 #include "special-errno.h"
148 #include "qapi/error.h"
149 #include "fd-trans.h"
150 #include "user/cpu_loop.h"
151 
152 #ifndef CLONE_IO
153 #define CLONE_IO                0x80000000      /* Clone io context */
154 #endif
155 
156 /* We can't directly call the host clone syscall, because this will
157  * badly confuse libc (breaking mutexes, for example). So we must
158  * divide clone flags into:
159  *  * flag combinations that look like pthread_create()
160  *  * flag combinations that look like fork()
161  *  * flags we can implement within QEMU itself
162  *  * flags we can't support and will return an error for
163  */
164 /* For thread creation, all these flags must be present; for
165  * fork, none must be present.
166  */
167 #define CLONE_THREAD_FLAGS                              \
168     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
169      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
170 
171 /* These flags are ignored:
172  * CLONE_DETACHED is now ignored by the kernel;
173  * CLONE_IO is just an optimisation hint to the I/O scheduler
174  */
175 #define CLONE_IGNORED_FLAGS                     \
176     (CLONE_DETACHED | CLONE_IO)
177 
178 #ifndef CLONE_PIDFD
179 # define CLONE_PIDFD 0x00001000
180 #endif
181 
182 /* Flags for fork which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_FORK_FLAGS               \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
186 
187 /* Flags for thread creation which we can implement within QEMU itself */
188 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
189     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
190      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
191 
192 #define CLONE_INVALID_FORK_FLAGS                                        \
193     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
194 
195 #define CLONE_INVALID_THREAD_FLAGS                                      \
196     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
197        CLONE_IGNORED_FLAGS))
198 
199 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
200  * have almost all been allocated. We cannot support any of
201  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
202  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
203  * The checks against the invalid thread masks above will catch these.
204  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
205  */
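
/*
 * Illustrative note (assumption, not from this file): a typical glibc
 * pthread_create() issues clone() with something like
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *   CLONE_CHILD_CLEARTID
 * which satisfies (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS and
 * sets no bit in CLONE_INVALID_THREAD_FLAGS, so it takes the thread path.
 * A plain fork() passes only SIGCHLD, which lies inside CSIGNAL and so
 * matches the fork classification instead.
 */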
206 
207 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
208  * once. This exercises the codepaths for restart.
209  */
210 //#define DEBUG_ERESTARTSYS
211 
212 //#include <linux/msdos_fs.h>
213 #define VFAT_IOCTL_READDIR_BOTH \
214     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
215 #define VFAT_IOCTL_READDIR_SHORT \
216     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
217 
218 #undef _syscall0
219 #undef _syscall1
220 #undef _syscall2
221 #undef _syscall3
222 #undef _syscall4
223 #undef _syscall5
224 #undef _syscall6
225 
226 #define _syscall0(type,name)		\
227 static type name (void)			\
228 {					\
229 	return syscall(__NR_##name);	\
230 }
231 
232 #define _syscall1(type,name,type1,arg1)		\
233 static type name (type1 arg1)			\
234 {						\
235 	return syscall(__NR_##name, arg1);	\
236 }
237 
238 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
239 static type name (type1 arg1,type2 arg2)		\
240 {							\
241 	return syscall(__NR_##name, arg1, arg2);	\
242 }
243 
244 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
245 static type name (type1 arg1,type2 arg2,type3 arg3)		\
246 {								\
247 	return syscall(__NR_##name, arg1, arg2, arg3);		\
248 }
249 
250 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
252 {										\
253 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
254 }
255 
256 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
257 		  type5,arg5)							\
258 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
259 {										\
260 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
261 }
262 
263 
264 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
265 		  type5,arg5,type6,arg6)					\
266 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
267                   type6 arg6)							\
268 {										\
269 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
270 }
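
/*
 * Illustrative sketch: each _syscallN macro is a thin wrapper around the
 * raw syscall(2) interface.  For example, _syscall0(int, sys_gettid)
 * further down expands to roughly
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * and __NR_sys_gettid is #defined to the host __NR_gettid below, so the
 * wrapper invokes the host syscall directly (older glibc provided no
 * gettid() function of its own).
 */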
271 
272 
273 #define __NR_sys_uname __NR_uname
274 #define __NR_sys_getcwd1 __NR_getcwd
275 #define __NR_sys_getdents __NR_getdents
276 #define __NR_sys_getdents64 __NR_getdents64
277 #define __NR_sys_getpriority __NR_getpriority
278 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
279 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
280 #define __NR_sys_syslog __NR_syslog
281 #if defined(__NR_futex)
282 # define __NR_sys_futex __NR_futex
283 #endif
284 #if defined(__NR_futex_time64)
285 # define __NR_sys_futex_time64 __NR_futex_time64
286 #endif
287 #define __NR_sys_statx __NR_statx
288 
289 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
290 #define __NR__llseek __NR_lseek
291 #endif
292 
293 /* Newer kernel ports have llseek() instead of _llseek() */
294 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
295 #define TARGET_NR__llseek TARGET_NR_llseek
296 #endif
297 
298 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
299 #ifndef TARGET_O_NONBLOCK_MASK
300 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
301 #endif
302 
303 #define __NR_sys_gettid __NR_gettid
304 _syscall0(int, sys_gettid)
305 
306 /* For the 64-bit guest on 32-bit host case we must emulate
307  * getdents using getdents64, because otherwise the host
308  * might hand us back more dirent records than we can fit
309  * into the guest buffer after structure format conversion.
310  * Otherwise we emulate getdents with getdents if the host has it.
311  */
312 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
313 #define EMULATE_GETDENTS_WITH_GETDENTS
314 #endif
315 
316 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
317 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
318 #endif
319 #if (defined(TARGET_NR_getdents) && \
320       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
321     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
322 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
323 #endif
324 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
325 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
326           loff_t *, res, unsigned int, wh);
327 #endif
328 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
329 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
330           siginfo_t *, uinfo)
331 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
332 #ifdef __NR_exit_group
333 _syscall1(int,exit_group,int,error_code)
334 #endif
335 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
336 #define __NR_sys_close_range __NR_close_range
337 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
338 #ifndef CLOSE_RANGE_CLOEXEC
339 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
340 #endif
341 #endif
342 #if defined(__NR_futex)
343 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_futex_time64)
347 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
348           const struct timespec *,timeout,int *,uaddr2,int,val3)
349 #endif
350 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
351 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
354 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
355                              unsigned int, flags);
356 #endif
357 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
358 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
359 #endif
360 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
361 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
362           unsigned long *, user_mask_ptr);
363 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
364 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
365           unsigned long *, user_mask_ptr);
366 /* sched_attr is not defined in glibc < 2.41 */
367 #ifndef SCHED_ATTR_SIZE_VER0
368 struct sched_attr {
369     uint32_t size;
370     uint32_t sched_policy;
371     uint64_t sched_flags;
372     int32_t sched_nice;
373     uint32_t sched_priority;
374     uint64_t sched_runtime;
375     uint64_t sched_deadline;
376     uint64_t sched_period;
377     uint32_t sched_util_min;
378     uint32_t sched_util_max;
379 };
380 #endif
381 #define __NR_sys_sched_getattr __NR_sched_getattr
382 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
383           unsigned int, size, unsigned int, flags);
384 #define __NR_sys_sched_setattr __NR_sched_setattr
385 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
386           unsigned int, flags);
387 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
388 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
389 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
390 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
391           const struct sched_param *, param);
392 #define __NR_sys_sched_getparam __NR_sched_getparam
393 _syscall2(int, sys_sched_getparam, pid_t, pid,
394           struct sched_param *, param);
395 #define __NR_sys_sched_setparam __NR_sched_setparam
396 _syscall2(int, sys_sched_setparam, pid_t, pid,
397           const struct sched_param *, param);
398 #define __NR_sys_getcpu __NR_getcpu
399 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
400 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
401           void *, arg);
402 _syscall2(int, capget, struct __user_cap_header_struct *, header,
403           struct __user_cap_data_struct *, data);
404 _syscall2(int, capset, struct __user_cap_header_struct *, header,
405           struct __user_cap_data_struct *, data);
406 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
407 _syscall2(int, ioprio_get, int, which, int, who)
408 #endif
409 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
410 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
411 #endif
412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
413 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
414 #endif
415 
416 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
417 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
418           unsigned long, idx1, unsigned long, idx2)
419 #endif
420 
421 /*
422  * It is assumed that struct statx is architecture independent.
423  */
424 #if defined(TARGET_NR_statx) && defined(__NR_statx)
425 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
426           unsigned int, mask, struct target_statx *, statxbuf)
427 #endif
428 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
429 _syscall2(int, membarrier, int, cmd, int, flags)
430 #endif
431 
432 static const bitmask_transtbl fcntl_flags_tbl[] = {
433   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
434   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
435   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
436   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
437   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
438   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
439   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
440   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
441   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
442   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
443   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
444   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
445   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
446 #if defined(O_DIRECT)
447   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
448 #endif
449 #if defined(O_NOATIME)
450   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
451 #endif
452 #if defined(O_CLOEXEC)
453   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
454 #endif
455 #if defined(O_PATH)
456   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
457 #endif
458 #if defined(O_TMPFILE)
459   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
460 #endif
461   /* Don't terminate the list prematurely on 64-bit host+guest.  */
462 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
463   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
464 #endif
465 };
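
/*
 * Illustrative note: each entry above is { target_mask, target_bits,
 * host_mask, host_bits }.  Assuming the target_to_host_bitmask() /
 * host_to_target_bitmask() helpers declared elsewhere in linux-user,
 * translating guest open(2) flags looks roughly like
 *
 *   host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * i.e. every row whose target_mask/target_bits pattern matches contributes
 * its host_bits to the result.
 */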
466 
467 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
468 
469 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
470 #if defined(__NR_utimensat)
471 #define __NR_sys_utimensat __NR_utimensat
472 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
473           const struct timespec *,tsp,int,flags)
474 #else
475 static int sys_utimensat(int dirfd, const char *pathname,
476                          const struct timespec times[2], int flags)
477 {
478     errno = ENOSYS;
479     return -1;
480 }
481 #endif
482 #endif /* TARGET_NR_utimensat */
483 
484 #ifdef TARGET_NR_renameat2
485 #if defined(__NR_renameat2)
486 #define __NR_sys_renameat2 __NR_renameat2
487 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
488           const char *, new, unsigned int, flags)
489 #else
490 static int sys_renameat2(int oldfd, const char *old,
491                          int newfd, const char *new, int flags)
492 {
493     if (flags == 0) {
494         return renameat(oldfd, old, newfd, new);
495     }
496     errno = ENOSYS;
497     return -1;
498 }
499 #endif
500 #endif /* TARGET_NR_renameat2 */
501 
502 #ifdef CONFIG_INOTIFY
503 #include <sys/inotify.h>
504 #else
505 /* Userspace can usually survive runtime without inotify */
506 #undef TARGET_NR_inotify_init
507 #undef TARGET_NR_inotify_init1
508 #undef TARGET_NR_inotify_add_watch
509 #undef TARGET_NR_inotify_rm_watch
510 #endif /* CONFIG_INOTIFY  */
511 
512 #if defined(TARGET_NR_prlimit64)
513 #ifndef __NR_prlimit64
514 # define __NR_prlimit64 -1
515 #endif
516 #define __NR_sys_prlimit64 __NR_prlimit64
517 /* The glibc rlimit structure may not be that used by the underlying syscall */
518 struct host_rlimit64 {
519     uint64_t rlim_cur;
520     uint64_t rlim_max;
521 };
522 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
523           const struct host_rlimit64 *, new_limit,
524           struct host_rlimit64 *, old_limit)
525 #endif
526 
527 
528 #if defined(TARGET_NR_timer_create)
529 /* Maximum of 32 active POSIX timers allowed at any one time. */
530 #define GUEST_TIMER_MAX 32
531 static timer_t g_posix_timers[GUEST_TIMER_MAX];
532 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
533 
534 static inline int next_free_host_timer(void)
535 {
536     int k;
537     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
538         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
539             return k;
540         }
541     }
542     return -1;
543 }
544 
545 static inline void free_host_timer_slot(int id)
546 {
547     qatomic_store_release(g_posix_timer_allocated + id, 0);
548 }
549 #endif
550 
551 static inline int host_to_target_errno(int host_errno)
552 {
553     switch (host_errno) {
554 #define E(X)  case X: return TARGET_##X;
555 #include "errnos.c.inc"
556 #undef E
557     default:
558         return host_errno;
559     }
560 }
561 
562 static inline int target_to_host_errno(int target_errno)
563 {
564     switch (target_errno) {
565 #define E(X)  case TARGET_##X: return X;
566 #include "errnos.c.inc"
567 #undef E
568     default:
569         return target_errno;
570     }
571 }
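
/*
 * Illustrative note: "errnos.c.inc" is a list of E(X) invocations, so with
 * the E() macro defined as above each entry expands to
 * "case X: return TARGET_X;" in host_to_target_errno() and to the reverse
 * mapping in target_to_host_errno().  Errno values with no entry fall
 * through to the default case and are passed back numerically unchanged.
 */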
572 
573 abi_long get_errno(abi_long ret)
574 {
575     if (ret == -1)
576         return -host_to_target_errno(errno);
577     else
578         return ret;
579 }
580 
581 const char *target_strerror(int err)
582 {
583     if (err == QEMU_ERESTARTSYS) {
584         return "To be restarted";
585     }
586     if (err == QEMU_ESIGRETURN) {
587         return "Successful exit from sigreturn";
588     }
589 
590     return strerror(target_to_host_errno(err));
591 }
592 
593 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
594 {
595     int i;
596     uint8_t b;
597     if (usize <= ksize) {
598         return 1;
599     }
600     for (i = ksize; i < usize; i++) {
601         if (get_user_u8(b, addr + i)) {
602             return -TARGET_EFAULT;
603         }
604         if (b != 0) {
605             return 0;
606         }
607     }
608     return 1;
609 }
610 
611 /*
612  * Copies a target struct to a host struct, in a way that guarantees
613  * backwards-compatibility for struct syscall arguments.
614  *
615  * Similar to the kernel's uaccess.h:copy_struct_from_user()
616  */
617 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
618 {
619     size_t size = MIN(ksize, usize);
620     size_t rest = MAX(ksize, usize) - size;
621 
622     /* Deal with trailing bytes. */
623     if (usize < ksize) {
624         memset(dst + size, 0, rest);
625     } else if (usize > ksize) {
626         int ret = check_zeroed_user(src, ksize, usize);
627         if (ret <= 0) {
628             return ret ?: -TARGET_E2BIG;
629         }
630     }
631     /* Copy the interoperable parts of the struct. */
632     if (copy_from_user(dst, src, size)) {
633         return -TARGET_EFAULT;
634     }
635     return 0;
636 }
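
/*
 * Illustrative usage sketch (hypothetical caller, names are examples only):
 * reading a guest struct that may be smaller or larger than our own
 * definition, e.g. the sched_attr handling later in this file, follows
 * this pattern:
 *
 *   struct sched_attr attr;
 *   abi_long ret = copy_struct_from_user(&attr, sizeof(attr), addr, usize);
 *   if (ret) {
 *       return ret;     (either -TARGET_EFAULT or -TARGET_E2BIG)
 *   }
 *
 * A shorter guest struct is zero-extended; a longer one is accepted only if
 * every trailing byte the guest supplied is zero.
 */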
637 
638 #define safe_syscall0(type, name) \
639 static type safe_##name(void) \
640 { \
641     return safe_syscall(__NR_##name); \
642 }
643 
644 #define safe_syscall1(type, name, type1, arg1) \
645 static type safe_##name(type1 arg1) \
646 { \
647     return safe_syscall(__NR_##name, arg1); \
648 }
649 
650 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
651 static type safe_##name(type1 arg1, type2 arg2) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2); \
654 }
655 
656 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
657 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
658 { \
659     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
660 }
661 
662 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
663     type4, arg4) \
664 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
665 { \
666     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
667 }
668 
669 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
670     type4, arg4, type5, arg5) \
671 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
672     type5 arg5) \
673 { \
674     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
675 }
676 
677 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
678     type4, arg4, type5, arg5, type6, arg6) \
679 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680     type5 arg5, type6 arg6) \
681 { \
682     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
683 }
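
/*
 * Illustrative sketch: the safe_syscallN macros mirror the _syscallN ones
 * but route through safe_syscall() (user/safe-syscall.h), which, broadly,
 * guarantees that a guest signal arriving around a blocking host syscall
 * either interrupts it or is delivered afterwards, never lost, with the
 * syscall restarted via QEMU_ERESTARTSYS where appropriate.  For example,
 * the first wrapper below expands to roughly
 *
 *   static ssize_t safe_read(int fd, void *buff, size_t count)
 *   {
 *       return safe_syscall(__NR_read, fd, buff, count);
 *   }
 */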
684 
685 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
686 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
687 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
688               int, flags, mode_t, mode)
689 
690 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
691               const struct open_how_ver0 *, how, size_t, size)
692 
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
701               char **, argv, char **, envp, int, flags)
702 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
703     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
704 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
705               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
706 #endif
707 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
708 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
709               struct timespec *, tsp, const sigset_t *, sigmask,
710               size_t, sigsetsize)
711 #endif
712 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
713               int, maxevents, int, timeout, const sigset_t *, sigmask,
714               size_t, sigsetsize)
715 #if defined(__NR_futex)
716 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
717               const struct timespec *,timeout,int *,uaddr2,int,val3)
718 #endif
719 #if defined(__NR_futex_time64)
720 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
721               const struct timespec *,timeout,int *,uaddr2,int,val3)
722 #endif
723 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
724 safe_syscall2(int, kill, pid_t, pid, int, sig)
725 safe_syscall2(int, tkill, int, tid, int, sig)
726 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
727 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
729 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
732               unsigned long, pos_l, unsigned long, pos_h)
733 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
734               socklen_t, addrlen)
735 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
736               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
737 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
738               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
739 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
740 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
741 safe_syscall2(int, flock, int, fd, int, operation)
742 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
743 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
744               const struct timespec *, uts, size_t, sigsetsize)
745 #endif
746 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
747               int, flags)
748 #if defined(TARGET_NR_nanosleep)
749 safe_syscall2(int, nanosleep, const struct timespec *, req,
750               struct timespec *, rem)
751 #endif
752 #if defined(TARGET_NR_clock_nanosleep) || \
753     defined(TARGET_NR_clock_nanosleep_time64)
754 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
755               const struct timespec *, req, struct timespec *, rem)
756 #endif
757 #ifdef __NR_ipc
758 #ifdef __s390x__
759 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
760               void *, ptr)
761 #else
762 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
763               void *, ptr, long, fifth)
764 #endif
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 #endif
770 #ifdef __NR_msgrcv
771 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
772               long, msgtype, int, flags)
773 #endif
774 #ifdef __NR_semtimedop
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedsend) || \
779     defined(TARGET_NR_mq_timedsend_time64)
780 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
781               size_t, len, unsigned, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_mq_timedreceive) || \
784     defined(TARGET_NR_mq_timedreceive_time64)
785 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
786               size_t, len, unsigned *, prio, const struct timespec *, timeout)
787 #endif
788 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
789 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
790               int, outfd, loff_t *, poutoff, size_t, length,
791               unsigned int, flags)
792 #endif
793 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
794 safe_syscall4(int, fchmodat2, int, dfd, const char *, filename,
795               unsigned short, mode, unsigned int, flags)
796 #endif
797 
798 /* We do ioctl like this rather than via safe_syscall3 to preserve the
799  * "third argument might be integer or pointer or not present" behaviour of
800  * the libc function.
801  */
802 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
803 /* Similarly for fcntl. Since we always build with LFS enabled,
804  * we should be using the 64-bit structures automatically.
805  */
806 #ifdef __NR_fcntl64
807 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
808 #else
809 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
810 #endif
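
/*
 * Illustrative note: because safe_ioctl() and safe_fcntl() forward
 * __VA_ARGS__ unchanged, callers keep the libc-style calling conventions,
 * e.g.
 *
 *   safe_ioctl(fd, FIONREAD, &avail);    (pointer third argument)
 *   safe_ioctl(fd, FIOCLEX);             (no third argument at all)
 *
 * which a fixed-arity safe_syscall3() wrapper could not express.
 */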
811 
812 static inline int host_to_target_sock_type(int host_type)
813 {
814     int target_type;
815 
816     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
817     case SOCK_DGRAM:
818         target_type = TARGET_SOCK_DGRAM;
819         break;
820     case SOCK_STREAM:
821         target_type = TARGET_SOCK_STREAM;
822         break;
823     default:
824         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
825         break;
826     }
827 
828 #if defined(SOCK_CLOEXEC)
829     if (host_type & SOCK_CLOEXEC) {
830         target_type |= TARGET_SOCK_CLOEXEC;
831     }
832 #endif
833 
834 #if defined(SOCK_NONBLOCK)
835     if (host_type & SOCK_NONBLOCK) {
836         target_type |= TARGET_SOCK_NONBLOCK;
837     }
838 #endif
839 
840     return target_type;
841 }
842 
843 static abi_ulong target_brk, initial_target_brk;
844 
845 void target_set_brk(abi_ulong new_brk)
846 {
847     target_brk = TARGET_PAGE_ALIGN(new_brk);
848     initial_target_brk = target_brk;
849 }
850 
851 /* do_brk() must return target values and target errnos. */
852 abi_long do_brk(abi_ulong brk_val)
853 {
854     abi_long mapped_addr;
855     abi_ulong new_brk;
856     abi_ulong old_brk;
857 
858     /* brk pointers are always untagged */
859 
860     /* do not allow the heap to shrink below the initial brk value */
861     if (brk_val < initial_target_brk) {
862         return target_brk;
863     }
864 
865     new_brk = TARGET_PAGE_ALIGN(brk_val);
866     old_brk = TARGET_PAGE_ALIGN(target_brk);
867 
868     /* new and old target_brk might be on the same page */
869     if (new_brk == old_brk) {
870         target_brk = brk_val;
871         return target_brk;
872     }
873 
874     /* Release heap if necessary */
875     if (new_brk < old_brk) {
876         target_munmap(new_brk, old_brk - new_brk);
877 
878         target_brk = brk_val;
879         return target_brk;
880     }
881 
882     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
883                               PROT_READ | PROT_WRITE,
884                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
885                               -1, 0);
886 
887     if (mapped_addr == old_brk) {
888         target_brk = brk_val;
889         return target_brk;
890     }
891 
892 #if defined(TARGET_ALPHA)
893     /* We (partially) emulate OSF/1 on Alpha, which requires we
894        return a proper errno, not an unchanged brk value.  */
895     return -TARGET_ENOMEM;
896 #endif
897     /* For everything else, return the previous break. */
898     return target_brk;
899 }
900 
901 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
902     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
903 static inline abi_long copy_from_user_fdset(fd_set *fds,
904                                             abi_ulong target_fds_addr,
905                                             int n)
906 {
907     int i, nw, j, k;
908     abi_ulong b, *target_fds;
909 
910     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
911     if (!(target_fds = lock_user(VERIFY_READ,
912                                  target_fds_addr,
913                                  sizeof(abi_ulong) * nw,
914                                  1)))
915         return -TARGET_EFAULT;
916 
917     FD_ZERO(fds);
918     k = 0;
919     for (i = 0; i < nw; i++) {
920         /* grab the abi_ulong */
921         __get_user(b, &target_fds[i]);
922         for (j = 0; j < TARGET_ABI_BITS; j++) {
923             /* check the bit inside the abi_ulong */
924             if ((b >> j) & 1)
925                 FD_SET(k, fds);
926             k++;
927         }
928     }
929 
930     unlock_user(target_fds, target_fds_addr, 0);
931 
932     return 0;
933 }
934 
935 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
936                                                  abi_ulong target_fds_addr,
937                                                  int n)
938 {
939     if (target_fds_addr) {
940         if (copy_from_user_fdset(fds, target_fds_addr, n))
941             return -TARGET_EFAULT;
942         *fds_ptr = fds;
943     } else {
944         *fds_ptr = NULL;
945     }
946     return 0;
947 }
948 
949 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
950                                           const fd_set *fds,
951                                           int n)
952 {
953     int i, nw, j, k;
954     abi_long v;
955     abi_ulong *target_fds;
956 
957     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
958     if (!(target_fds = lock_user(VERIFY_WRITE,
959                                  target_fds_addr,
960                                  sizeof(abi_ulong) * nw,
961                                  0)))
962         return -TARGET_EFAULT;
963 
964     k = 0;
965     for (i = 0; i < nw; i++) {
966         v = 0;
967         for (j = 0; j < TARGET_ABI_BITS; j++) {
968             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
969             k++;
970         }
971         __put_user(v, &target_fds[i]);
972     }
973 
974     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
975 
976     return 0;
977 }
978 #endif
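
/*
 * Illustrative worked example: for a 32-bit guest (TARGET_ABI_BITS == 32)
 * and n == 70 descriptors, copy_from_user_fdset() reads
 * nw = DIV_ROUND_UP(70, 32) = 3 guest abi_ulongs, lets __get_user handle
 * the guest byte order, and sets host FD_SET(i * 32 + j, fds) for every
 * bit j set in word i.  copy_to_user_fdset() does the reverse, packing
 * FD_ISSET() results back into guest-order words.
 */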
979 
980 #if defined(__alpha__)
981 #define HOST_HZ 1024
982 #else
983 #define HOST_HZ 100
984 #endif
985 
986 static inline abi_long host_to_target_clock_t(long ticks)
987 {
988 #if HOST_HZ == TARGET_HZ
989     return ticks;
990 #else
991     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
992 #endif
993 }
994 
995 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
996                                              const struct rusage *rusage)
997 {
998     struct target_rusage *target_rusage;
999 
1000     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1001         return -TARGET_EFAULT;
1002     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1003     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1004     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1005     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1006     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1007     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1008     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1009     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1010     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1011     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1012     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1013     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1014     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1015     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1016     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1017     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1018     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1019     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1020     unlock_user_struct(target_rusage, target_addr, 1);
1021 
1022     return 0;
1023 }
1024 
1025 #ifdef TARGET_NR_setrlimit
1026 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1027 {
1028     abi_ulong target_rlim_swap;
1029     rlim_t result;
1030 
1031     target_rlim_swap = tswapal(target_rlim);
1032     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1033         return RLIM_INFINITY;
1034 
1035     result = target_rlim_swap;
1036     if (target_rlim_swap != (rlim_t)result)
1037         return RLIM_INFINITY;
1038 
1039     return result;
1040 }
1041 #endif
1042 
1043 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1044 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1045 {
1046     abi_ulong target_rlim_swap;
1047     abi_ulong result;
1048 
1049     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1050         target_rlim_swap = TARGET_RLIM_INFINITY;
1051     else
1052         target_rlim_swap = rlim;
1053     result = tswapal(target_rlim_swap);
1054 
1055     return result;
1056 }
1057 #endif
1058 
1059 static inline int target_to_host_resource(int code)
1060 {
1061     switch (code) {
1062     case TARGET_RLIMIT_AS:
1063         return RLIMIT_AS;
1064     case TARGET_RLIMIT_CORE:
1065         return RLIMIT_CORE;
1066     case TARGET_RLIMIT_CPU:
1067         return RLIMIT_CPU;
1068     case TARGET_RLIMIT_DATA:
1069         return RLIMIT_DATA;
1070     case TARGET_RLIMIT_FSIZE:
1071         return RLIMIT_FSIZE;
1072     case TARGET_RLIMIT_LOCKS:
1073         return RLIMIT_LOCKS;
1074     case TARGET_RLIMIT_MEMLOCK:
1075         return RLIMIT_MEMLOCK;
1076     case TARGET_RLIMIT_MSGQUEUE:
1077         return RLIMIT_MSGQUEUE;
1078     case TARGET_RLIMIT_NICE:
1079         return RLIMIT_NICE;
1080     case TARGET_RLIMIT_NOFILE:
1081         return RLIMIT_NOFILE;
1082     case TARGET_RLIMIT_NPROC:
1083         return RLIMIT_NPROC;
1084     case TARGET_RLIMIT_RSS:
1085         return RLIMIT_RSS;
1086     case TARGET_RLIMIT_RTPRIO:
1087         return RLIMIT_RTPRIO;
1088 #ifdef RLIMIT_RTTIME
1089     case TARGET_RLIMIT_RTTIME:
1090         return RLIMIT_RTTIME;
1091 #endif
1092     case TARGET_RLIMIT_SIGPENDING:
1093         return RLIMIT_SIGPENDING;
1094     case TARGET_RLIMIT_STACK:
1095         return RLIMIT_STACK;
1096     default:
1097         return code;
1098     }
1099 }
1100 
1101 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1102                                               abi_ulong target_tv_addr)
1103 {
1104     struct target_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __get_user(tv->tv_sec, &target_tv->tv_sec);
1111     __get_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 0);
1114 
1115     return 0;
1116 }
1117 
1118 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1119                                             const struct timeval *tv)
1120 {
1121     struct target_timeval *target_tv;
1122 
1123     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1124         return -TARGET_EFAULT;
1125     }
1126 
1127     __put_user(tv->tv_sec, &target_tv->tv_sec);
1128     __put_user(tv->tv_usec, &target_tv->tv_usec);
1129 
1130     unlock_user_struct(target_tv, target_tv_addr, 1);
1131 
1132     return 0;
1133 }
1134 
1135 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1136 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1137                                                 abi_ulong target_tv_addr)
1138 {
1139     struct target__kernel_sock_timeval *target_tv;
1140 
1141     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1142         return -TARGET_EFAULT;
1143     }
1144 
1145     __get_user(tv->tv_sec, &target_tv->tv_sec);
1146     __get_user(tv->tv_usec, &target_tv->tv_usec);
1147 
1148     unlock_user_struct(target_tv, target_tv_addr, 0);
1149 
1150     return 0;
1151 }
1152 #endif
1153 
1154 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1155                                               const struct timeval *tv)
1156 {
1157     struct target__kernel_sock_timeval *target_tv;
1158 
1159     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1160         return -TARGET_EFAULT;
1161     }
1162 
1163     __put_user(tv->tv_sec, &target_tv->tv_sec);
1164     __put_user(tv->tv_usec, &target_tv->tv_usec);
1165 
1166     unlock_user_struct(target_tv, target_tv_addr, 1);
1167 
1168     return 0;
1169 }
1170 
1171 #if defined(TARGET_NR_futex) || \
1172     defined(TARGET_NR_rt_sigtimedwait) || \
1173     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1174     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1175     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1176     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1177     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1178     defined(TARGET_NR_timer_settime) || \
1179     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1180 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1181                                                abi_ulong target_addr)
1182 {
1183     struct target_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 0);
1191     return 0;
1192 }
1193 #endif
1194 
1195 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1196     defined(TARGET_NR_timer_settime64) || \
1197     defined(TARGET_NR_mq_timedsend_time64) || \
1198     defined(TARGET_NR_mq_timedreceive_time64) || \
1199     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1200     defined(TARGET_NR_clock_nanosleep_time64) || \
1201     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1202     defined(TARGET_NR_utimensat) || \
1203     defined(TARGET_NR_utimensat_time64) || \
1204     defined(TARGET_NR_semtimedop_time64) || \
1205     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1206 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1207                                                  abi_ulong target_addr)
1208 {
1209     struct target__kernel_timespec *target_ts;
1210 
1211     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1212         return -TARGET_EFAULT;
1213     }
1214     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1215     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216     /* in 32bit mode, this drops the padding */
1217     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1218     unlock_user_struct(target_ts, target_addr, 0);
1219     return 0;
1220 }
1221 #endif
1222 
1223 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1224                                                struct timespec *host_ts)
1225 {
1226     struct target_timespec *target_ts;
1227 
1228     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1229         return -TARGET_EFAULT;
1230     }
1231     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1232     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1233     unlock_user_struct(target_ts, target_addr, 1);
1234     return 0;
1235 }
1236 
1237 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1238                                                  struct timespec *host_ts)
1239 {
1240     struct target__kernel_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 1);
1248     return 0;
1249 }
1250 
1251 #if defined(TARGET_NR_gettimeofday)
1252 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1253                                              struct timezone *tz)
1254 {
1255     struct target_timezone *target_tz;
1256 
1257     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1258         return -TARGET_EFAULT;
1259     }
1260 
1261     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1262     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1263 
1264     unlock_user_struct(target_tz, target_tz_addr, 1);
1265 
1266     return 0;
1267 }
1268 #endif
1269 
1270 #if defined(TARGET_NR_settimeofday)
1271 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1272                                                abi_ulong target_tz_addr)
1273 {
1274     struct target_timezone *target_tz;
1275 
1276     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1277         return -TARGET_EFAULT;
1278     }
1279 
1280     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1281     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1282 
1283     unlock_user_struct(target_tz, target_tz_addr, 0);
1284 
1285     return 0;
1286 }
1287 #endif
1288 
1289 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1290 #include <mqueue.h>
1291 
1292 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1293                                               abi_ulong target_mq_attr_addr)
1294 {
1295     struct target_mq_attr *target_mq_attr;
1296 
1297     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1298                           target_mq_attr_addr, 1))
1299         return -TARGET_EFAULT;
1300 
1301     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1302     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1303     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1304     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1305 
1306     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1307 
1308     return 0;
1309 }
1310 
1311 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1312                                             const struct mq_attr *attr)
1313 {
1314     struct target_mq_attr *target_mq_attr;
1315 
1316     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1317                           target_mq_attr_addr, 0))
1318         return -TARGET_EFAULT;
1319 
1320     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1321     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1322     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1323     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1324 
1325     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1326 
1327     return 0;
1328 }
1329 #endif
1330 
1331 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1332 /* do_select() must return target values and target errnos. */
1333 static abi_long do_select(int n,
1334                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1335                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1336 {
1337     fd_set rfds, wfds, efds;
1338     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1339     struct timeval tv;
1340     struct timespec ts, *ts_ptr;
1341     abi_long ret;
1342 
1343     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1344     if (ret) {
1345         return ret;
1346     }
1347     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1348     if (ret) {
1349         return ret;
1350     }
1351     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1352     if (ret) {
1353         return ret;
1354     }
1355 
1356     if (target_tv_addr) {
1357         if (copy_from_user_timeval(&tv, target_tv_addr))
1358             return -TARGET_EFAULT;
1359         ts.tv_sec = tv.tv_sec;
1360         ts.tv_nsec = tv.tv_usec * 1000;
1361         ts_ptr = &ts;
1362     } else {
1363         ts_ptr = NULL;
1364     }
1365 
1366     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1367                                   ts_ptr, NULL));
1368 
1369     if (!is_error(ret)) {
1370         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1371             return -TARGET_EFAULT;
1372         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1373             return -TARGET_EFAULT;
1374         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1375             return -TARGET_EFAULT;
1376 
1377         if (target_tv_addr) {
1378             tv.tv_sec = ts.tv_sec;
1379             tv.tv_usec = ts.tv_nsec / 1000;
1380             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1381                 return -TARGET_EFAULT;
1382             }
1383         }
1384     }
1385 
1386     return ret;
1387 }
1388 
1389 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1390 static abi_long do_old_select(abi_ulong arg1)
1391 {
1392     struct target_sel_arg_struct *sel;
1393     abi_ulong inp, outp, exp, tvp;
1394     long nsel;
1395 
1396     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1397         return -TARGET_EFAULT;
1398     }
1399 
1400     nsel = tswapal(sel->n);
1401     inp = tswapal(sel->inp);
1402     outp = tswapal(sel->outp);
1403     exp = tswapal(sel->exp);
1404     tvp = tswapal(sel->tvp);
1405 
1406     unlock_user_struct(sel, arg1, 0);
1407 
1408     return do_select(nsel, inp, outp, exp, tvp);
1409 }
1410 #endif
1411 #endif
1412 
1413 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1414 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1415                             abi_long arg4, abi_long arg5, abi_long arg6,
1416                             bool time64)
1417 {
1418     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1419     fd_set rfds, wfds, efds;
1420     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1421     struct timespec ts, *ts_ptr;
1422     abi_long ret;
1423 
1424     /*
1425      * The 6th arg is actually two args smashed together,
1426      * so we cannot use the C library.
1427      */
1428     struct {
1429         sigset_t *set;
1430         size_t size;
1431     } sig, *sig_ptr;
1432 
1433     abi_ulong arg_sigset, arg_sigsize, *arg7;
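
    /*
     * Illustrative note: arg6 points at two consecutive abi_ulongs in guest
     * memory, { sigset address, sigset size }; they are unpacked into
     * arg_sigset and arg_sigsize below.
     */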
1434 
1435     n = arg1;
1436     rfd_addr = arg2;
1437     wfd_addr = arg3;
1438     efd_addr = arg4;
1439     ts_addr = arg5;
1440 
1441     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1450     if (ret) {
1451         return ret;
1452     }
1453 
1454     /*
1455      * This takes a timespec, and not a timeval, so we cannot
1456      * use the do_select() helper ...
1457      */
1458     if (ts_addr) {
1459         if (time64) {
1460             if (target_to_host_timespec64(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         } else {
1464             if (target_to_host_timespec(&ts, ts_addr)) {
1465                 return -TARGET_EFAULT;
1466             }
1467         }
1468         ts_ptr = &ts;
1469     } else {
1470         ts_ptr = NULL;
1471     }
1472 
1473     /* Extract the two packed args for the sigset */
1474     sig_ptr = NULL;
1475     if (arg6) {
1476         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1477         if (!arg7) {
1478             return -TARGET_EFAULT;
1479         }
1480         arg_sigset = tswapal(arg7[0]);
1481         arg_sigsize = tswapal(arg7[1]);
1482         unlock_user(arg7, arg6, 0);
1483 
1484         if (arg_sigset) {
1485             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1486             if (ret != 0) {
1487                 return ret;
1488             }
1489             sig_ptr = &sig;
1490             sig.size = SIGSET_T_SIZE;
1491         }
1492     }
1493 
1494     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1495                                   ts_ptr, sig_ptr));
1496 
1497     if (sig_ptr) {
1498         finish_sigsuspend_mask(ret);
1499     }
1500 
1501     if (!is_error(ret)) {
1502         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1503             return -TARGET_EFAULT;
1504         }
1505         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1506             return -TARGET_EFAULT;
1507         }
1508         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1509             return -TARGET_EFAULT;
1510         }
1511         if (time64) {
1512             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1513                 return -TARGET_EFAULT;
1514             }
1515         } else {
1516             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1517                 return -TARGET_EFAULT;
1518             }
1519         }
1520     }
1521     return ret;
1522 }
1523 #endif
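/*
 * For illustration (not part of the original file): a guest libc typically
 * issues the raw syscall handled above roughly as
 *
 *     struct { const sigset_t *ss; size_t ss_len; } packed = { &mask, _NSIG / 8 };
 *     syscall(__NR_pselect6, nfds, &rfds, &wfds, &efds, &ts, &packed);
 *
 * which is why do_pselect6() reads two abi_ulong words through arg6 before
 * converting the sigset with process_sigsuspend_mask().
 */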
1524 
1525 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1526     defined(TARGET_NR_ppoll_time64)
1527 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1528                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1529 {
1530     struct target_pollfd *target_pfd;
1531     unsigned int nfds = arg2;
1532     struct pollfd *pfd;
1533     unsigned int i;
1534     abi_long ret;
1535 
1536     pfd = NULL;
1537     target_pfd = NULL;
1538     if (nfds) {
1539         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1540             return -TARGET_EINVAL;
1541         }
1542         target_pfd = lock_user(VERIFY_WRITE, arg1,
1543                                sizeof(struct target_pollfd) * nfds, 1);
1544         if (!target_pfd) {
1545             return -TARGET_EFAULT;
1546         }
1547 
1548         pfd = alloca(sizeof(struct pollfd) * nfds);
1549         for (i = 0; i < nfds; i++) {
1550             pfd[i].fd = tswap32(target_pfd[i].fd);
1551             pfd[i].events = tswap16(target_pfd[i].events);
1552         }
1553     }
1554     if (ppoll) {
1555         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1556         sigset_t *set = NULL;
1557 
1558         if (arg3) {
1559             if (time64) {
1560                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1561                     unlock_user(target_pfd, arg1, 0);
1562                     return -TARGET_EFAULT;
1563                 }
1564             } else {
1565                 if (target_to_host_timespec(timeout_ts, arg3)) {
1566                     unlock_user(target_pfd, arg1, 0);
1567                     return -TARGET_EFAULT;
1568                 }
1569             }
1570         } else {
1571             timeout_ts = NULL;
1572         }
1573 
1574         if (arg4) {
1575             ret = process_sigsuspend_mask(&set, arg4, arg5);
1576             if (ret != 0) {
1577                 unlock_user(target_pfd, arg1, 0);
1578                 return ret;
1579             }
1580         }
1581 
1582         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1583                                    set, SIGSET_T_SIZE));
1584 
1585         if (set) {
1586             finish_sigsuspend_mask(ret);
1587         }
1588         if (!is_error(ret) && arg3) {
1589             if (time64) {
1590                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1591                     return -TARGET_EFAULT;
1592                 }
1593             } else {
1594                 if (host_to_target_timespec(arg3, timeout_ts)) {
1595                     return -TARGET_EFAULT;
1596                 }
1597             }
1598         }
1599     } else {
1600         struct timespec ts, *pts;
1601 
1602         if (arg3 >= 0) {
1603             /* Convert ms to secs, ns */
1604             ts.tv_sec = arg3 / 1000;
1605             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1606             pts = &ts;
1607         } else {
1608             /* A negative poll() timeout means "infinite" */
1609             pts = NULL;
1610         }
1611         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1612     }
1613 
1614     if (!is_error(ret)) {
1615         for (i = 0; i < nfds; i++) {
1616             target_pfd[i].revents = tswap16(pfd[i].revents);
1617         }
1618     }
1619     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1620     return ret;
1621 }
1622 #endif
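/*
 * Worked example for the plain poll() path above: a guest timeout of
 * arg3 = 2500 ms becomes ts.tv_sec = 2, ts.tv_nsec = 500000000 before being
 * handed to safe_ppoll(), while a negative arg3 maps to a NULL timespec,
 * i.e. block indefinitely, matching poll(2) semantics.
 */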
1623 
1624 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1625                         int flags, int is_pipe2)
1626 {
1627     int host_pipe[2];
1628     abi_long ret;
1629     ret = pipe2(host_pipe, flags);
1630 
1631     if (is_error(ret))
1632         return get_errno(ret);
1633 
1634     /* Several targets have special calling conventions for the original
1635        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1636     if (!is_pipe2) {
1637 #if defined(TARGET_ALPHA)
1638         cpu_env->ir[IR_A4] = host_pipe[1];
1639         return host_pipe[0];
1640 #elif defined(TARGET_MIPS)
1641         cpu_env->active_tc.gpr[3] = host_pipe[1];
1642         return host_pipe[0];
1643 #elif defined(TARGET_SH4)
1644         cpu_env->gregs[1] = host_pipe[1];
1645         return host_pipe[0];
1646 #elif defined(TARGET_SPARC)
1647         cpu_env->regwptr[1] = host_pipe[1];
1648         return host_pipe[0];
1649 #endif
1650     }
1651 
1652     if (put_user_s32(host_pipe[0], pipedes)
1653         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1654         return -TARGET_EFAULT;
1655     return get_errno(ret);
1656 }
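/*
 * Illustration of the special pipe() convention handled above: on MIPS,
 * for example, the kernel ABI returns the read end in $v0 and the write
 * end in $v1, so the guest receives both descriptors in registers; pipe2()
 * always uses the two-element array at 'pipedes' instead.
 */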
1657 
1658 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1659                                                abi_ulong target_addr,
1660                                                socklen_t len)
1661 {
1662     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1663     sa_family_t sa_family;
1664     struct target_sockaddr *target_saddr;
1665 
1666     if (fd_trans_target_to_host_addr(fd)) {
1667         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1668     }
1669 
1670     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1671     if (!target_saddr)
1672         return -TARGET_EFAULT;
1673 
1674     sa_family = tswap16(target_saddr->sa_family);
1675 
1676     /* Oops. The caller might send an incomplete sun_path; sun_path
1677      * must be terminated by \0 (see the manual page), but
1678      * unfortunately it is quite common to specify sockaddr_un
1679      * length as "strlen(x->sun_path)" while it should be
1680      * "strlen(...) + 1". We'll fix that here if needed.
1681      * The Linux kernel has a similar feature.
1682      */
1683 
1684     if (sa_family == AF_UNIX) {
1685         if (len < unix_maxlen && len > 0) {
1686             char *cp = (char *)target_saddr;
1687 
1688             if (cp[len - 1] && !cp[len])
1689                 len++;
1690         }
1691         if (len > unix_maxlen)
1692             len = unix_maxlen;
1693     }
1694 
1695     memcpy(addr, target_saddr, len);
1696     addr->sa_family = sa_family;
1697     if (sa_family == AF_NETLINK) {
1698         struct sockaddr_nl *nladdr;
1699 
1700         nladdr = (struct sockaddr_nl *)addr;
1701         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1702         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1703     } else if (sa_family == AF_PACKET) {
1704         struct target_sockaddr_ll *lladdr;
1705 
1706         lladdr = (struct target_sockaddr_ll *)addr;
1707         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1708         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1709     } else if (sa_family == AF_INET6) {
1710         struct sockaddr_in6 *in6addr;
1711 
1712         in6addr = (struct sockaddr_in6 *)addr;
1713         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1714     }
1715     unlock_user(target_saddr, target_addr, 0);
1716 
1717     return 0;
1718 }
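/*
 * Example of the AF_UNIX length fixup above: a guest that passes
 * addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(path) leaves
 * the terminating '\0' outside the reported length; when the byte at
 * cp[len] is that NUL, len is bumped by one so the host kernel sees a
 * properly terminated sun_path.
 */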
1719 
1720 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1721                                                struct sockaddr *addr,
1722                                                socklen_t len)
1723 {
1724     struct target_sockaddr *target_saddr;
1725 
1726     if (len == 0) {
1727         return 0;
1728     }
1729     assert(addr);
1730 
1731     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1732     if (!target_saddr)
1733         return -TARGET_EFAULT;
1734     memcpy(target_saddr, addr, len);
1735     if (len >= offsetof(struct target_sockaddr, sa_family) +
1736         sizeof(target_saddr->sa_family)) {
1737         target_saddr->sa_family = tswap16(addr->sa_family);
1738     }
1739     if (addr->sa_family == AF_NETLINK &&
1740         len >= sizeof(struct target_sockaddr_nl)) {
1741         struct target_sockaddr_nl *target_nl =
1742                (struct target_sockaddr_nl *)target_saddr;
1743         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1744         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1745     } else if (addr->sa_family == AF_PACKET) {
1746         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1747         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1748         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1749     } else if (addr->sa_family == AF_INET6 &&
1750                len >= sizeof(struct target_sockaddr_in6)) {
1751         struct target_sockaddr_in6 *target_in6 =
1752                (struct target_sockaddr_in6 *)target_saddr;
1753         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1754     }
1755     unlock_user(target_saddr, target_addr, len);
1756 
1757     return 0;
1758 }
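/*
 * Note on the host-to-target direction above: the sa_family, AF_NETLINK and
 * AF_INET6 fixups are guarded by length checks, so a short address length
 * reported by the host never causes a write past the locked guest buffer.
 */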
1759 
1760 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1761                                            struct target_msghdr *target_msgh)
1762 {
1763     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1764     abi_long msg_controllen;
1765     abi_ulong target_cmsg_addr;
1766     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1767     socklen_t space = 0;
1768 
1769     msg_controllen = tswapal(target_msgh->msg_controllen);
1770     if (msg_controllen < sizeof (struct target_cmsghdr))
1771         goto the_end;
1772     target_cmsg_addr = tswapal(target_msgh->msg_control);
1773     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1774     target_cmsg_start = target_cmsg;
1775     if (!target_cmsg)
1776         return -TARGET_EFAULT;
1777 
1778     while (cmsg && target_cmsg) {
1779         void *data = CMSG_DATA(cmsg);
1780         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1781 
1782         int len = tswapal(target_cmsg->cmsg_len)
1783             - sizeof(struct target_cmsghdr);
1784 
1785         space += CMSG_SPACE(len);
1786         if (space > msgh->msg_controllen) {
1787             space -= CMSG_SPACE(len);
1788             /* This is a QEMU bug, since we allocated the payload
1789              * area ourselves (unlike overflow in host-to-target
1790              * conversion, which is just the guest giving us a buffer
1791              * that's too small). It can't happen for the payload types
1792              * we currently support; if it becomes an issue in future
1793              * we would need to improve our allocation strategy to
1794              * something more intelligent than "twice the size of the
1795              * target buffer we're reading from".
1796              */
1797             qemu_log_mask(LOG_UNIMP,
1798                           ("Unsupported ancillary data %d/%d: "
1799                            "unhandled msg size\n"),
1800                           tswap32(target_cmsg->cmsg_level),
1801                           tswap32(target_cmsg->cmsg_type));
1802             break;
1803         }
1804 
1805         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1806             cmsg->cmsg_level = SOL_SOCKET;
1807         } else {
1808             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1809         }
1810         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1811         cmsg->cmsg_len = CMSG_LEN(len);
1812 
1813         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1814             int *fd = (int *)data;
1815             int *target_fd = (int *)target_data;
1816             int i, numfds = len / sizeof(int);
1817 
1818             for (i = 0; i < numfds; i++) {
1819                 __get_user(fd[i], target_fd + i);
1820             }
1821         } else if (cmsg->cmsg_level == SOL_SOCKET
1822                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1823             struct ucred *cred = (struct ucred *)data;
1824             struct target_ucred *target_cred =
1825                 (struct target_ucred *)target_data;
1826 
1827             __get_user(cred->pid, &target_cred->pid);
1828             __get_user(cred->uid, &target_cred->uid);
1829             __get_user(cred->gid, &target_cred->gid);
1830         } else if (cmsg->cmsg_level == SOL_ALG) {
1831             uint32_t *dst = (uint32_t *)data;
1832 
1833             memcpy(dst, target_data, len);
1834             /* fix endianness of first 32-bit word */
1835             if (len >= sizeof(uint32_t)) {
1836                 *dst = tswap32(*dst);
1837             }
1838         } else {
1839             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1840                           cmsg->cmsg_level, cmsg->cmsg_type);
1841             memcpy(data, target_data, len);
1842         }
1843 
1844         cmsg = CMSG_NXTHDR(msgh, cmsg);
1845         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1846                                          target_cmsg_start);
1847     }
1848     unlock_user(target_cmsg, target_cmsg_addr, 0);
1849  the_end:
1850     msgh->msg_controllen = space;
1851     return 0;
1852 }
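/*
 * Sizing note for the conversion above: for an SCM_RIGHTS message carrying
 * three descriptors, len = 3 * sizeof(int), cmsg_len is set to
 * CMSG_LEN(len) = sizeof(struct cmsghdr) + len, and the running 'space'
 * total grows by CMSG_SPACE(len), which additionally rounds len up to the
 * host alignment.
 */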
1853 
1854 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1855                                            struct msghdr *msgh)
1856 {
1857     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1858     abi_long msg_controllen;
1859     abi_ulong target_cmsg_addr;
1860     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1861     socklen_t space = 0;
1862 
1863     msg_controllen = tswapal(target_msgh->msg_controllen);
1864     if (msg_controllen < sizeof (struct target_cmsghdr))
1865         goto the_end;
1866     target_cmsg_addr = tswapal(target_msgh->msg_control);
1867     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1868     target_cmsg_start = target_cmsg;
1869     if (!target_cmsg)
1870         return -TARGET_EFAULT;
1871 
1872     while (cmsg && target_cmsg) {
1873         void *data = CMSG_DATA(cmsg);
1874         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1875 
1876         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1877         int tgt_len, tgt_space;
1878 
1879         /* We never copy a half-header but may copy half-data;
1880          * this is Linux's behaviour in put_cmsg(). Note that
1881          * truncation here is a guest problem (which we report
1882          * to the guest via the CTRUNC bit), unlike truncation
1883          * in target_to_host_cmsg, which is a QEMU bug.
1884          */
1885         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1886             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1887             break;
1888         }
1889 
1890         if (cmsg->cmsg_level == SOL_SOCKET) {
1891             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1892         } else {
1893             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1894         }
1895         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1896 
1897         /* Payload types which need a different size of payload on
1898          * the target must adjust tgt_len here.
1899          */
1900         tgt_len = len;
1901         switch (cmsg->cmsg_level) {
1902         case SOL_SOCKET:
1903             switch (cmsg->cmsg_type) {
1904             case SO_TIMESTAMP:
1905                 tgt_len = sizeof(struct target_timeval);
1906                 break;
1907             default:
1908                 break;
1909             }
1910             break;
1911         default:
1912             break;
1913         }
1914 
1915         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1916             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1917             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1918         }
1919 
1920         /* We must now copy-and-convert len bytes of payload
1921          * into tgt_len bytes of destination space. Bear in mind
1922          * that in both source and destination we may be dealing
1923          * with a truncated value!
1924          */
1925         switch (cmsg->cmsg_level) {
1926         case SOL_SOCKET:
1927             switch (cmsg->cmsg_type) {
1928             case SCM_RIGHTS:
1929             {
1930                 int *fd = (int *)data;
1931                 int *target_fd = (int *)target_data;
1932                 int i, numfds = tgt_len / sizeof(int);
1933 
1934                 for (i = 0; i < numfds; i++) {
1935                     __put_user(fd[i], target_fd + i);
1936                 }
1937                 break;
1938             }
1939             case SO_TIMESTAMP:
1940             {
1941                 struct timeval *tv = (struct timeval *)data;
1942                 struct target_timeval *target_tv =
1943                     (struct target_timeval *)target_data;
1944 
1945                 if (len != sizeof(struct timeval) ||
1946                     tgt_len != sizeof(struct target_timeval)) {
1947                     goto unimplemented;
1948                 }
1949 
1950                 /* copy struct timeval to target */
1951                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1952                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1953                 break;
1954             }
1955             case SCM_CREDENTIALS:
1956             {
1957                 struct ucred *cred = (struct ucred *)data;
1958                 struct target_ucred *target_cred =
1959                     (struct target_ucred *)target_data;
1960 
1961                 __put_user(cred->pid, &target_cred->pid);
1962                 __put_user(cred->uid, &target_cred->uid);
1963                 __put_user(cred->gid, &target_cred->gid);
1964                 break;
1965             }
1966             default:
1967                 goto unimplemented;
1968             }
1969             break;
1970 
1971         case SOL_IP:
1972             switch (cmsg->cmsg_type) {
1973             case IP_TTL:
1974             {
1975                 uint32_t *v = (uint32_t *)data;
1976                 uint32_t *t_int = (uint32_t *)target_data;
1977 
1978                 if (len != sizeof(uint32_t) ||
1979                     tgt_len != sizeof(uint32_t)) {
1980                     goto unimplemented;
1981                 }
1982                 __put_user(*v, t_int);
1983                 break;
1984             }
1985             case IP_RECVERR:
1986             {
1987                 struct errhdr_t {
1988                    struct sock_extended_err ee;
1989                    struct sockaddr_in offender;
1990                 };
1991                 struct errhdr_t *errh = (struct errhdr_t *)data;
1992                 struct errhdr_t *target_errh =
1993                     (struct errhdr_t *)target_data;
1994 
1995                 if (len != sizeof(struct errhdr_t) ||
1996                     tgt_len != sizeof(struct errhdr_t)) {
1997                     goto unimplemented;
1998                 }
1999                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2000                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2001                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2002                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2003                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2004                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2005                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2006                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2007                     (void *) &errh->offender, sizeof(errh->offender));
2008                 break;
2009             }
2010             case IP_PKTINFO:
2011             {
2012                 struct in_pktinfo *pkti = data;
2013                 struct target_in_pktinfo *target_pi = target_data;
2014 
2015                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2016                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2017                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2018                 break;
2019             }
2020             default:
2021                 goto unimplemented;
2022             }
2023             break;
2024 
2025         case SOL_IPV6:
2026             switch (cmsg->cmsg_type) {
2027             case IPV6_HOPLIMIT:
2028             {
2029                 uint32_t *v = (uint32_t *)data;
2030                 uint32_t *t_int = (uint32_t *)target_data;
2031 
2032                 if (len != sizeof(uint32_t) ||
2033                     tgt_len != sizeof(uint32_t)) {
2034                     goto unimplemented;
2035                 }
2036                 __put_user(*v, t_int);
2037                 break;
2038             }
2039             case IPV6_RECVERR:
2040             {
2041                 struct errhdr6_t {
2042                    struct sock_extended_err ee;
2043                    struct sockaddr_in6 offender;
2044                 };
2045                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2046                 struct errhdr6_t *target_errh =
2047                     (struct errhdr6_t *)target_data;
2048 
2049                 if (len != sizeof(struct errhdr6_t) ||
2050                     tgt_len != sizeof(struct errhdr6_t)) {
2051                     goto unimplemented;
2052                 }
2053                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2054                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2055                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2056                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2057                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2058                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2059                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2060                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2061                     (void *) &errh->offender, sizeof(errh->offender));
2062                 break;
2063             }
2064             default:
2065                 goto unimplemented;
2066             }
2067             break;
2068 
2069         default:
2070         unimplemented:
2071             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2072                           cmsg->cmsg_level, cmsg->cmsg_type);
2073             memcpy(target_data, data, MIN(len, tgt_len));
2074             if (tgt_len > len) {
2075                 memset(target_data + len, 0, tgt_len - len);
2076             }
2077         }
2078 
2079         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2080         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2081         if (msg_controllen < tgt_space) {
2082             tgt_space = msg_controllen;
2083         }
2084         msg_controllen -= tgt_space;
2085         space += tgt_space;
2086         cmsg = CMSG_NXTHDR(msgh, cmsg);
2087         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2088                                          target_cmsg_start);
2089     }
2090     unlock_user(target_cmsg, target_cmsg_addr, space);
2091  the_end:
2092     target_msgh->msg_controllen = tswapal(space);
2093     return 0;
2094 }
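/*
 * Why tgt_len can differ from len above: for SO_TIMESTAMP a 64-bit host
 * produces a 16-byte struct timeval while a 32-bit guest expects an 8-byte
 * struct target_timeval, so the payload is converted field by field into
 * the smaller target layout rather than copied verbatim.
 */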
2095 
2096 /* do_setsockopt() must return target values and target errnos. */
2097 static abi_long do_setsockopt(int sockfd, int level, int optname,
2098                               abi_ulong optval_addr, socklen_t optlen)
2099 {
2100     abi_long ret;
2101     int val;
2102 
2103     switch(level) {
2104     case SOL_TCP:
2105     case SOL_UDP:
2106         /* TCP and UDP options all take an 'int' value.  */
2107         if (optlen < sizeof(uint32_t))
2108             return -TARGET_EINVAL;
2109 
2110         if (get_user_u32(val, optval_addr))
2111             return -TARGET_EFAULT;
2112         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2113         break;
2114     case SOL_IP:
2115         switch(optname) {
2116         case IP_TOS:
2117         case IP_TTL:
2118         case IP_HDRINCL:
2119         case IP_ROUTER_ALERT:
2120         case IP_RECVOPTS:
2121         case IP_RETOPTS:
2122         case IP_PKTINFO:
2123         case IP_MTU_DISCOVER:
2124         case IP_RECVERR:
2125         case IP_RECVTTL:
2126         case IP_RECVTOS:
2127 #ifdef IP_FREEBIND
2128         case IP_FREEBIND:
2129 #endif
2130         case IP_MULTICAST_TTL:
2131         case IP_MULTICAST_LOOP:
2132             val = 0;
2133             if (optlen >= sizeof(uint32_t)) {
2134                 if (get_user_u32(val, optval_addr))
2135                     return -TARGET_EFAULT;
2136             } else if (optlen >= 1) {
2137                 if (get_user_u8(val, optval_addr))
2138                     return -TARGET_EFAULT;
2139             }
2140             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2141             break;
2142         case IP_MULTICAST_IF:
2143         case IP_ADD_MEMBERSHIP:
2144         case IP_DROP_MEMBERSHIP:
2145         {
2146             struct ip_mreqn ip_mreq;
2147             struct target_ip_mreqn *target_smreqn;
2148             int min_size;
2149 
2150             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2151                               sizeof(struct target_ip_mreq));
2152 
2153             if (optname == IP_MULTICAST_IF) {
2154                 min_size = sizeof(struct in_addr);
2155             } else {
2156                 min_size = sizeof(struct target_ip_mreq);
2157             }
2158             if (optlen < min_size ||
2159                 optlen > sizeof (struct target_ip_mreqn)) {
2160                 return -TARGET_EINVAL;
2161             }
2162 
2163             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2164             if (!target_smreqn) {
2165                 return -TARGET_EFAULT;
2166             }
2167             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2168             if (optlen >= sizeof(struct target_ip_mreq)) {
2169                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2170                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2171                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2172                     optlen = sizeof(struct ip_mreqn);
2173                 }
2174             }
2175             unlock_user(target_smreqn, optval_addr, 0);
2176             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2177             break;
2178         }
2179         case IP_BLOCK_SOURCE:
2180         case IP_UNBLOCK_SOURCE:
2181         case IP_ADD_SOURCE_MEMBERSHIP:
2182         case IP_DROP_SOURCE_MEMBERSHIP:
2183         {
2184             struct ip_mreq_source *ip_mreq_source;
2185 
2186             if (optlen != sizeof (struct target_ip_mreq_source))
2187                 return -TARGET_EINVAL;
2188 
2189             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2190             if (!ip_mreq_source) {
2191                 return -TARGET_EFAULT;
2192             }
2193             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2194             unlock_user(ip_mreq_source, optval_addr, 0);
2195             break;
2196         }
2197         default:
2198             goto unimplemented;
2199         }
2200         break;
2201     case SOL_IPV6:
2202         switch (optname) {
2203         case IPV6_MTU_DISCOVER:
2204         case IPV6_MTU:
2205         case IPV6_V6ONLY:
2206         case IPV6_RECVPKTINFO:
2207         case IPV6_UNICAST_HOPS:
2208         case IPV6_MULTICAST_HOPS:
2209         case IPV6_MULTICAST_LOOP:
2210         case IPV6_RECVERR:
2211         case IPV6_RECVHOPLIMIT:
2212         case IPV6_2292HOPLIMIT:
2213         case IPV6_CHECKSUM:
2214         case IPV6_ADDRFORM:
2215         case IPV6_2292PKTINFO:
2216         case IPV6_RECVTCLASS:
2217         case IPV6_RECVRTHDR:
2218         case IPV6_2292RTHDR:
2219         case IPV6_RECVHOPOPTS:
2220         case IPV6_2292HOPOPTS:
2221         case IPV6_RECVDSTOPTS:
2222         case IPV6_2292DSTOPTS:
2223         case IPV6_TCLASS:
2224         case IPV6_ADDR_PREFERENCES:
2225 #ifdef IPV6_RECVPATHMTU
2226         case IPV6_RECVPATHMTU:
2227 #endif
2228 #ifdef IPV6_TRANSPARENT
2229         case IPV6_TRANSPARENT:
2230 #endif
2231 #ifdef IPV6_FREEBIND
2232         case IPV6_FREEBIND:
2233 #endif
2234 #ifdef IPV6_RECVORIGDSTADDR
2235         case IPV6_RECVORIGDSTADDR:
2236 #endif
2237             val = 0;
2238             if (optlen < sizeof(uint32_t)) {
2239                 return -TARGET_EINVAL;
2240             }
2241             if (get_user_u32(val, optval_addr)) {
2242                 return -TARGET_EFAULT;
2243             }
2244             ret = get_errno(setsockopt(sockfd, level, optname,
2245                                        &val, sizeof(val)));
2246             break;
2247         case IPV6_PKTINFO:
2248         {
2249             struct in6_pktinfo pki;
2250 
2251             if (optlen < sizeof(pki)) {
2252                 return -TARGET_EINVAL;
2253             }
2254 
2255             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2256                 return -TARGET_EFAULT;
2257             }
2258 
2259             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2260 
2261             ret = get_errno(setsockopt(sockfd, level, optname,
2262                                        &pki, sizeof(pki)));
2263             break;
2264         }
2265         case IPV6_ADD_MEMBERSHIP:
2266         case IPV6_DROP_MEMBERSHIP:
2267         {
2268             struct ipv6_mreq ipv6mreq;
2269 
2270             if (optlen < sizeof(ipv6mreq)) {
2271                 return -TARGET_EINVAL;
2272             }
2273 
2274             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2275                 return -TARGET_EFAULT;
2276             }
2277 
2278             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2279 
2280             ret = get_errno(setsockopt(sockfd, level, optname,
2281                                        &ipv6mreq, sizeof(ipv6mreq)));
2282             break;
2283         }
2284         default:
2285             goto unimplemented;
2286         }
2287         break;
2288     case SOL_ICMPV6:
2289         switch (optname) {
2290         case ICMPV6_FILTER:
2291         {
2292             struct icmp6_filter icmp6f;
2293 
2294             if (optlen > sizeof(icmp6f)) {
2295                 optlen = sizeof(icmp6f);
2296             }
2297 
2298             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2299                 return -TARGET_EFAULT;
2300             }
2301 
2302             for (val = 0; val < 8; val++) {
2303                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2304             }
2305 
2306             ret = get_errno(setsockopt(sockfd, level, optname,
2307                                        &icmp6f, optlen));
2308             break;
2309         }
2310         default:
2311             goto unimplemented;
2312         }
2313         break;
2314     case SOL_RAW:
2315         switch (optname) {
2316         case ICMP_FILTER:
2317         case IPV6_CHECKSUM:
2318             /* those take a u32 value */
2319             if (optlen < sizeof(uint32_t)) {
2320                 return -TARGET_EINVAL;
2321             }
2322 
2323             if (get_user_u32(val, optval_addr)) {
2324                 return -TARGET_EFAULT;
2325             }
2326             ret = get_errno(setsockopt(sockfd, level, optname,
2327                                        &val, sizeof(val)));
2328             break;
2329 
2330         default:
2331             goto unimplemented;
2332         }
2333         break;
2334 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2335     case SOL_ALG:
2336         switch (optname) {
2337         case ALG_SET_KEY:
2338         {
2339             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2340             if (!alg_key) {
2341                 return -TARGET_EFAULT;
2342             }
2343             ret = get_errno(setsockopt(sockfd, level, optname,
2344                                        alg_key, optlen));
2345             unlock_user(alg_key, optval_addr, optlen);
2346             break;
2347         }
2348         case ALG_SET_AEAD_AUTHSIZE:
2349         {
2350             ret = get_errno(setsockopt(sockfd, level, optname,
2351                                        NULL, optlen));
2352             break;
2353         }
2354         default:
2355             goto unimplemented;
2356         }
2357         break;
2358 #endif
2359     case TARGET_SOL_SOCKET:
2360         switch (optname) {
2361         case TARGET_SO_RCVTIMEO:
2362         case TARGET_SO_SNDTIMEO:
2363         {
2364                 struct timeval tv;
2365 
2366                 if (optlen != sizeof(struct target_timeval)) {
2367                     return -TARGET_EINVAL;
2368                 }
2369 
2370                 if (copy_from_user_timeval(&tv, optval_addr)) {
2371                     return -TARGET_EFAULT;
2372                 }
2373 
2374                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2375                                 optname == TARGET_SO_RCVTIMEO ?
2376                                     SO_RCVTIMEO : SO_SNDTIMEO,
2377                                 &tv, sizeof(tv)));
2378                 return ret;
2379         }
2380         case TARGET_SO_ATTACH_FILTER:
2381         {
2382                 struct target_sock_fprog *tfprog;
2383                 struct target_sock_filter *tfilter;
2384                 struct sock_fprog fprog;
2385                 struct sock_filter *filter;
2386                 int i;
2387 
2388                 if (optlen != sizeof(*tfprog)) {
2389                     return -TARGET_EINVAL;
2390                 }
2391                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2392                     return -TARGET_EFAULT;
2393                 }
2394                 if (!lock_user_struct(VERIFY_READ, tfilter,
2395                                       tswapal(tfprog->filter), 0)) {
2396                     unlock_user_struct(tfprog, optval_addr, 1);
2397                     return -TARGET_EFAULT;
2398                 }
2399 
2400                 fprog.len = tswap16(tfprog->len);
2401                 filter = g_try_new(struct sock_filter, fprog.len);
2402                 if (filter == NULL) {
2403                     unlock_user_struct(tfilter, tfprog->filter, 1);
2404                     unlock_user_struct(tfprog, optval_addr, 1);
2405                     return -TARGET_ENOMEM;
2406                 }
2407                 for (i = 0; i < fprog.len; i++) {
2408                     filter[i].code = tswap16(tfilter[i].code);
2409                     filter[i].jt = tfilter[i].jt;
2410                     filter[i].jf = tfilter[i].jf;
2411                     filter[i].k = tswap32(tfilter[i].k);
2412                 }
2413                 fprog.filter = filter;
2414 
2415                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2416                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2417                 g_free(filter);
2418 
2419                 unlock_user_struct(tfilter, tfprog->filter, 1);
2420                 unlock_user_struct(tfprog, optval_addr, 1);
2421                 return ret;
2422         }
2423         case TARGET_SO_BINDTODEVICE:
2424         {
2425                 char *dev_ifname, *addr_ifname;
2426 
2427                 if (optlen > IFNAMSIZ - 1) {
2428                     optlen = IFNAMSIZ - 1;
2429                 }
2430                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2431                 if (!dev_ifname) {
2432                     return -TARGET_EFAULT;
2433                 }
2434                 optname = SO_BINDTODEVICE;
2435                 addr_ifname = alloca(IFNAMSIZ);
2436                 memcpy(addr_ifname, dev_ifname, optlen);
2437                 addr_ifname[optlen] = 0;
2438                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2439                                            addr_ifname, optlen));
2440                 unlock_user(dev_ifname, optval_addr, 0);
2441                 return ret;
2442         }
2443         case TARGET_SO_LINGER:
2444         {
2445                 struct linger lg;
2446                 struct target_linger *tlg;
2447 
2448                 if (optlen != sizeof(struct target_linger)) {
2449                     return -TARGET_EINVAL;
2450                 }
2451                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2452                     return -TARGET_EFAULT;
2453                 }
2454                 __get_user(lg.l_onoff, &tlg->l_onoff);
2455                 __get_user(lg.l_linger, &tlg->l_linger);
2456                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2457                                 &lg, sizeof(lg)));
2458                 unlock_user_struct(tlg, optval_addr, 0);
2459                 return ret;
2460         }
2461             /* Options with 'int' argument.  */
2462         case TARGET_SO_DEBUG:
2463                 optname = SO_DEBUG;
2464                 break;
2465         case TARGET_SO_REUSEADDR:
2466                 optname = SO_REUSEADDR;
2467                 break;
2468 #ifdef SO_REUSEPORT
2469         case TARGET_SO_REUSEPORT:
2470                 optname = SO_REUSEPORT;
2471                 break;
2472 #endif
2473         case TARGET_SO_TYPE:
2474                 optname = SO_TYPE;
2475                 break;
2476         case TARGET_SO_ERROR:
2477                 optname = SO_ERROR;
2478                 break;
2479         case TARGET_SO_DONTROUTE:
2480                 optname = SO_DONTROUTE;
2481                 break;
2482         case TARGET_SO_BROADCAST:
2483                 optname = SO_BROADCAST;
2484                 break;
2485         case TARGET_SO_SNDBUF:
2486                 optname = SO_SNDBUF;
2487                 break;
2488         case TARGET_SO_SNDBUFFORCE:
2489                 optname = SO_SNDBUFFORCE;
2490                 break;
2491         case TARGET_SO_RCVBUF:
2492                 optname = SO_RCVBUF;
2493                 break;
2494         case TARGET_SO_RCVBUFFORCE:
2495                 optname = SO_RCVBUFFORCE;
2496                 break;
2497         case TARGET_SO_KEEPALIVE:
2498                 optname = SO_KEEPALIVE;
2499                 break;
2500         case TARGET_SO_OOBINLINE:
2501                 optname = SO_OOBINLINE;
2502                 break;
2503         case TARGET_SO_NO_CHECK:
2504                 optname = SO_NO_CHECK;
2505                 break;
2506         case TARGET_SO_PRIORITY:
2507                 optname = SO_PRIORITY;
2508                 break;
2509 #ifdef SO_BSDCOMPAT
2510         case TARGET_SO_BSDCOMPAT:
2511                 optname = SO_BSDCOMPAT;
2512                 break;
2513 #endif
2514         case TARGET_SO_PASSCRED:
2515                 optname = SO_PASSCRED;
2516                 break;
2517         case TARGET_SO_PASSSEC:
2518                 optname = SO_PASSSEC;
2519                 break;
2520         case TARGET_SO_TIMESTAMP:
2521                 optname = SO_TIMESTAMP;
2522                 break;
2523         case TARGET_SO_RCVLOWAT:
2524                 optname = SO_RCVLOWAT;
2525                 break;
2526         default:
2527             goto unimplemented;
2528         }
2529         if (optlen < sizeof(uint32_t))
2530             return -TARGET_EINVAL;
2531 
2532         if (get_user_u32(val, optval_addr))
2533             return -TARGET_EFAULT;
2534         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2535         break;
2536 #ifdef SOL_NETLINK
2537     case SOL_NETLINK:
2538         switch (optname) {
2539         case NETLINK_PKTINFO:
2540         case NETLINK_ADD_MEMBERSHIP:
2541         case NETLINK_DROP_MEMBERSHIP:
2542         case NETLINK_BROADCAST_ERROR:
2543         case NETLINK_NO_ENOBUFS:
2544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2545         case NETLINK_LISTEN_ALL_NSID:
2546         case NETLINK_CAP_ACK:
2547 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2548 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2549         case NETLINK_EXT_ACK:
2550 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2552         case NETLINK_GET_STRICT_CHK:
2553 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2554             break;
2555         default:
2556             goto unimplemented;
2557         }
2558         val = 0;
2559         if (optlen < sizeof(uint32_t)) {
2560             return -TARGET_EINVAL;
2561         }
2562         if (get_user_u32(val, optval_addr)) {
2563             return -TARGET_EFAULT;
2564         }
2565         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2566                                    sizeof(val)));
2567         break;
2568 #endif /* SOL_NETLINK */
2569     default:
2570     unimplemented:
2571         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2572                       level, optname);
2573         ret = -TARGET_ENOPROTOOPT;
2574     }
2575     return ret;
2576 }
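/*
 * Typical flow through do_setsockopt() (illustrative sketch): a guest call
 * such as
 *
 *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * arrives with a target_timeval in guest memory; copy_from_user_timeval()
 * rebuilds a host struct timeval before the host setsockopt() is issued
 * with SO_RCVTIMEO.
 */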
2577 
2578 /* do_getsockopt() must return target values and target errnos. */
2579 static abi_long do_getsockopt(int sockfd, int level, int optname,
2580                               abi_ulong optval_addr, abi_ulong optlen)
2581 {
2582     abi_long ret;
2583     int len, val;
2584     socklen_t lv;
2585 
2586     switch(level) {
2587     case TARGET_SOL_SOCKET:
2588         level = SOL_SOCKET;
2589         switch (optname) {
2590         /* These don't just return a single integer */
2591         case TARGET_SO_PEERNAME:
2592             goto unimplemented;
2593         case TARGET_SO_RCVTIMEO: {
2594             struct timeval tv;
2595             socklen_t tvlen;
2596 
2597             optname = SO_RCVTIMEO;
2598 
2599 get_timeout:
2600             if (get_user_u32(len, optlen)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             if (len < 0) {
2604                 return -TARGET_EINVAL;
2605             }
2606 
2607             tvlen = sizeof(tv);
2608             ret = get_errno(getsockopt(sockfd, level, optname,
2609                                        &tv, &tvlen));
2610             if (ret < 0) {
2611                 return ret;
2612             }
2613             if (len > sizeof(struct target_timeval)) {
2614                 len = sizeof(struct target_timeval);
2615             }
2616             if (copy_to_user_timeval(optval_addr, &tv)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (put_user_u32(len, optlen)) {
2620                 return -TARGET_EFAULT;
2621             }
2622             break;
2623         }
2624         case TARGET_SO_SNDTIMEO:
2625             optname = SO_SNDTIMEO;
2626             goto get_timeout;
2627         case TARGET_SO_PEERCRED: {
2628             struct ucred cr;
2629             socklen_t crlen;
2630             struct target_ucred *tcr;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             crlen = sizeof(cr);
2640             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2641                                        &cr, &crlen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > crlen) {
2646                 len = crlen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(cr.pid, &tcr->pid);
2652             __put_user(cr.uid, &tcr->uid);
2653             __put_user(cr.gid, &tcr->gid);
2654             unlock_user_struct(tcr, optval_addr, 1);
2655             if (put_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             break;
2659         }
2660         case TARGET_SO_PEERSEC: {
2661             char *name;
2662 
2663             if (get_user_u32(len, optlen)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             if (len < 0) {
2667                 return -TARGET_EINVAL;
2668             }
2669             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2670             if (!name) {
2671                 return -TARGET_EFAULT;
2672             }
2673             lv = len;
2674             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2675                                        name, &lv));
2676             if (put_user_u32(lv, optlen)) {
2677                 ret = -TARGET_EFAULT;
2678             }
2679             unlock_user(name, optval_addr, lv);
2680             break;
2681         }
2682         case TARGET_SO_LINGER:
2683         {
2684             struct linger lg;
2685             socklen_t lglen;
2686             struct target_linger *tlg;
2687 
2688             if (get_user_u32(len, optlen)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             if (len < 0) {
2692                 return -TARGET_EINVAL;
2693             }
2694 
2695             lglen = sizeof(lg);
2696             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2697                                        &lg, &lglen));
2698             if (ret < 0) {
2699                 return ret;
2700             }
2701             if (len > lglen) {
2702                 len = lglen;
2703             }
2704             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             __put_user(lg.l_onoff, &tlg->l_onoff);
2708             __put_user(lg.l_linger, &tlg->l_linger);
2709             unlock_user_struct(tlg, optval_addr, 1);
2710             if (put_user_u32(len, optlen)) {
2711                 return -TARGET_EFAULT;
2712             }
2713             break;
2714         }
2715         /* Options with 'int' argument.  */
2716         case TARGET_SO_DEBUG:
2717             optname = SO_DEBUG;
2718             goto int_case;
2719         case TARGET_SO_REUSEADDR:
2720             optname = SO_REUSEADDR;
2721             goto int_case;
2722 #ifdef SO_REUSEPORT
2723         case TARGET_SO_REUSEPORT:
2724             optname = SO_REUSEPORT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_TYPE:
2728             optname = SO_TYPE;
2729             goto int_case;
2730         case TARGET_SO_ERROR:
2731             optname = SO_ERROR;
2732             goto int_case;
2733         case TARGET_SO_DONTROUTE:
2734             optname = SO_DONTROUTE;
2735             goto int_case;
2736         case TARGET_SO_BROADCAST:
2737             optname = SO_BROADCAST;
2738             goto int_case;
2739         case TARGET_SO_SNDBUF:
2740             optname = SO_SNDBUF;
2741             goto int_case;
2742         case TARGET_SO_RCVBUF:
2743             optname = SO_RCVBUF;
2744             goto int_case;
2745         case TARGET_SO_KEEPALIVE:
2746             optname = SO_KEEPALIVE;
2747             goto int_case;
2748         case TARGET_SO_OOBINLINE:
2749             optname = SO_OOBINLINE;
2750             goto int_case;
2751         case TARGET_SO_NO_CHECK:
2752             optname = SO_NO_CHECK;
2753             goto int_case;
2754         case TARGET_SO_PRIORITY:
2755             optname = SO_PRIORITY;
2756             goto int_case;
2757 #ifdef SO_BSDCOMPAT
2758         case TARGET_SO_BSDCOMPAT:
2759             optname = SO_BSDCOMPAT;
2760             goto int_case;
2761 #endif
2762         case TARGET_SO_PASSCRED:
2763             optname = SO_PASSCRED;
2764             goto int_case;
2765         case TARGET_SO_TIMESTAMP:
2766             optname = SO_TIMESTAMP;
2767             goto int_case;
2768         case TARGET_SO_RCVLOWAT:
2769             optname = SO_RCVLOWAT;
2770             goto int_case;
2771         case TARGET_SO_ACCEPTCONN:
2772             optname = SO_ACCEPTCONN;
2773             goto int_case;
2774         case TARGET_SO_PROTOCOL:
2775             optname = SO_PROTOCOL;
2776             goto int_case;
2777         case TARGET_SO_DOMAIN:
2778             optname = SO_DOMAIN;
2779             goto int_case;
2780         default:
2781             goto int_case;
2782         }
2783         break;
2784     case SOL_TCP:
2785     case SOL_UDP:
2786         /* TCP and UDP options all take an 'int' value.  */
2787     int_case:
2788         if (get_user_u32(len, optlen))
2789             return -TARGET_EFAULT;
2790         if (len < 0)
2791             return -TARGET_EINVAL;
2792         lv = sizeof(lv);
2793         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2794         if (ret < 0)
2795             return ret;
2796         switch (optname) {
2797         case SO_TYPE:
2798             val = host_to_target_sock_type(val);
2799             break;
2800         case SO_ERROR:
2801             val = host_to_target_errno(val);
2802             break;
2803         }
2804         if (len > lv)
2805             len = lv;
2806         if (len == 4) {
2807             if (put_user_u32(val, optval_addr))
2808                 return -TARGET_EFAULT;
2809         } else {
2810             if (put_user_u8(val, optval_addr))
2811                 return -TARGET_EFAULT;
2812         }
2813         if (put_user_u32(len, optlen))
2814             return -TARGET_EFAULT;
2815         break;
2816     case SOL_IP:
2817         switch(optname) {
2818         case IP_TOS:
2819         case IP_TTL:
2820         case IP_HDRINCL:
2821         case IP_ROUTER_ALERT:
2822         case IP_RECVOPTS:
2823         case IP_RETOPTS:
2824         case IP_PKTINFO:
2825         case IP_MTU_DISCOVER:
2826         case IP_RECVERR:
2827         case IP_RECVTOS:
2828 #ifdef IP_FREEBIND
2829         case IP_FREEBIND:
2830 #endif
2831         case IP_MULTICAST_TTL:
2832         case IP_MULTICAST_LOOP:
2833             if (get_user_u32(len, optlen))
2834                 return -TARGET_EFAULT;
2835             if (len < 0)
2836                 return -TARGET_EINVAL;
2837             lv = sizeof(lv);
2838             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2839             if (ret < 0)
2840                 return ret;
2841             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2842                 len = 1;
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u8(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             } else {
2847                 if (len > sizeof(int))
2848                     len = sizeof(int);
2849                 if (put_user_u32(len, optlen)
2850                     || put_user_u32(val, optval_addr))
2851                     return -TARGET_EFAULT;
2852             }
2853             break;
2854         default:
2855             ret = -TARGET_ENOPROTOOPT;
2856             break;
2857         }
2858         break;
2859     case SOL_IPV6:
2860         switch (optname) {
2861         case IPV6_MTU_DISCOVER:
2862         case IPV6_MTU:
2863         case IPV6_V6ONLY:
2864         case IPV6_RECVPKTINFO:
2865         case IPV6_UNICAST_HOPS:
2866         case IPV6_MULTICAST_HOPS:
2867         case IPV6_MULTICAST_LOOP:
2868         case IPV6_RECVERR:
2869         case IPV6_RECVHOPLIMIT:
2870         case IPV6_2292HOPLIMIT:
2871         case IPV6_CHECKSUM:
2872         case IPV6_ADDRFORM:
2873         case IPV6_2292PKTINFO:
2874         case IPV6_RECVTCLASS:
2875         case IPV6_RECVRTHDR:
2876         case IPV6_2292RTHDR:
2877         case IPV6_RECVHOPOPTS:
2878         case IPV6_2292HOPOPTS:
2879         case IPV6_RECVDSTOPTS:
2880         case IPV6_2292DSTOPTS:
2881         case IPV6_TCLASS:
2882         case IPV6_ADDR_PREFERENCES:
2883 #ifdef IPV6_RECVPATHMTU
2884         case IPV6_RECVPATHMTU:
2885 #endif
2886 #ifdef IPV6_TRANSPARENT
2887         case IPV6_TRANSPARENT:
2888 #endif
2889 #ifdef IPV6_FREEBIND
2890         case IPV6_FREEBIND:
2891 #endif
2892 #ifdef IPV6_RECVORIGDSTADDR
2893         case IPV6_RECVORIGDSTADDR:
2894 #endif
2895             if (get_user_u32(len, optlen))
2896                 return -TARGET_EFAULT;
2897             if (len < 0)
2898                 return -TARGET_EINVAL;
2899             lv = sizeof(lv);
2900             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2901             if (ret < 0)
2902                 return ret;
2903             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2904                 len = 1;
2905                 if (put_user_u32(len, optlen)
2906                     || put_user_u8(val, optval_addr))
2907                     return -TARGET_EFAULT;
2908             } else {
2909                 if (len > sizeof(int))
2910                     len = sizeof(int);
2911                 if (put_user_u32(len, optlen)
2912                     || put_user_u32(val, optval_addr))
2913                     return -TARGET_EFAULT;
2914             }
2915             break;
2916         default:
2917             ret = -TARGET_ENOPROTOOPT;
2918             break;
2919         }
2920         break;
2921 #ifdef SOL_NETLINK
2922     case SOL_NETLINK:
2923         switch (optname) {
2924         case NETLINK_PKTINFO:
2925         case NETLINK_BROADCAST_ERROR:
2926         case NETLINK_NO_ENOBUFS:
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2928         case NETLINK_LISTEN_ALL_NSID:
2929         case NETLINK_CAP_ACK:
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2932         case NETLINK_EXT_ACK:
2933 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2934 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2935         case NETLINK_GET_STRICT_CHK:
2936 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2937             if (get_user_u32(len, optlen)) {
2938                 return -TARGET_EFAULT;
2939             }
2940             if (len != sizeof(val)) {
2941                 return -TARGET_EINVAL;
2942             }
2943             lv = len;
2944             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2945             if (ret < 0) {
2946                 return ret;
2947             }
2948             if (put_user_u32(lv, optlen)
2949                 || put_user_u32(val, optval_addr)) {
2950                 return -TARGET_EFAULT;
2951             }
2952             break;
2953 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2954         case NETLINK_LIST_MEMBERSHIPS:
2955         {
2956             uint32_t *results;
2957             int i;
2958             if (get_user_u32(len, optlen)) {
2959                 return -TARGET_EFAULT;
2960             }
2961             if (len < 0) {
2962                 return -TARGET_EINVAL;
2963             }
2964             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2965             if (!results && len > 0) {
2966                 return -TARGET_EFAULT;
2967             }
2968             lv = len;
2969             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2970             if (ret < 0) {
2971                 unlock_user(results, optval_addr, 0);
2972                 return ret;
2973             }
2974             /* swap host endianness to target endianness. */
2975             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2976                 results[i] = tswap32(results[i]);
2977             }
2978             if (put_user_u32(lv, optlen)) {
2979                 return -TARGET_EFAULT;
2980             }
2981             unlock_user(results, optval_addr, 0);
2982             break;
2983         }
2984 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2985         default:
2986             goto unimplemented;
2987         }
2988         break;
2989 #endif /* SOL_NETLINK */
2990     default:
2991     unimplemented:
2992         qemu_log_mask(LOG_UNIMP,
2993                       "getsockopt level=%d optname=%d not yet supported\n",
2994                       level, optname);
2995         ret = -TARGET_EOPNOTSUPP;
2996         break;
2997     }
2998     return ret;
2999 }
3000 
3001 /* Convert target low/high pair representing file offset into the host
3002  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3003  * as the kernel doesn't handle them either.
3004  */
3005 static void target_to_host_low_high(abi_ulong tlow,
3006                                     abi_ulong thigh,
3007                                     unsigned long *hlow,
3008                                     unsigned long *hhigh)
3009 {
3010     uint64_t off = tlow |
3011         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3012         TARGET_LONG_BITS / 2;
3013 
3014     *hlow = off;
3015     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3016 }
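/* Worked example (illustrative values): for a 32-bit target on a 64-bit host,
 * tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef, so *hlow = 0x0123456789abcdef and *hhigh = 0.
 * The shifts are split into two half-width steps so that no single shift is
 * as wide as the operand type when the halves add up to the full width;
 * a full-width shift would be undefined behaviour in C.
 */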
3017 
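/* lock_iovec() builds a host iovec array from the guest iovec array at
 * target_addr, locking each guest buffer for 'type' access.  On failure it
 * returns NULL with errno set (0 for count == 0, otherwise EINVAL, ENOMEM or
 * EFAULT); the caller releases the result with unlock_iovec().
 */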
3018 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3019                                 abi_ulong count, int copy)
3020 {
3021     struct target_iovec *target_vec;
3022     struct iovec *vec;
3023     abi_ulong total_len, max_len;
3024     int i;
3025     int err = 0;
3026     bool bad_address = false;
3027 
3028     if (count == 0) {
3029         errno = 0;
3030         return NULL;
3031     }
3032     if (count > IOV_MAX) {
3033         errno = EINVAL;
3034         return NULL;
3035     }
3036 
3037     vec = g_try_new0(struct iovec, count);
3038     if (vec == NULL) {
3039         errno = ENOMEM;
3040         return NULL;
3041     }
3042 
3043     target_vec = lock_user(VERIFY_READ, target_addr,
3044                            count * sizeof(struct target_iovec), 1);
3045     if (target_vec == NULL) {
3046         err = EFAULT;
3047         goto fail2;
3048     }
3049 
3050     /* ??? If host page size > target page size, this will result in a
3051        value larger than what we can actually support.  */
3052     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3053     total_len = 0;
3054 
3055     for (i = 0; i < count; i++) {
3056         abi_ulong base = tswapal(target_vec[i].iov_base);
3057         abi_long len = tswapal(target_vec[i].iov_len);
3058 
3059         if (len < 0) {
3060             err = EINVAL;
3061             goto fail;
3062         } else if (len == 0) {
3063             /* Zero length pointer is ignored.  */
3064             vec[i].iov_base = 0;
3065         } else {
3066             vec[i].iov_base = lock_user(type, base, len, copy);
3067             /* If the first buffer pointer is bad, this is a fault.  But
3068              * subsequent bad buffers will result in a partial write; this
3069              * is realized by filling the vector with null pointers and
3070              * zero lengths. */
3071             if (!vec[i].iov_base) {
3072                 if (i == 0) {
3073                     err = EFAULT;
3074                     goto fail;
3075                 } else {
3076                     bad_address = true;
3077                 }
3078             }
3079             if (bad_address) {
3080                 len = 0;
3081             }
3082             if (len > max_len - total_len) {
3083                 len = max_len - total_len;
3084             }
3085         }
3086         vec[i].iov_len = len;
3087         total_len += len;
3088     }
3089 
3090     unlock_user(target_vec, target_addr, 0);
3091     return vec;
3092 
3093  fail:
3094     while (--i >= 0) {
3095         if (tswapal(target_vec[i].iov_len) > 0) {
3096             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3097         }
3098     }
3099     unlock_user(target_vec, target_addr, 0);
3100  fail2:
3101     g_free(vec);
3102     errno = err;
3103     return NULL;
3104 }
3105 
3106 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3107                          abi_ulong count, int copy)
3108 {
3109     struct target_iovec *target_vec;
3110     int i;
3111 
3112     target_vec = lock_user(VERIFY_READ, target_addr,
3113                            count * sizeof(struct target_iovec), 1);
3114     if (target_vec) {
3115         for (i = 0; i < count; i++) {
3116             abi_ulong base = tswapal(target_vec[i].iov_base);
3117             abi_long len = tswapal(target_vec[i].iov_len);
3118             if (len < 0) {
3119                 break;
3120             }
3121             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3122         }
3123         unlock_user(target_vec, target_addr, 0);
3124     }
3125 
3126     g_free(vec);
3127 }
3128 
3129 static inline int target_to_host_sock_type(int *type)
3130 {
3131     int host_type = 0;
3132     int target_type = *type;
3133 
3134     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3135     case TARGET_SOCK_DGRAM:
3136         host_type = SOCK_DGRAM;
3137         break;
3138     case TARGET_SOCK_STREAM:
3139         host_type = SOCK_STREAM;
3140         break;
3141     default:
3142         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3143         break;
3144     }
3145     if (target_type & TARGET_SOCK_CLOEXEC) {
3146 #if defined(SOCK_CLOEXEC)
3147         host_type |= SOCK_CLOEXEC;
3148 #else
3149         return -TARGET_EINVAL;
3150 #endif
3151     }
3152     if (target_type & TARGET_SOCK_NONBLOCK) {
3153 #if defined(SOCK_NONBLOCK)
3154         host_type |= SOCK_NONBLOCK;
3155 #elif !defined(O_NONBLOCK)
3156         return -TARGET_EINVAL;
3157 #endif
3158     }
3159     *type = host_type;
3160     return 0;
3161 }
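/* On success the function above returns 0 and rewrites *type to the host
 * SOCK_* value plus any supported flags; it returns -TARGET_EINVAL when the
 * guest asks for TARGET_SOCK_CLOEXEC but the host has no SOCK_CLOEXEC, or for
 * TARGET_SOCK_NONBLOCK when the host has neither SOCK_NONBLOCK nor O_NONBLOCK.
 * The O_NONBLOCK-only case is patched up after socket creation by
 * sock_flags_fixup() below.
 */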
3162 
3163 /* Try to emulate socket type flags after socket creation.  */
3164 static int sock_flags_fixup(int fd, int target_type)
3165 {
3166 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3167     if (target_type & TARGET_SOCK_NONBLOCK) {
3168         int flags = fcntl(fd, F_GETFL);
3169         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3170             close(fd);
3171             return -TARGET_EINVAL;
3172         }
3173     }
3174 #endif
3175     return fd;
3176 }
3177 
3178 /* do_socket() Must return target values and target errnos. */
3179 static abi_long do_socket(int domain, int type, int protocol)
3180 {
3181     int target_type = type;
3182     int ret;
3183 
3184     ret = target_to_host_sock_type(&type);
3185     if (ret) {
3186         return ret;
3187     }
3188 
3189     if (domain == PF_NETLINK && !(
3190 #ifdef CONFIG_RTNETLINK
3191          protocol == NETLINK_ROUTE ||
3192 #endif
3193          protocol == NETLINK_KOBJECT_UEVENT ||
3194          protocol == NETLINK_AUDIT)) {
3195         return -TARGET_EPROTONOSUPPORT;
3196     }
3197 
3198     if (domain == AF_PACKET ||
3199         (domain == AF_INET && type == SOCK_PACKET)) {
3200         protocol = tswap16(protocol);
3201     }
3202 
3203     ret = get_errno(socket(domain, type, protocol));
3204     if (ret >= 0) {
3205         ret = sock_flags_fixup(ret, target_type);
3206         if (type == SOCK_PACKET) {
3207             /* Handle an obsolete case:
3208              * if the socket type is SOCK_PACKET, bind by name.
3209              */
3210             fd_trans_register(ret, &target_packet_trans);
3211         } else if (domain == PF_NETLINK) {
3212             switch (protocol) {
3213 #ifdef CONFIG_RTNETLINK
3214             case NETLINK_ROUTE:
3215                 fd_trans_register(ret, &target_netlink_route_trans);
3216                 break;
3217 #endif
3218             case NETLINK_KOBJECT_UEVENT:
3219                 /* nothing to do: messages are strings */
3220                 break;
3221             case NETLINK_AUDIT:
3222                 fd_trans_register(ret, &target_netlink_audit_trans);
3223                 break;
3224             default:
3225                 g_assert_not_reached();
3226             }
3227         }
3228     }
3229     return ret;
3230 }
3231 
3232 /* do_bind() Must return target values and target errnos. */
3233 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3234                         socklen_t addrlen)
3235 {
3236     void *addr;
3237     abi_long ret;
3238 
3239     if ((int)addrlen < 0) {
3240         return -TARGET_EINVAL;
3241     }
3242 
3243     addr = alloca(addrlen+1);
3244 
3245     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3246     if (ret)
3247         return ret;
3248 
3249     return get_errno(bind(sockfd, addr, addrlen));
3250 }
3251 
3252 /* do_connect() Must return target values and target errnos. */
3253 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3254                            socklen_t addrlen)
3255 {
3256     void *addr;
3257     abi_long ret;
3258 
3259     if ((int)addrlen < 0) {
3260         return -TARGET_EINVAL;
3261     }
3262 
3263     addr = alloca(addrlen+1);
3264 
3265     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3266     if (ret)
3267         return ret;
3268 
3269     return get_errno(safe_connect(sockfd, addr, addrlen));
3270 }
3271 
3272 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3273 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3274                                       int flags, int send)
3275 {
3276     abi_long ret, len;
3277     struct msghdr msg;
3278     abi_ulong count;
3279     struct iovec *vec;
3280     abi_ulong target_vec;
3281 
3282     if (msgp->msg_name) {
3283         msg.msg_namelen = tswap32(msgp->msg_namelen);
3284         msg.msg_name = alloca(msg.msg_namelen+1);
3285         ret = target_to_host_sockaddr(fd, msg.msg_name,
3286                                       tswapal(msgp->msg_name),
3287                                       msg.msg_namelen);
3288         if (ret == -TARGET_EFAULT) {
3289             /* For connected sockets msg_name and msg_namelen must
3290              * be ignored, so returning EFAULT immediately is wrong.
3291              * Instead, pass a bad msg_name to the host kernel, and
3292              * let it decide whether to return EFAULT or not.
3293              */
3294             msg.msg_name = (void *)-1;
3295         } else if (ret) {
3296             goto out2;
3297         }
3298     } else {
3299         msg.msg_name = NULL;
3300         msg.msg_namelen = 0;
3301     }
3302     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3303     msg.msg_control = alloca(msg.msg_controllen);
3304     memset(msg.msg_control, 0, msg.msg_controllen);
3305 
3306     msg.msg_flags = tswap32(msgp->msg_flags);
3307 
3308     count = tswapal(msgp->msg_iovlen);
3309     target_vec = tswapal(msgp->msg_iov);
3310 
3311     if (count > IOV_MAX) {
3312         /* sendmsg/recvmsg return a different errno for this condition than
3313          * readv/writev, so we must catch it here before lock_iovec() does.
3314          */
3315         ret = -TARGET_EMSGSIZE;
3316         goto out2;
3317     }
3318 
3319     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3320                      target_vec, count, send);
3321     if (vec == NULL) {
3322         ret = -host_to_target_errno(errno);
3323         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3324         if (!send || ret) {
3325             goto out2;
3326         }
3327     }
3328     msg.msg_iovlen = count;
3329     msg.msg_iov = vec;
3330 
3331     if (send) {
3332         if (fd_trans_target_to_host_data(fd)) {
3333             void *host_msg;
3334 
3335             host_msg = g_malloc(msg.msg_iov->iov_len);
3336             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3337             ret = fd_trans_target_to_host_data(fd)(host_msg,
3338                                                    msg.msg_iov->iov_len);
3339             if (ret >= 0) {
3340                 msg.msg_iov->iov_base = host_msg;
3341                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3342             }
3343             g_free(host_msg);
3344         } else {
3345             ret = target_to_host_cmsg(&msg, msgp);
3346             if (ret == 0) {
3347                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3348             }
3349         }
3350     } else {
3351         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3352         if (!is_error(ret)) {
3353             len = ret;
3354             if (fd_trans_host_to_target_data(fd)) {
3355                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3356                                                MIN(msg.msg_iov->iov_len, len));
3357             }
3358             if (!is_error(ret)) {
3359                 ret = host_to_target_cmsg(msgp, &msg);
3360             }
3361             if (!is_error(ret)) {
3362                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3363                 msgp->msg_flags = tswap32(msg.msg_flags);
3364                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3365                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3366                                     msg.msg_name, msg.msg_namelen);
3367                     if (ret) {
3368                         goto out;
3369                     }
3370                 }
3371 
3372                 ret = len;
3373             }
3374         }
3375     }
3376 
3377 out:
3378     if (vec) {
3379         unlock_iovec(vec, target_vec, count, !send);
3380     }
3381 out2:
3382     return ret;
3383 }
3384 
3385 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3386                                int flags, int send)
3387 {
3388     abi_long ret;
3389     struct target_msghdr *msgp;
3390 
3391     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3392                           msgp,
3393                           target_msg,
3394                           send ? 1 : 0)) {
3395         return -TARGET_EFAULT;
3396     }
3397     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3398     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3399     return ret;
3400 }
3401 
3402 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3403  * so it might not have this *mmsg-specific flag either.
3404  */
3405 #ifndef MSG_WAITFORONE
3406 #define MSG_WAITFORONE 0x10000
3407 #endif
3408 
3409 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3410                                 unsigned int vlen, unsigned int flags,
3411                                 int send)
3412 {
3413     struct target_mmsghdr *mmsgp;
3414     abi_long ret = 0;
3415     int i;
3416 
3417     if (vlen > UIO_MAXIOV) {
3418         vlen = UIO_MAXIOV;
3419     }
3420 
3421     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3422     if (!mmsgp) {
3423         return -TARGET_EFAULT;
3424     }
3425 
3426     for (i = 0; i < vlen; i++) {
3427         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3428         if (is_error(ret)) {
3429             break;
3430         }
3431         mmsgp[i].msg_len = tswap32(ret);
3432         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3433         if (flags & MSG_WAITFORONE) {
3434             flags |= MSG_DONTWAIT;
3435         }
3436     }
3437 
3438     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3439 
3440     /* Return number of datagrams sent if we sent any at all;
3441      * otherwise return the error.
3442      */
3443     if (i) {
3444         return i;
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_accept4() Must return target values and target errnos. */
3450 static abi_long do_accept4(int fd, abi_ulong target_addr,
3451                            abi_ulong target_addrlen_addr, int flags)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456     int host_flags;
3457 
3458     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3459         return -TARGET_EINVAL;
3460     }
3461 
3462     host_flags = 0;
3463     if (flags & TARGET_SOCK_NONBLOCK) {
3464         host_flags |= SOCK_NONBLOCK;
3465     }
3466     if (flags & TARGET_SOCK_CLOEXEC) {
3467         host_flags |= SOCK_CLOEXEC;
3468     }
3469 
3470     if (target_addr == 0) {
3471         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3472     }
3473 
3474     /* linux returns EFAULT if addrlen pointer is invalid */
3475     if (get_user_u32(addrlen, target_addrlen_addr))
3476         return -TARGET_EFAULT;
3477 
3478     if ((int)addrlen < 0) {
3479         return -TARGET_EINVAL;
3480     }
3481 
3482     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3483         return -TARGET_EFAULT;
3484     }
3485 
3486     addr = alloca(addrlen);
3487 
3488     ret_addrlen = addrlen;
3489     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3490     if (!is_error(ret)) {
3491         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3492         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3493             ret = -TARGET_EFAULT;
3494         }
3495     }
3496     return ret;
3497 }
3498 
3499 /* do_getpeername() Must return target values and target errnos. */
3500 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3501                                abi_ulong target_addrlen_addr)
3502 {
3503     socklen_t addrlen, ret_addrlen;
3504     void *addr;
3505     abi_long ret;
3506 
3507     if (get_user_u32(addrlen, target_addrlen_addr))
3508         return -TARGET_EFAULT;
3509 
3510     if ((int)addrlen < 0) {
3511         return -TARGET_EINVAL;
3512     }
3513 
3514     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3515         return -TARGET_EFAULT;
3516     }
3517 
3518     addr = alloca(addrlen);
3519 
3520     ret_addrlen = addrlen;
3521     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3522     if (!is_error(ret)) {
3523         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3524         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3525             ret = -TARGET_EFAULT;
3526         }
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_getsockname() Must return target values and target errnos. */
3532 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3533                                abi_ulong target_addrlen_addr)
3534 {
3535     socklen_t addrlen, ret_addrlen;
3536     void *addr;
3537     abi_long ret;
3538 
3539     if (get_user_u32(addrlen, target_addrlen_addr))
3540         return -TARGET_EFAULT;
3541 
3542     if ((int)addrlen < 0) {
3543         return -TARGET_EINVAL;
3544     }
3545 
3546     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3547         return -TARGET_EFAULT;
3548     }
3549 
3550     addr = alloca(addrlen);
3551 
3552     ret_addrlen = addrlen;
3553     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3554     if (!is_error(ret)) {
3555         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3556         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3557             ret = -TARGET_EFAULT;
3558         }
3559     }
3560     return ret;
3561 }
3562 
3563 /* do_socketpair() Must return target values and target errnos. */
3564 static abi_long do_socketpair(int domain, int type, int protocol,
3565                               abi_ulong target_tab_addr)
3566 {
3567     int tab[2];
3568     abi_long ret;
3569 
3570     target_to_host_sock_type(&type);
3571 
3572     ret = get_errno(socketpair(domain, type, protocol, tab));
3573     if (!is_error(ret)) {
3574         if (put_user_s32(tab[0], target_tab_addr)
3575             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3576             ret = -TARGET_EFAULT;
3577     }
3578     return ret;
3579 }
3580 
3581 /* do_sendto() Must return target values and target errnos. */
3582 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3583                           abi_ulong target_addr, socklen_t addrlen)
3584 {
3585     void *addr;
3586     void *host_msg;
3587     void *copy_msg = NULL;
3588     abi_long ret;
3589 
3590     if ((int)addrlen < 0) {
3591         return -TARGET_EINVAL;
3592     }
3593 
3594     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3595     if (!host_msg)
3596         return -TARGET_EFAULT;
3597     if (fd_trans_target_to_host_data(fd)) {
3598         copy_msg = host_msg;
3599         host_msg = g_malloc(len);
3600         memcpy(host_msg, copy_msg, len);
3601         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3602         if (ret < 0) {
3603             goto fail;
3604         }
3605     }
3606     if (target_addr) {
3607         addr = alloca(addrlen+1);
3608         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3609         if (ret) {
3610             goto fail;
3611         }
3612         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3613     } else {
3614         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3615     }
3616 fail:
3617     if (copy_msg) {
3618         g_free(host_msg);
3619         host_msg = copy_msg;
3620     }
3621     unlock_user(host_msg, msg, 0);
3622     return ret;
3623 }
3624 
3625 /* do_recvfrom() Must return target values and target errnos. */
3626 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3627                             abi_ulong target_addr,
3628                             abi_ulong target_addrlen)
3629 {
3630     socklen_t addrlen, ret_addrlen;
3631     void *addr;
3632     void *host_msg;
3633     abi_long ret;
3634 
3635     if (!msg) {
3636         host_msg = NULL;
3637     } else {
3638         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3639         if (!host_msg) {
3640             return -TARGET_EFAULT;
3641         }
3642     }
3643     if (target_addr) {
3644         if (get_user_u32(addrlen, target_addrlen)) {
3645             ret = -TARGET_EFAULT;
3646             goto fail;
3647         }
3648         if ((int)addrlen < 0) {
3649             ret = -TARGET_EINVAL;
3650             goto fail;
3651         }
3652         addr = alloca(addrlen);
3653         ret_addrlen = addrlen;
3654         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3655                                       addr, &ret_addrlen));
3656     } else {
3657         addr = NULL; /* To keep compiler quiet.  */
3658         addrlen = 0; /* To keep compiler quiet.  */
3659         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3660     }
3661     if (!is_error(ret)) {
3662         if (fd_trans_host_to_target_data(fd)) {
3663             abi_long trans;
3664             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3665             if (is_error(trans)) {
3666                 ret = trans;
3667                 goto fail;
3668             }
3669         }
3670         if (target_addr) {
3671             host_to_target_sockaddr(target_addr, addr,
3672                                     MIN(addrlen, ret_addrlen));
3673             if (put_user_u32(ret_addrlen, target_addrlen)) {
3674                 ret = -TARGET_EFAULT;
3675                 goto fail;
3676             }
3677         }
3678         unlock_user(host_msg, msg, len);
3679     } else {
3680 fail:
3681         unlock_user(host_msg, msg, 0);
3682     }
3683     return ret;
3684 }
3685 
3686 #ifdef TARGET_NR_socketcall
3687 /* do_socketcall() must return target values and target errnos. */
3688 static abi_long do_socketcall(int num, abi_ulong vptr)
3689 {
3690     static const unsigned nargs[] = { /* number of arguments per operation */
3691         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3692         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3693         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3694         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3695         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3696         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3697         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3698         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3699         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3700         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3701         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3702         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3703         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3704         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3705         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3706         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3707         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3708         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3709         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3710         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3711     };
3712     abi_long a[6]; /* max 6 args */
3713     unsigned i;
3714 
3715     /* check the range of the first argument num */
3716     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3717     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3718         return -TARGET_EINVAL;
3719     }
3720     /* ensure we have space for args */
3721     if (nargs[num] > ARRAY_SIZE(a)) {
3722         return -TARGET_EINVAL;
3723     }
3724     /* collect the arguments in a[] according to nargs[] */
3725     for (i = 0; i < nargs[num]; ++i) {
3726         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3727             return -TARGET_EFAULT;
3728         }
3729     }
3730     /* now when we have the args, invoke the appropriate underlying function */
3731     switch (num) {
3732     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3733         return do_socket(a[0], a[1], a[2]);
3734     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3735         return do_bind(a[0], a[1], a[2]);
3736     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3737         return do_connect(a[0], a[1], a[2]);
3738     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3739         return get_errno(listen(a[0], a[1]));
3740     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3741         return do_accept4(a[0], a[1], a[2], 0);
3742     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3743         return do_getsockname(a[0], a[1], a[2]);
3744     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3745         return do_getpeername(a[0], a[1], a[2]);
3746     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3747         return do_socketpair(a[0], a[1], a[2], a[3]);
3748     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3749         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3750     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3751         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3752     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3753         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3754     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3755         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3756     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3757         return get_errno(shutdown(a[0], a[1]));
3758     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3759         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3760     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3761         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3762     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3763         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3764     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3765         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3766     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3767         return do_accept4(a[0], a[1], a[2], a[3]);
3768     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3769         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3770     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3771         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3772     default:
3773         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3774         return -TARGET_EINVAL;
3775     }
3776 }
3777 #endif
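/* Illustrative dispatch (hypothetical values): a guest call
 *   socketcall(TARGET_SYS_CONNECT, vptr)
 * with vptr pointing at the three abi_long words { fd, addr, addrlen } makes
 * do_socketcall() read nargs[TARGET_SYS_CONNECT] == 3 arguments from guest
 * memory and then call do_connect(fd, addr, addrlen).
 */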
3778 
3779 #ifndef TARGET_SEMID64_DS
3780 /* asm-generic version of this struct */
3781 struct target_semid64_ds
3782 {
3783   struct target_ipc_perm sem_perm;
3784   abi_ulong sem_otime;
3785 #if TARGET_ABI_BITS == 32
3786   abi_ulong __unused1;
3787 #endif
3788   abi_ulong sem_ctime;
3789 #if TARGET_ABI_BITS == 32
3790   abi_ulong __unused2;
3791 #endif
3792   abi_ulong sem_nsems;
3793   abi_ulong __unused3;
3794   abi_ulong __unused4;
3795 };
3796 #endif
3797 
3798 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3799                                                abi_ulong target_addr)
3800 {
3801     struct target_ipc_perm *target_ip;
3802     struct target_semid64_ds *target_sd;
3803 
3804     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3805         return -TARGET_EFAULT;
3806     target_ip = &(target_sd->sem_perm);
3807     host_ip->__key = tswap32(target_ip->__key);
3808     host_ip->uid = tswap32(target_ip->uid);
3809     host_ip->gid = tswap32(target_ip->gid);
3810     host_ip->cuid = tswap32(target_ip->cuid);
3811     host_ip->cgid = tswap32(target_ip->cgid);
3812 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3813     host_ip->mode = tswap32(target_ip->mode);
3814 #else
3815     host_ip->mode = tswap16(target_ip->mode);
3816 #endif
3817 #if defined(TARGET_PPC)
3818     host_ip->__seq = tswap32(target_ip->__seq);
3819 #else
3820     host_ip->__seq = tswap16(target_ip->__seq);
3821 #endif
3822     unlock_user_struct(target_sd, target_addr, 0);
3823     return 0;
3824 }
3825 
3826 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3827                                                struct ipc_perm *host_ip)
3828 {
3829     struct target_ipc_perm *target_ip;
3830     struct target_semid64_ds *target_sd;
3831 
3832     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3833         return -TARGET_EFAULT;
3834     target_ip = &(target_sd->sem_perm);
3835     target_ip->__key = tswap32(host_ip->__key);
3836     target_ip->uid = tswap32(host_ip->uid);
3837     target_ip->gid = tswap32(host_ip->gid);
3838     target_ip->cuid = tswap32(host_ip->cuid);
3839     target_ip->cgid = tswap32(host_ip->cgid);
3840 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3841     target_ip->mode = tswap32(host_ip->mode);
3842 #else
3843     target_ip->mode = tswap16(host_ip->mode);
3844 #endif
3845 #if defined(TARGET_PPC)
3846     target_ip->__seq = tswap32(host_ip->__seq);
3847 #else
3848     target_ip->__seq = tswap16(host_ip->__seq);
3849 #endif
3850     unlock_user_struct(target_sd, target_addr, 1);
3851     return 0;
3852 }
3853 
3854 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3855                                                abi_ulong target_addr)
3856 {
3857     struct target_semid64_ds *target_sd;
3858 
3859     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3860         return -TARGET_EFAULT;
3861     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3862         return -TARGET_EFAULT;
3863     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3864     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3865     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3866     unlock_user_struct(target_sd, target_addr, 0);
3867     return 0;
3868 }
3869 
3870 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3871                                                struct semid_ds *host_sd)
3872 {
3873     struct target_semid64_ds *target_sd;
3874 
3875     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3876         return -TARGET_EFAULT;
3877     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3878         return -TARGET_EFAULT;
3879     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3880     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3881     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3882     unlock_user_struct(target_sd, target_addr, 1);
3883     return 0;
3884 }
3885 
3886 struct target_seminfo {
3887     int semmap;
3888     int semmni;
3889     int semmns;
3890     int semmnu;
3891     int semmsl;
3892     int semopm;
3893     int semume;
3894     int semusz;
3895     int semvmx;
3896     int semaem;
3897 };
3898 
3899 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3900                                               struct seminfo *host_seminfo)
3901 {
3902     struct target_seminfo *target_seminfo;
3903     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3904         return -TARGET_EFAULT;
3905     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3906     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3907     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3908     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3909     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3910     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3911     __put_user(host_seminfo->semume, &target_seminfo->semume);
3912     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3913     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3914     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3915     unlock_user_struct(target_seminfo, target_addr, 1);
3916     return 0;
3917 }
3918 
3919 union semun {
3920     int val;
3921     struct semid_ds *buf;
3922     unsigned short *array;
3923     struct seminfo *__buf;
3924 };
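/* semctl(2) requires the calling program to define 'union semun' itself on
 * Linux/glibc, hence the local definition of the host union above alongside
 * the target layout below.
 */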
3925 
3926 union target_semun {
3927     int val;
3928     abi_ulong buf;
3929     abi_ulong array;
3930     abi_ulong __buf;
3931 };
3932 
3933 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3934                                                abi_ulong target_addr)
3935 {
3936     int nsems;
3937     unsigned short *array;
3938     union semun semun;
3939     struct semid_ds semid_ds;
3940     int i, ret;
3941 
3942     semun.buf = &semid_ds;
3943 
3944     ret = semctl(semid, 0, IPC_STAT, semun);
3945     if (ret == -1)
3946         return get_errno(ret);
3947 
3948     nsems = semid_ds.sem_nsems;
3949 
3950     *host_array = g_try_new(unsigned short, nsems);
3951     if (!*host_array) {
3952         return -TARGET_ENOMEM;
3953     }
3954     array = lock_user(VERIFY_READ, target_addr,
3955                       nsems*sizeof(unsigned short), 1);
3956     if (!array) {
3957         g_free(*host_array);
3958         return -TARGET_EFAULT;
3959     }
3960 
3961     for (i = 0; i < nsems; i++) {
3962         __get_user((*host_array)[i], &array[i]);
3963     }
3964     unlock_user(array, target_addr, 0);
3965 
3966     return 0;
3967 }
3968 
3969 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3970                                                unsigned short **host_array)
3971 {
3972     int nsems;
3973     unsigned short *array;
3974     union semun semun;
3975     struct semid_ds semid_ds;
3976     int i, ret;
3977 
3978     semun.buf = &semid_ds;
3979 
3980     ret = semctl(semid, 0, IPC_STAT, semun);
3981     if (ret == -1)
3982         return get_errno(ret);
3983 
3984     nsems = semid_ds.sem_nsems;
3985 
3986     array = lock_user(VERIFY_WRITE, target_addr,
3987                       nsems*sizeof(unsigned short), 0);
3988     if (!array)
3989         return -TARGET_EFAULT;
3990 
3991     for (i = 0; i < nsems; i++) {
3992         __put_user((*host_array)[i], &array[i]);
3993     }
3994     g_free(*host_array);
3995     unlock_user(array, target_addr, 1);
3996 
3997     return 0;
3998 }
3999 
4000 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4001                                  abi_ulong target_arg)
4002 {
4003     union target_semun target_su = { .buf = target_arg };
4004     union semun arg;
4005     struct semid_ds dsarg;
4006     unsigned short *array = NULL;
4007     struct seminfo seminfo;
4008     abi_long ret = -TARGET_EINVAL;
4009     abi_long err;
4010     cmd &= 0xff;
4011 
4012     switch (cmd) {
4013         case GETVAL:
4014         case SETVAL:
4015             /* In 64 bit cross-endian situations, we will erroneously pick up
4016              * the wrong half of the union for the "val" element.  To rectify
4017              * this, the entire 8-byte structure is byteswapped, followed by
4018              * a swap of the 4 byte val field. In other cases, the data is
4019              * already in proper host byte order. */
4020             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4021                 target_su.buf = tswapal(target_su.buf);
4022                 arg.val = tswap32(target_su.val);
4023             } else {
4024                 arg.val = target_su.val;
4025             }
4026             ret = get_errno(semctl(semid, semnum, cmd, arg));
4027             break;
4028         case GETALL:
4029         case SETALL:
4030             err = target_to_host_semarray(semid, &array, target_su.array);
4031             if (err)
4032                 return err;
4033             arg.array = array;
4034             ret = get_errno(semctl(semid, semnum, cmd, arg));
4035             err = host_to_target_semarray(semid, target_su.array, &array);
4036             if (err)
4037                 return err;
4038             break;
4039         case IPC_STAT:
4040         case IPC_SET:
4041         case SEM_STAT:
4042             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4043             if (err)
4044                 return err;
4045             arg.buf = &dsarg;
4046             ret = get_errno(semctl(semid, semnum, cmd, arg));
4047             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4048             if (err)
4049                 return err;
4050             break;
4051         case IPC_INFO:
4052         case SEM_INFO:
4053             arg.__buf = &seminfo;
4054             ret = get_errno(semctl(semid, semnum, cmd, arg));
4055             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4056             if (err)
4057                 return err;
4058             break;
4059         case IPC_RMID:
4060         case GETPID:
4061         case GETNCNT:
4062         case GETZCNT:
4063             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4064             break;
4065     }
4066 
4067     return ret;
4068 }
4069 
4070 struct target_sembuf {
4071     unsigned short sem_num;
4072     short sem_op;
4073     short sem_flg;
4074 };
4075 
4076 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4077                                              abi_ulong target_addr,
4078                                              unsigned nsops)
4079 {
4080     struct target_sembuf *target_sembuf;
4081     int i;
4082 
4083     target_sembuf = lock_user(VERIFY_READ, target_addr,
4084                               nsops*sizeof(struct target_sembuf), 1);
4085     if (!target_sembuf)
4086         return -TARGET_EFAULT;
4087 
4088     for (i = 0; i < nsops; i++) {
4089         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4090         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4091         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4092     }
4093 
4094     unlock_user(target_sembuf, target_addr, 0);
4095 
4096     return 0;
4097 }
4098 
4099 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4100     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4101 
4102 /*
4103  * This macro is required to handle the s390 variants, which pass the
4104  * arguments in a different order than the default.
4105  */
4106 #ifdef __s390x__
4107 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4108   (__nsops), (__timeout), (__sops)
4109 #else
4110 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4111   (__nsops), 0, (__sops), (__timeout)
4112 #endif
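/* Illustrative expansion of the call in do_semtimedop() below:
 *   safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(nsops, sops, ts))
 * becomes, with the default definition:
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, ts)
 * and on s390x:
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, ts, sops)
 */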
4113 
4114 static inline abi_long do_semtimedop(int semid,
4115                                      abi_long ptr,
4116                                      unsigned nsops,
4117                                      abi_long timeout, bool time64)
4118 {
4119     struct sembuf *sops;
4120     struct timespec ts, *pts = NULL;
4121     abi_long ret;
4122 
4123     if (timeout) {
4124         pts = &ts;
4125         if (time64) {
4126             if (target_to_host_timespec64(pts, timeout)) {
4127                 return -TARGET_EFAULT;
4128             }
4129         } else {
4130             if (target_to_host_timespec(pts, timeout)) {
4131                 return -TARGET_EFAULT;
4132             }
4133         }
4134     }
4135 
4136     if (nsops > TARGET_SEMOPM) {
4137         return -TARGET_E2BIG;
4138     }
4139 
4140     sops = g_new(struct sembuf, nsops);
4141 
4142     if (target_to_host_sembuf(sops, ptr, nsops)) {
4143         g_free(sops);
4144         return -TARGET_EFAULT;
4145     }
4146 
4147     ret = -TARGET_ENOSYS;
4148 #ifdef __NR_semtimedop
4149     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4150 #endif
4151 #ifdef __NR_ipc
4152     if (ret == -TARGET_ENOSYS) {
4153         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4154                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4155     }
4156 #endif
4157     g_free(sops);
4158     return ret;
4159 }
4160 #endif
4161 
4162 struct target_msqid_ds
4163 {
4164     struct target_ipc_perm msg_perm;
4165     abi_ulong msg_stime;
4166 #if TARGET_ABI_BITS == 32
4167     abi_ulong __unused1;
4168 #endif
4169     abi_ulong msg_rtime;
4170 #if TARGET_ABI_BITS == 32
4171     abi_ulong __unused2;
4172 #endif
4173     abi_ulong msg_ctime;
4174 #if TARGET_ABI_BITS == 32
4175     abi_ulong __unused3;
4176 #endif
4177     abi_ulong __msg_cbytes;
4178     abi_ulong msg_qnum;
4179     abi_ulong msg_qbytes;
4180     abi_ulong msg_lspid;
4181     abi_ulong msg_lrpid;
4182     abi_ulong __unused4;
4183     abi_ulong __unused5;
4184 };
4185 
4186 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4187                                                abi_ulong target_addr)
4188 {
4189     struct target_msqid_ds *target_md;
4190 
4191     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4192         return -TARGET_EFAULT;
4193     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4194         return -TARGET_EFAULT;
4195     host_md->msg_stime = tswapal(target_md->msg_stime);
4196     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4197     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4198     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4199     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4200     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4201     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4202     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4203     unlock_user_struct(target_md, target_addr, 0);
4204     return 0;
4205 }
4206 
4207 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4208                                                struct msqid_ds *host_md)
4209 {
4210     struct target_msqid_ds *target_md;
4211 
4212     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4213         return -TARGET_EFAULT;
4214     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4215         return -TARGET_EFAULT;
4216     target_md->msg_stime = tswapal(host_md->msg_stime);
4217     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4218     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4219     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4220     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4221     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4222     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4223     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4224     unlock_user_struct(target_md, target_addr, 1);
4225     return 0;
4226 }
4227 
4228 struct target_msginfo {
4229     int msgpool;
4230     int msgmap;
4231     int msgmax;
4232     int msgmnb;
4233     int msgmni;
4234     int msgssz;
4235     int msgtql;
4236     unsigned short int msgseg;
4237 };
4238 
4239 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4240                                               struct msginfo *host_msginfo)
4241 {
4242     struct target_msginfo *target_msginfo;
4243     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4244         return -TARGET_EFAULT;
4245     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4246     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4247     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4248     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4249     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4250     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4251     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4252     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4253     unlock_user_struct(target_msginfo, target_addr, 1);
4254     return 0;
4255 }
4256 
4257 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4258 {
4259     struct msqid_ds dsarg;
4260     struct msginfo msginfo;
4261     abi_long ret = -TARGET_EINVAL;
4262 
4263     cmd &= 0xff;
4264 
4265     switch (cmd) {
4266     case IPC_STAT:
4267     case IPC_SET:
4268     case MSG_STAT:
4269         if (target_to_host_msqid_ds(&dsarg,ptr))
4270             return -TARGET_EFAULT;
4271         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4272         if (host_to_target_msqid_ds(ptr,&dsarg))
4273             return -TARGET_EFAULT;
4274         break;
4275     case IPC_RMID:
4276         ret = get_errno(msgctl(msgid, cmd, NULL));
4277         break;
4278     case IPC_INFO:
4279     case MSG_INFO:
4280         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4281         if (host_to_target_msginfo(ptr, &msginfo))
4282             return -TARGET_EFAULT;
4283         break;
4284     }
4285 
4286     return ret;
4287 }
4288 
4289 struct target_msgbuf {
4290     abi_long mtype;
4291     char mtext[1];
4292 };
4293 
4294 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4295                                  ssize_t msgsz, int msgflg)
4296 {
4297     struct target_msgbuf *target_mb;
4298     struct msgbuf *host_mb;
4299     abi_long ret = 0;
4300 
4301     if (msgsz < 0) {
4302         return -TARGET_EINVAL;
4303     }
4304 
4305     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4306         return -TARGET_EFAULT;
4307     host_mb = g_try_malloc(msgsz + sizeof(long));
4308     if (!host_mb) {
4309         unlock_user_struct(target_mb, msgp, 0);
4310         return -TARGET_ENOMEM;
4311     }
4312     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4313     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4314     ret = -TARGET_ENOSYS;
4315 #ifdef __NR_msgsnd
4316     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4317 #endif
4318 #ifdef __NR_ipc
4319     if (ret == -TARGET_ENOSYS) {
4320 #ifdef __s390x__
4321         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4322                                  host_mb));
4323 #else
4324         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4325                                  host_mb, 0));
4326 #endif
4327     }
4328 #endif
4329     g_free(host_mb);
4330     unlock_user_struct(target_mb, msgp, 0);
4331 
4332     return ret;
4333 }
4334 
4335 #ifdef __NR_ipc
4336 #if defined(__sparc__)
4337 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4338 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4339 #elif defined(__s390x__)
4340 /* The s390 sys_ipc variant has only five parameters.  */
4341 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4342     ((long int[]){(long int)__msgp, __msgtyp})
4343 #else
4344 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4345     ((long int[]){(long int)__msgp, __msgtyp}), 0
4346 #endif
4347 #endif
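/* Illustrative expansion of the safe_ipc() call in do_msgrcv() below:
 * the trailing arguments become
 *   host_mb, msgtyp                                on SPARC,
 *   ((long int[]){(long int)host_mb, msgtyp})      on s390x, and
 *   ((long int[]){(long int)host_mb, msgtyp}), 0   elsewhere.
 */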
4348 
4349 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4350                                  ssize_t msgsz, abi_long msgtyp,
4351                                  int msgflg)
4352 {
4353     struct target_msgbuf *target_mb;
4354     char *target_mtext;
4355     struct msgbuf *host_mb;
4356     abi_long ret = 0;
4357 
4358     if (msgsz < 0) {
4359         return -TARGET_EINVAL;
4360     }
4361 
4362     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4363         return -TARGET_EFAULT;
4364 
4365     host_mb = g_try_malloc(msgsz + sizeof(long));
4366     if (!host_mb) {
4367         ret = -TARGET_ENOMEM;
4368         goto end;
4369     }
4370     ret = -TARGET_ENOSYS;
4371 #ifdef __NR_msgrcv
4372     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4373 #endif
4374 #ifdef __NR_ipc
4375     if (ret == -TARGET_ENOSYS) {
4376         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4377                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4378     }
4379 #endif
4380 
4381     if (ret > 0) {
4382         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4383         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4384         if (!target_mtext) {
4385             ret = -TARGET_EFAULT;
4386             goto end;
4387         }
4388         memcpy(target_mb->mtext, host_mb->mtext, ret);
4389         unlock_user(target_mtext, target_mtext_addr, ret);
4390     }
4391 
4392     target_mb->mtype = tswapal(host_mb->mtype);
4393 
4394 end:
4395     if (target_mb)
4396         unlock_user_struct(target_mb, msgp, 1);
4397     g_free(host_mb);
4398     return ret;
4399 }
4400 
4401 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4402                                                abi_ulong target_addr)
4403 {
4404     struct target_shmid_ds *target_sd;
4405 
4406     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4407         return -TARGET_EFAULT;
4408     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4409         return -TARGET_EFAULT;
4410     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4411     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4412     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4413     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4414     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4415     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4416     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4417     unlock_user_struct(target_sd, target_addr, 0);
4418     return 0;
4419 }
4420 
4421 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4422                                                struct shmid_ds *host_sd)
4423 {
4424     struct target_shmid_ds *target_sd;
4425 
4426     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4427         return -TARGET_EFAULT;
4428     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4429         return -TARGET_EFAULT;
4430     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4431     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4432     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4433     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4434     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4435     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4436     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4437     unlock_user_struct(target_sd, target_addr, 1);
4438     return 0;
4439 }
4440 
4441 struct  target_shminfo {
4442     abi_ulong shmmax;
4443     abi_ulong shmmin;
4444     abi_ulong shmmni;
4445     abi_ulong shmseg;
4446     abi_ulong shmall;
4447 };
4448 
4449 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4450                                               struct shminfo *host_shminfo)
4451 {
4452     struct target_shminfo *target_shminfo;
4453     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4454         return -TARGET_EFAULT;
4455     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4456     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4457     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4458     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4459     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4460     unlock_user_struct(target_shminfo, target_addr, 1);
4461     return 0;
4462 }
4463 
4464 struct target_shm_info {
4465     int used_ids;
4466     abi_ulong shm_tot;
4467     abi_ulong shm_rss;
4468     abi_ulong shm_swp;
4469     abi_ulong swap_attempts;
4470     abi_ulong swap_successes;
4471 };
4472 
4473 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4474                                                struct shm_info *host_shm_info)
4475 {
4476     struct target_shm_info *target_shm_info;
4477     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4478         return -TARGET_EFAULT;
4479     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4480     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4481     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4482     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4483     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4484     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4485     unlock_user_struct(target_shm_info, target_addr, 1);
4486     return 0;
4487 }
4488 
4489 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4490 {
4491     struct shmid_ds dsarg;
4492     struct shminfo shminfo;
4493     struct shm_info shm_info;
4494     abi_long ret = -TARGET_EINVAL;
4495 
4496     cmd &= 0xff;
4497 
4498     switch(cmd) {
4499     case IPC_STAT:
4500     case IPC_SET:
4501     case SHM_STAT:
4502         if (target_to_host_shmid_ds(&dsarg, buf))
4503             return -TARGET_EFAULT;
4504         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4505         if (host_to_target_shmid_ds(buf, &dsarg))
4506             return -TARGET_EFAULT;
4507         break;
4508     case IPC_INFO:
4509         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4510         if (host_to_target_shminfo(buf, &shminfo))
4511             return -TARGET_EFAULT;
4512         break;
4513     case SHM_INFO:
4514         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4515         if (host_to_target_shm_info(buf, &shm_info))
4516             return -TARGET_EFAULT;
4517         break;
4518     case IPC_RMID:
4519     case SHM_LOCK:
4520     case SHM_UNLOCK:
4521         ret = get_errno(shmctl(shmid, cmd, NULL));
4522         break;
4523     }
4524 
4525     return ret;
4526 }
4527 
4528 #ifdef TARGET_NR_ipc
4529 /* ??? This only works with linear mappings.  */
4530 /* do_ipc() must return target values and target errnos. */
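/*
 * sys_ipc is the multiplexed SysV IPC entry point used by targets without
 * separate sem/msg/shm syscalls: the low 16 bits of 'call' select the
 * operation and the high 16 bits carry the ABI version.
 */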
4531 static abi_long do_ipc(CPUArchState *cpu_env,
4532                        unsigned int call, abi_long first,
4533                        abi_long second, abi_long third,
4534                        abi_long ptr, abi_long fifth)
4535 {
4536     int version;
4537     abi_long ret = 0;
4538 
4539     version = call >> 16;
4540     call &= 0xffff;
4541 
4542     switch (call) {
4543     case IPCOP_semop:
4544         ret = do_semtimedop(first, ptr, second, 0, false);
4545         break;
4546     case IPCOP_semtimedop:
4547     /*
4548      * The s390 sys_ipc variant has only five parameters instead of six
4549      * (as for default variant) and the only difference is the handling of
4550      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4551      * to a struct timespec while the generic variant uses the fifth parameter.
4552      */
4553 #if defined(TARGET_S390X)
4554         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4555 #else
4556         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4557 #endif
4558         break;
4559 
4560     case IPCOP_semget:
4561         ret = get_errno(semget(first, second, third));
4562         break;
4563 
4564     case IPCOP_semctl: {
4565         /* The semun argument to semctl is passed by value, so dereference the
4566          * ptr argument. */
4567         abi_ulong atptr;
4568         get_user_ual(atptr, ptr);
4569         ret = do_semctl(first, second, third, atptr);
4570         break;
4571     }
4572 
4573     case IPCOP_msgget:
4574         ret = get_errno(msgget(first, second));
4575         break;
4576 
4577     case IPCOP_msgsnd:
4578         ret = do_msgsnd(first, ptr, second, third);
4579         break;
4580 
4581     case IPCOP_msgctl:
4582         ret = do_msgctl(first, second, ptr);
4583         break;
4584 
4585     case IPCOP_msgrcv:
4586         switch (version) {
4587         case 0:
4588             {
4589                 struct target_ipc_kludge {
4590                     abi_long msgp;
4591                     abi_long msgtyp;
4592                 } *tmp;
4593 
4594                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4595                     ret = -TARGET_EFAULT;
4596                     break;
4597                 }
4598 
4599                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4600 
4601                 unlock_user_struct(tmp, ptr, 0);
4602                 break;
4603             }
4604         default:
4605             ret = do_msgrcv(first, ptr, second, fifth, third);
4606         }
4607         break;
4608 
4609     case IPCOP_shmat:
4610         switch (version) {
4611         default:
4612         {
4613             abi_ulong raddr;
4614             raddr = target_shmat(cpu_env, first, ptr, second);
4615             if (is_error(raddr))
4616                 return get_errno(raddr);
4617             if (put_user_ual(raddr, third))
4618                 return -TARGET_EFAULT;
4619             break;
4620         }
4621         case 1:
4622             ret = -TARGET_EINVAL;
4623             break;
4624         }
4625 	break;
4626     case IPCOP_shmdt:
4627         ret = target_shmdt(ptr);
4628 	break;
4629 
4630     case IPCOP_shmget:
4631 	/* IPC_* flag values are the same on all linux platforms */
4632 	ret = get_errno(shmget(first, second, third));
4633 	break;
4634 
4635 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4636     case IPCOP_shmctl:
4637         ret = do_shmctl(first, second, ptr);
4638         break;
4639     default:
4640         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4641                       call, version);
4642 	ret = -TARGET_ENOSYS;
4643 	break;
4644     }
4645     return ret;
4646 }
4647 #endif
4648 
4649 /* kernel structure types definitions */
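/*
 * syscall_types.h is included twice: first to build an enum of STRUCT_*
 * identifiers, then again to emit a struct_<name>_def argtype array
 * (terminated by TYPE_NULL) for every non-special structure.
 */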
4650 
4651 #define STRUCT(name, ...) STRUCT_ ## name,
4652 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4653 enum {
4654 #include "syscall_types.h"
4655 STRUCT_MAX
4656 };
4657 #undef STRUCT
4658 #undef STRUCT_SPECIAL
4659 
4660 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4661 #define STRUCT_SPECIAL(name)
4662 #include "syscall_types.h"
4663 #undef STRUCT
4664 #undef STRUCT_SPECIAL
4665 
4666 #define MAX_STRUCT_SIZE 4096
4667 
4668 #ifdef CONFIG_FIEMAP
4669 /* So fiemap access checks don't overflow on 32 bit systems.
4670  * This is very slightly smaller than the limit imposed by
4671  * the underlying kernel.
4672  */
4673 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4674                             / sizeof(struct fiemap_extent))
4675 
4676 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4677                                        int fd, int cmd, abi_long arg)
4678 {
4679     /* The parameter for this ioctl is a struct fiemap followed
4680      * by an array of struct fiemap_extent whose size is set
4681      * in fiemap->fm_extent_count. The array is filled in by the
4682      * ioctl.
4683      */
4684     int target_size_in, target_size_out;
4685     struct fiemap *fm;
4686     const argtype *arg_type = ie->arg_type;
4687     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4688     void *argptr, *p;
4689     abi_long ret;
4690     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4691     uint32_t outbufsz;
4692     int free_fm = 0;
4693 
4694     assert(arg_type[0] == TYPE_PTR);
4695     assert(ie->access == IOC_RW);
4696     arg_type++;
4697     target_size_in = thunk_type_size(arg_type, 0);
4698     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4699     if (!argptr) {
4700         return -TARGET_EFAULT;
4701     }
4702     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4703     unlock_user(argptr, arg, 0);
4704     fm = (struct fiemap *)buf_temp;
4705     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4706         return -TARGET_EINVAL;
4707     }
4708 
4709     outbufsz = sizeof (*fm) +
4710         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4711 
4712     if (outbufsz > MAX_STRUCT_SIZE) {
4713         /* We can't fit all the extents into the fixed size buffer.
4714          * Allocate one that is large enough and use it instead.
4715          */
4716         fm = g_try_malloc(outbufsz);
4717         if (!fm) {
4718             return -TARGET_ENOMEM;
4719         }
4720         memcpy(fm, buf_temp, sizeof(struct fiemap));
4721         free_fm = 1;
4722     }
4723     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4724     if (!is_error(ret)) {
4725         target_size_out = target_size_in;
4726         /* An extent_count of 0 means we were only counting the extents
4727          * so there are no structs to copy
4728          */
4729         if (fm->fm_extent_count != 0) {
4730             target_size_out += fm->fm_mapped_extents * extent_size;
4731         }
4732         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4733         if (!argptr) {
4734             ret = -TARGET_EFAULT;
4735         } else {
4736             /* Convert the struct fiemap */
4737             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4738             if (fm->fm_extent_count != 0) {
4739                 p = argptr + target_size_in;
4740                 /* ...and then all the struct fiemap_extents */
4741                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4742                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4743                                   THUNK_TARGET);
4744                     p += extent_size;
4745                 }
4746             }
4747             unlock_user(argptr, arg, target_size_out);
4748         }
4749     }
4750     if (free_fm) {
4751         g_free(fm);
4752     }
4753     return ret;
4754 }
4755 #endif
4756 
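/*
 * Handler for the struct ifconf based SIOCGIFCONF ioctl: convert the guest
 * header, size a host buffer for the ifreq array, run the host ioctl, then
 * convert ifc_len and each returned ifreq back to the target layout.  A
 * NULL ifc_buf is the "just report the required length" form.
 */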
4757 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4758                                 int fd, int cmd, abi_long arg)
4759 {
4760     const argtype *arg_type = ie->arg_type;
4761     int target_size;
4762     void *argptr;
4763     int ret;
4764     struct ifconf *host_ifconf;
4765     uint32_t outbufsz;
4766     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4767     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4768     int target_ifreq_size;
4769     int nb_ifreq;
4770     int free_buf = 0;
4771     int i;
4772     int target_ifc_len;
4773     abi_long target_ifc_buf;
4774     int host_ifc_len;
4775     char *host_ifc_buf;
4776 
4777     assert(arg_type[0] == TYPE_PTR);
4778     assert(ie->access == IOC_RW);
4779 
4780     arg_type++;
4781     target_size = thunk_type_size(arg_type, 0);
4782 
4783     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4784     if (!argptr)
4785         return -TARGET_EFAULT;
4786     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4787     unlock_user(argptr, arg, 0);
4788 
4789     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4790     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4791     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4792 
4793     if (target_ifc_buf != 0) {
4794         target_ifc_len = host_ifconf->ifc_len;
4795         nb_ifreq = target_ifc_len / target_ifreq_size;
4796         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4797 
4798         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4799         if (outbufsz > MAX_STRUCT_SIZE) {
4800             /*
4801              * We can't fit all the extents into the fixed size buffer.
4802              * Allocate one that is large enough and use it instead.
4803              */
4804             host_ifconf = g_try_malloc(outbufsz);
4805             if (!host_ifconf) {
4806                 return -TARGET_ENOMEM;
4807             }
4808             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4809             free_buf = 1;
4810         }
4811         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4812 
4813         host_ifconf->ifc_len = host_ifc_len;
4814     } else {
4815         host_ifc_buf = NULL;
4816     }
4817     host_ifconf->ifc_buf = host_ifc_buf;
4818 
4819     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4820     if (!is_error(ret)) {
4821 	/* convert host ifc_len to target ifc_len */
4822 
4823         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4824         target_ifc_len = nb_ifreq * target_ifreq_size;
4825         host_ifconf->ifc_len = target_ifc_len;
4826 
4827 	/* restore target ifc_buf */
4828 
4829         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4830 
4831 	/* copy struct ifconf to target user */
4832 
4833         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4834         if (!argptr)
4835             return -TARGET_EFAULT;
4836         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4837         unlock_user(argptr, arg, target_size);
4838 
4839         if (target_ifc_buf != 0) {
4840             /* copy ifreq[] to target user */
4841             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4842             for (i = 0; i < nb_ifreq ; i++) {
4843                 thunk_convert(argptr + i * target_ifreq_size,
4844                               host_ifc_buf + i * sizeof(struct ifreq),
4845                               ifreq_arg_type, THUNK_TARGET);
4846             }
4847             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4848         }
4849     }
4850 
4851     if (free_buf) {
4852         g_free(host_ifconf);
4853     }
4854 
4855     return ret;
4856 }
4857 
4858 #if defined(CONFIG_USBFS)
4859 #if HOST_LONG_BITS > 64
4860 #error USBDEVFS thunks do not support >64 bit hosts yet.
4861 #endif
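/*
 * Book-keeping for one in-flight USB request: the guest URB address, the
 * guest buffer address, the locked host pointer for that buffer, and the
 * host copy of the urb that is actually handed to the kernel.
 */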
4862 struct live_urb {
4863     uint64_t target_urb_adr;
4864     uint64_t target_buf_adr;
4865     char *target_buf_ptr;
4866     struct usbdevfs_urb host_urb;
4867 };
4868 
4869 static GHashTable *usbdevfs_urb_hashtable(void)
4870 {
4871     static GHashTable *urb_hashtable;
4872 
4873     if (!urb_hashtable) {
4874         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4875     }
4876     return urb_hashtable;
4877 }
4878 
4879 static void urb_hashtable_insert(struct live_urb *urb)
4880 {
4881     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4882     g_hash_table_insert(urb_hashtable, urb, urb);
4883 }
4884 
4885 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4886 {
4887     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4888     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4889 }
4890 
4891 static void urb_hashtable_remove(struct live_urb *urb)
4892 {
4893     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4894     g_hash_table_remove(urb_hashtable, urb);
4895 }
4896 
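/*
 * The USBDEVFS reap-URB ioctls: the kernel hands back a pointer to the
 * host urb; step back to the enclosing live_urb with offsetof(), unlock the
 * guest data buffer, copy the completed urb out to the guest, and store the
 * guest URB address through the caller's pointer argument.
 */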
4897 static abi_long
4898 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4899                           int fd, int cmd, abi_long arg)
4900 {
4901     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4902     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4903     struct live_urb *lurb;
4904     void *argptr;
4905     uint64_t hurb;
4906     int target_size;
4907     uintptr_t target_urb_adr;
4908     abi_long ret;
4909 
4910     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4911 
4912     memset(buf_temp, 0, sizeof(uint64_t));
4913     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4914     if (is_error(ret)) {
4915         return ret;
4916     }
4917 
4918     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4919     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4920     if (!lurb->target_urb_adr) {
4921         return -TARGET_EFAULT;
4922     }
4923     urb_hashtable_remove(lurb);
4924     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4925         lurb->host_urb.buffer_length);
4926     lurb->target_buf_ptr = NULL;
4927 
4928     /* restore the guest buffer pointer */
4929     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4930 
4931     /* update the guest urb struct */
4932     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4933     if (!argptr) {
4934         g_free(lurb);
4935         return -TARGET_EFAULT;
4936     }
4937     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4938     unlock_user(argptr, lurb->target_urb_adr, target_size);
4939 
4940     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4941     /* write back the urb handle */
4942     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4943     if (!argptr) {
4944         g_free(lurb);
4945         return -TARGET_EFAULT;
4946     }
4947 
4948     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4949     target_urb_adr = lurb->target_urb_adr;
4950     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4951     unlock_user(argptr, arg, target_size);
4952 
4953     g_free(lurb);
4954     return ret;
4955 }
4956 
4957 static abi_long
4958 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4959                              uint8_t *buf_temp __attribute__((unused)),
4960                              int fd, int cmd, abi_long arg)
4961 {
4962     struct live_urb *lurb;
4963 
4964     /* map target address back to host URB with metadata. */
4965     lurb = urb_hashtable_lookup(arg);
4966     if (!lurb) {
4967         return -TARGET_EFAULT;
4968     }
4969     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4970 }
4971 
4972 static abi_long
4973 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4974                             int fd, int cmd, abi_long arg)
4975 {
4976     const argtype *arg_type = ie->arg_type;
4977     int target_size;
4978     abi_long ret;
4979     void *argptr;
4980     int rw_dir;
4981     struct live_urb *lurb;
4982 
4983     /*
4984      * each submitted URB needs to map to a unique ID for the
4985      * kernel, and that unique ID needs to be a pointer to
4986      * host memory.  hence, we need to malloc for each URB.
4987      * isochronous transfers have a variable length struct.
4988      */
4989     arg_type++;
4990     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4991 
4992     /* construct host copy of urb and metadata */
4993     lurb = g_try_new0(struct live_urb, 1);
4994     if (!lurb) {
4995         return -TARGET_ENOMEM;
4996     }
4997 
4998     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4999     if (!argptr) {
5000         g_free(lurb);
5001         return -TARGET_EFAULT;
5002     }
5003     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5004     unlock_user(argptr, arg, 0);
5005 
5006     lurb->target_urb_adr = arg;
5007     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5008 
5009     /* buffer space used depends on endpoint type so lock the entire buffer */
5010     /* control type urbs should check the buffer contents for true direction */
5011     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5012     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5013         lurb->host_urb.buffer_length, 1);
5014     if (lurb->target_buf_ptr == NULL) {
5015         g_free(lurb);
5016         return -TARGET_EFAULT;
5017     }
5018 
5019     /* update buffer pointer in host copy */
5020     lurb->host_urb.buffer = lurb->target_buf_ptr;
5021 
5022     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5023     if (is_error(ret)) {
5024         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5025         g_free(lurb);
5026     } else {
5027         urb_hashtable_insert(lurb);
5028     }
5029 
5030     return ret;
5031 }
5032 #endif /* CONFIG_USBFS */
5033 
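/*
 * Device-mapper ioctls carry a variable-size payload after struct dm_ioctl
 * (described by data_start/data_size), so buf_temp is swapped for a larger
 * heap buffer and the payload is converted per command in both directions.
 */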
5034 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5035                             int cmd, abi_long arg)
5036 {
5037     void *argptr;
5038     struct dm_ioctl *host_dm;
5039     abi_long guest_data;
5040     uint32_t guest_data_size;
5041     int target_size;
5042     const argtype *arg_type = ie->arg_type;
5043     abi_long ret;
5044     void *big_buf = NULL;
5045     char *host_data;
5046 
5047     arg_type++;
5048     target_size = thunk_type_size(arg_type, 0);
5049     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5050     if (!argptr) {
5051         ret = -TARGET_EFAULT;
5052         goto out;
5053     }
5054     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5055     unlock_user(argptr, arg, 0);
5056 
5057     /* buf_temp is too small, so fetch things into a bigger buffer */
5058     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5059     memcpy(big_buf, buf_temp, target_size);
5060     buf_temp = big_buf;
5061     host_dm = big_buf;
5062 
5063     guest_data = arg + host_dm->data_start;
5064     if ((guest_data - arg) < 0) {
5065         ret = -TARGET_EINVAL;
5066         goto out;
5067     }
5068     guest_data_size = host_dm->data_size - host_dm->data_start;
5069     host_data = (char*)host_dm + host_dm->data_start;
5070 
5071     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5072     if (!argptr) {
5073         ret = -TARGET_EFAULT;
5074         goto out;
5075     }
5076 
5077     switch (ie->host_cmd) {
5078     case DM_REMOVE_ALL:
5079     case DM_LIST_DEVICES:
5080     case DM_DEV_CREATE:
5081     case DM_DEV_REMOVE:
5082     case DM_DEV_SUSPEND:
5083     case DM_DEV_STATUS:
5084     case DM_DEV_WAIT:
5085     case DM_TABLE_STATUS:
5086     case DM_TABLE_CLEAR:
5087     case DM_TABLE_DEPS:
5088     case DM_LIST_VERSIONS:
5089         /* no input data */
5090         break;
5091     case DM_DEV_RENAME:
5092     case DM_DEV_SET_GEOMETRY:
5093         /* data contains only strings */
5094         memcpy(host_data, argptr, guest_data_size);
5095         break;
5096     case DM_TARGET_MSG:
5097         memcpy(host_data, argptr, guest_data_size);
5098         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5099         break;
5100     case DM_TABLE_LOAD:
5101     {
5102         void *gspec = argptr;
5103         void *cur_data = host_data;
5104         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5105         int spec_size = thunk_type_size(dm_arg_type, 0);
5106         int i;
5107 
5108         for (i = 0; i < host_dm->target_count; i++) {
5109             struct dm_target_spec *spec = cur_data;
5110             uint32_t next;
5111             int slen;
5112 
5113             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5114             slen = strlen((char*)gspec + spec_size) + 1;
5115             next = spec->next;
5116             spec->next = sizeof(*spec) + slen;
5117             strcpy((char*)&spec[1], gspec + spec_size);
5118             gspec += next;
5119             cur_data += spec->next;
5120         }
5121         break;
5122     }
5123     default:
5124         ret = -TARGET_EINVAL;
5125         unlock_user(argptr, guest_data, 0);
5126         goto out;
5127     }
5128     unlock_user(argptr, guest_data, 0);
5129 
5130     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5131     if (!is_error(ret)) {
5132         guest_data = arg + host_dm->data_start;
5133         guest_data_size = host_dm->data_size - host_dm->data_start;
5134         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5135         switch (ie->host_cmd) {
5136         case DM_REMOVE_ALL:
5137         case DM_DEV_CREATE:
5138         case DM_DEV_REMOVE:
5139         case DM_DEV_RENAME:
5140         case DM_DEV_SUSPEND:
5141         case DM_DEV_STATUS:
5142         case DM_TABLE_LOAD:
5143         case DM_TABLE_CLEAR:
5144         case DM_TARGET_MSG:
5145         case DM_DEV_SET_GEOMETRY:
5146             /* no return data */
5147             break;
5148         case DM_LIST_DEVICES:
5149         {
5150             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5151             uint32_t remaining_data = guest_data_size;
5152             void *cur_data = argptr;
5153             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5154             int nl_size = 12; /* can't use thunk_size due to alignment */
5155 
5156             while (1) {
5157                 uint32_t next = nl->next;
5158                 if (next) {
5159                     nl->next = nl_size + (strlen(nl->name) + 1);
5160                 }
5161                 if (remaining_data < nl->next) {
5162                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5163                     break;
5164                 }
5165                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5166                 strcpy(cur_data + nl_size, nl->name);
5167                 cur_data += nl->next;
5168                 remaining_data -= nl->next;
5169                 if (!next) {
5170                     break;
5171                 }
5172                 nl = (void*)nl + next;
5173             }
5174             break;
5175         }
5176         case DM_DEV_WAIT:
5177         case DM_TABLE_STATUS:
5178         {
5179             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5180             void *cur_data = argptr;
5181             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5182             int spec_size = thunk_type_size(dm_arg_type, 0);
5183             int i;
5184 
5185             for (i = 0; i < host_dm->target_count; i++) {
5186                 uint32_t next = spec->next;
5187                 int slen = strlen((char*)&spec[1]) + 1;
5188                 spec->next = (cur_data - argptr) + spec_size + slen;
5189                 if (guest_data_size < spec->next) {
5190                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5191                     break;
5192                 }
5193                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5194                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5195                 cur_data = argptr + spec->next;
5196                 spec = (void*)host_dm + host_dm->data_start + next;
5197             }
5198             break;
5199         }
5200         case DM_TABLE_DEPS:
5201         {
5202             void *hdata = (void*)host_dm + host_dm->data_start;
5203             int count = *(uint32_t*)hdata;
5204             uint64_t *hdev = hdata + 8;
5205             uint64_t *gdev = argptr + 8;
5206             int i;
5207 
5208             *(uint32_t*)argptr = tswap32(count);
5209             for (i = 0; i < count; i++) {
5210                 *gdev = tswap64(*hdev);
5211                 gdev++;
5212                 hdev++;
5213             }
5214             break;
5215         }
5216         case DM_LIST_VERSIONS:
5217         {
5218             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5219             uint32_t remaining_data = guest_data_size;
5220             void *cur_data = argptr;
5221             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5222             int vers_size = thunk_type_size(dm_arg_type, 0);
5223 
5224             while (1) {
5225                 uint32_t next = vers->next;
5226                 if (next) {
5227                     vers->next = vers_size + (strlen(vers->name) + 1);
5228                 }
5229                 if (remaining_data < vers->next) {
5230                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5231                     break;
5232                 }
5233                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5234                 strcpy(cur_data + vers_size, vers->name);
5235                 cur_data += vers->next;
5236                 remaining_data -= vers->next;
5237                 if (!next) {
5238                     break;
5239                 }
5240                 vers = (void*)vers + next;
5241             }
5242             break;
5243         }
5244         default:
5245             unlock_user(argptr, guest_data, 0);
5246             ret = -TARGET_EINVAL;
5247             goto out;
5248         }
5249         unlock_user(argptr, guest_data, guest_data_size);
5250 
5251         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5252         if (!argptr) {
5253             ret = -TARGET_EFAULT;
5254             goto out;
5255         }
5256         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5257         unlock_user(argptr, arg, target_size);
5258     }
5259 out:
5260     g_free(big_buf);
5261     return ret;
5262 }
5263 
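/*
 * BLKPG: the argument embeds a pointer to a struct blkpg_partition, so the
 * outer blkpg_ioctl_arg is converted first and the nested partition record
 * is then fetched, converted, and pointed at a local host copy.
 */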
5264 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5265                                int cmd, abi_long arg)
5266 {
5267     void *argptr;
5268     int target_size;
5269     const argtype *arg_type = ie->arg_type;
5270     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5271     abi_long ret;
5272 
5273     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5274     struct blkpg_partition host_part;
5275 
5276     /* Read and convert blkpg */
5277     arg_type++;
5278     target_size = thunk_type_size(arg_type, 0);
5279     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5280     if (!argptr) {
5281         ret = -TARGET_EFAULT;
5282         goto out;
5283     }
5284     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5285     unlock_user(argptr, arg, 0);
5286 
5287     switch (host_blkpg->op) {
5288     case BLKPG_ADD_PARTITION:
5289     case BLKPG_DEL_PARTITION:
5290         /* payload is struct blkpg_partition */
5291         break;
5292     default:
5293         /* Unknown opcode */
5294         ret = -TARGET_EINVAL;
5295         goto out;
5296     }
5297 
5298     /* Read and convert blkpg->data */
5299     arg = (abi_long)(uintptr_t)host_blkpg->data;
5300     target_size = thunk_type_size(part_arg_type, 0);
5301     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5302     if (!argptr) {
5303         ret = -TARGET_EFAULT;
5304         goto out;
5305     }
5306     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5307     unlock_user(argptr, arg, 0);
5308 
5309     /* Swizzle the data pointer to our local copy and call! */
5310     host_blkpg->data = &host_part;
5311     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5312 
5313 out:
5314     return ret;
5315 }
5316 
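/*
 * Route ioctls built around struct rtentry (SIOCADDRT/SIOCDELRT): the
 * struct is converted field by field so that rt_dev, a pointer to a
 * device-name string in guest memory, can be locked and passed to the host
 * as a real pointer.
 */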
5317 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5318                                 int fd, int cmd, abi_long arg)
5319 {
5320     const argtype *arg_type = ie->arg_type;
5321     const StructEntry *se;
5322     const argtype *field_types;
5323     const int *dst_offsets, *src_offsets;
5324     int target_size;
5325     void *argptr;
5326     abi_ulong *target_rt_dev_ptr = NULL;
5327     unsigned long *host_rt_dev_ptr = NULL;
5328     abi_long ret;
5329     int i;
5330 
5331     assert(ie->access == IOC_W);
5332     assert(*arg_type == TYPE_PTR);
5333     arg_type++;
5334     assert(*arg_type == TYPE_STRUCT);
5335     target_size = thunk_type_size(arg_type, 0);
5336     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5337     if (!argptr) {
5338         return -TARGET_EFAULT;
5339     }
5340     arg_type++;
5341     assert(*arg_type == (int)STRUCT_rtentry);
5342     se = struct_entries + *arg_type++;
5343     assert(se->convert[0] == NULL);
5344     /* convert struct here to be able to catch rt_dev string */
5345     field_types = se->field_types;
5346     dst_offsets = se->field_offsets[THUNK_HOST];
5347     src_offsets = se->field_offsets[THUNK_TARGET];
5348     for (i = 0; i < se->nb_fields; i++) {
5349         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5350             assert(*field_types == TYPE_PTRVOID);
5351             target_rt_dev_ptr = argptr + src_offsets[i];
5352             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5353             if (*target_rt_dev_ptr != 0) {
5354                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5355                                                   tswapal(*target_rt_dev_ptr));
5356                 if (!*host_rt_dev_ptr) {
5357                     unlock_user(argptr, arg, 0);
5358                     return -TARGET_EFAULT;
5359                 }
5360             } else {
5361                 *host_rt_dev_ptr = 0;
5362             }
5363             field_types++;
5364             continue;
5365         }
5366         field_types = thunk_convert(buf_temp + dst_offsets[i],
5367                                     argptr + src_offsets[i],
5368                                     field_types, THUNK_HOST);
5369     }
5370     unlock_user(argptr, arg, 0);
5371 
5372     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5373 
5374     assert(host_rt_dev_ptr != NULL);
5375     assert(target_rt_dev_ptr != NULL);
5376     if (*host_rt_dev_ptr != 0) {
5377         unlock_user((void *)*host_rt_dev_ptr,
5378                     *target_rt_dev_ptr, 0);
5379     }
5380     return ret;
5381 }
5382 
5383 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                      int fd, int cmd, abi_long arg)
5385 {
5386     int sig = target_to_host_signal(arg);
5387     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5388 }
5389 
5390 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5391                                     int fd, int cmd, abi_long arg)
5392 {
5393     struct timeval tv;
5394     abi_long ret;
5395 
5396     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5397     if (is_error(ret)) {
5398         return ret;
5399     }
5400 
5401     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5402         if (copy_to_user_timeval(arg, &tv)) {
5403             return -TARGET_EFAULT;
5404         }
5405     } else {
5406         if (copy_to_user_timeval64(arg, &tv)) {
5407             return -TARGET_EFAULT;
5408         }
5409     }
5410 
5411     return ret;
5412 }
5413 
5414 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5415                                       int fd, int cmd, abi_long arg)
5416 {
5417     struct timespec ts;
5418     abi_long ret;
5419 
5420     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5421     if (is_error(ret)) {
5422         return ret;
5423     }
5424 
5425     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5426         if (host_to_target_timespec(arg, &ts)) {
5427             return -TARGET_EFAULT;
5428         }
5429     } else {
5430         if (host_to_target_timespec64(arg, &ts)) {
5431             return -TARGET_EFAULT;
5432         }
5433     }
5434 
5435     return ret;
5436 }
5437 
5438 #ifdef TIOCGPTPEER
5439 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5440                                      int fd, int cmd, abi_long arg)
5441 {
5442     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5443     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5444 }
5445 #endif
5446 
5447 #ifdef HAVE_DRM_H
5448 
5449 static void unlock_drm_version(struct drm_version *host_ver,
5450                                struct target_drm_version *target_ver,
5451                                bool copy)
5452 {
5453     unlock_user(host_ver->name, target_ver->name,
5454                                 copy ? host_ver->name_len : 0);
5455     unlock_user(host_ver->date, target_ver->date,
5456                                 copy ? host_ver->date_len : 0);
5457     unlock_user(host_ver->desc, target_ver->desc,
5458                                 copy ? host_ver->desc_len : 0);
5459 }
5460 
5461 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5462                                           struct target_drm_version *target_ver)
5463 {
5464     memset(host_ver, 0, sizeof(*host_ver));
5465 
5466     __get_user(host_ver->name_len, &target_ver->name_len);
5467     if (host_ver->name_len) {
5468         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5469                                    target_ver->name_len, 0);
5470         if (!host_ver->name) {
5471             return -EFAULT;
5472         }
5473     }
5474 
5475     __get_user(host_ver->date_len, &target_ver->date_len);
5476     if (host_ver->date_len) {
5477         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5478                                    target_ver->date_len, 0);
5479         if (!host_ver->date) {
5480             goto err;
5481         }
5482     }
5483 
5484     __get_user(host_ver->desc_len, &target_ver->desc_len);
5485     if (host_ver->desc_len) {
5486         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5487                                    target_ver->desc_len, 0);
5488         if (!host_ver->desc) {
5489             goto err;
5490         }
5491     }
5492 
5493     return 0;
5494 err:
5495     unlock_drm_version(host_ver, target_ver, false);
5496     return -EFAULT;
5497 }
5498 
5499 static inline void host_to_target_drmversion(
5500                                           struct target_drm_version *target_ver,
5501                                           struct drm_version *host_ver)
5502 {
5503     __put_user(host_ver->version_major, &target_ver->version_major);
5504     __put_user(host_ver->version_minor, &target_ver->version_minor);
5505     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5506     __put_user(host_ver->name_len, &target_ver->name_len);
5507     __put_user(host_ver->date_len, &target_ver->date_len);
5508     __put_user(host_ver->desc_len, &target_ver->desc_len);
5509     unlock_drm_version(host_ver, target_ver, true);
5510 }
5511 
5512 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5513                              int fd, int cmd, abi_long arg)
5514 {
5515     struct drm_version *ver;
5516     struct target_drm_version *target_ver;
5517     abi_long ret;
5518 
5519     switch (ie->host_cmd) {
5520     case DRM_IOCTL_VERSION:
5521         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5522             return -TARGET_EFAULT;
5523         }
5524         ver = (struct drm_version *)buf_temp;
5525         ret = target_to_host_drmversion(ver, target_ver);
5526         if (!is_error(ret)) {
5527             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5528             if (is_error(ret)) {
5529                 unlock_drm_version(ver, target_ver, false);
5530             } else {
5531                 host_to_target_drmversion(target_ver, ver);
5532             }
5533         }
5534         unlock_user_struct(target_ver, arg, 0);
5535         return ret;
5536     }
5537     return -TARGET_ENOSYS;
5538 }
5539 
5540 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5541                                            struct drm_i915_getparam *gparam,
5542                                            int fd, abi_long arg)
5543 {
5544     abi_long ret;
5545     int value;
5546     struct target_drm_i915_getparam *target_gparam;
5547 
5548     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5549         return -TARGET_EFAULT;
5550     }
5551 
5552     __get_user(gparam->param, &target_gparam->param);
5553     gparam->value = &value;
5554     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5555     put_user_s32(value, target_gparam->value);
5556 
5557     unlock_user_struct(target_gparam, arg, 0);
5558     return ret;
5559 }
5560 
5561 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5562                                   int fd, int cmd, abi_long arg)
5563 {
5564     switch (ie->host_cmd) {
5565     case DRM_IOCTL_I915_GETPARAM:
5566         return do_ioctl_drm_i915_getparam(ie,
5567                                           (struct drm_i915_getparam *)buf_temp,
5568                                           fd, arg);
5569     default:
5570         return -TARGET_ENOSYS;
5571     }
5572 }
5573 
5574 #endif
5575 
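/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable-length array
 * of 'count' Ethernet addresses, so the header and the address block are
 * copied in from guest memory as two separate steps.
 */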
5576 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5577                                         int fd, int cmd, abi_long arg)
5578 {
5579     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5580     struct tun_filter *target_filter;
5581     char *target_addr;
5582 
5583     assert(ie->access == IOC_W);
5584 
5585     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5586     if (!target_filter) {
5587         return -TARGET_EFAULT;
5588     }
5589     filter->flags = tswap16(target_filter->flags);
5590     filter->count = tswap16(target_filter->count);
5591     unlock_user(target_filter, arg, 0);
5592 
5593     if (filter->count) {
5594         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5595             MAX_STRUCT_SIZE) {
5596             return -TARGET_EFAULT;
5597         }
5598 
5599         target_addr = lock_user(VERIFY_READ,
5600                                 arg + offsetof(struct tun_filter, addr),
5601                                 filter->count * ETH_ALEN, 1);
5602         if (!target_addr) {
5603             return -TARGET_EFAULT;
5604         }
5605         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5606         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5607     }
5608 
5609     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5610 }
5611 
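/*
 * The ioctl table is generated from ioctls.h: plain IOCTL() entries are
 * converted generically from their argtype description, IOCTL_SPECIAL()
 * entries name a custom do_ioctl callback, and IOCTL_IGNORE() entries leave
 * host_cmd at 0 so they fail with -TARGET_ENOTTY in do_ioctl() below.
 */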
5612 IOCTLEntry ioctl_entries[] = {
5613 #define IOCTL(cmd, access, ...) \
5614     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5615 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5616     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5617 #define IOCTL_IGNORE(cmd) \
5618     { TARGET_ ## cmd, 0, #cmd },
5619 #include "ioctls.h"
5620     { 0, 0, },
5621 };
5622 
5623 /* ??? Implement proper locking for ioctls.  */
5624 /* do_ioctl() must return target values and target errnos. */
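/* Generic path: look the command up in ioctl_entries, prefer a registered
   custom handler, otherwise thunk-convert the argument according to the
   entry's IOC_R / IOC_W / IOC_RW access mode. */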
5625 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5626 {
5627     const IOCTLEntry *ie;
5628     const argtype *arg_type;
5629     abi_long ret;
5630     uint8_t buf_temp[MAX_STRUCT_SIZE];
5631     int target_size;
5632     void *argptr;
5633 
5634     ie = ioctl_entries;
5635     for(;;) {
5636         if (ie->target_cmd == 0) {
5637             qemu_log_mask(
5638                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5639             return -TARGET_ENOTTY;
5640         }
5641         if (ie->target_cmd == cmd)
5642             break;
5643         ie++;
5644     }
5645     arg_type = ie->arg_type;
5646     if (ie->do_ioctl) {
5647         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5648     } else if (!ie->host_cmd) {
5649         /* Some architectures define BSD ioctls in their headers
5650            that are not implemented in Linux.  */
5651         return -TARGET_ENOTTY;
5652     }
5653 
5654     switch(arg_type[0]) {
5655     case TYPE_NULL:
5656         /* no argument */
5657         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5658         break;
5659     case TYPE_PTRVOID:
5660     case TYPE_INT:
5661     case TYPE_LONG:
5662     case TYPE_ULONG:
5663         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5664         break;
5665     case TYPE_PTR:
5666         arg_type++;
5667         target_size = thunk_type_size(arg_type, 0);
5668         switch(ie->access) {
5669         case IOC_R:
5670             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5671             if (!is_error(ret)) {
5672                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5673                 if (!argptr)
5674                     return -TARGET_EFAULT;
5675                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5676                 unlock_user(argptr, arg, target_size);
5677             }
5678             break;
5679         case IOC_W:
5680             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5681             if (!argptr)
5682                 return -TARGET_EFAULT;
5683             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5684             unlock_user(argptr, arg, 0);
5685             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5686             break;
5687         default:
5688         case IOC_RW:
5689             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5690             if (!argptr)
5691                 return -TARGET_EFAULT;
5692             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5693             unlock_user(argptr, arg, 0);
5694             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5695             if (!is_error(ret)) {
5696                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5697                 if (!argptr)
5698                     return -TARGET_EFAULT;
5699                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5700                 unlock_user(argptr, arg, target_size);
5701             }
5702             break;
5703         }
5704         break;
5705     default:
5706         qemu_log_mask(LOG_UNIMP,
5707                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5708                       (long)cmd, arg_type[0]);
5709         ret = -TARGET_ENOTTY;
5710         break;
5711     }
5712     return ret;
5713 }
5714 
5715 static const bitmask_transtbl iflag_tbl[] = {
5716         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5717         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5718         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5719         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5720         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5721         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5722         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5723         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5724         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5725         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5726         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5727         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5728         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5729         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5730         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5731 };
5732 
5733 static const bitmask_transtbl oflag_tbl[] = {
5734 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5735 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5736 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5737 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5738 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5739 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5740 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5741 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5742 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5743 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5744 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5745 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5746 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5747 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5748 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5749 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5750 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5751 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5752 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5753 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5754 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5755 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5756 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5757 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5758 };
5759 
5760 static const bitmask_transtbl cflag_tbl[] = {
5761 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5762 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5763 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5764 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5765 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5766 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5767 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5768 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5769 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5770 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5771 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5772 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5773 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5774 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5775 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5776 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5777 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5778 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5779 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5780 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5781 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5782 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5783 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5784 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5785 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5786 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5787 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5788 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5789 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5790 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5791 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5792 };
5793 
5794 static const bitmask_transtbl lflag_tbl[] = {
5795   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5796   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5797   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5798   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5799   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5800   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5801   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5802   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5803   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5804   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5805   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5806   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5807   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5808   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5809   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5810   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5811 };
5812 
5813 static void target_to_host_termios (void *dst, const void *src)
5814 {
5815     struct host_termios *host = dst;
5816     const struct target_termios *target = src;
5817 
5818     host->c_iflag =
5819         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5820     host->c_oflag =
5821         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5822     host->c_cflag =
5823         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5824     host->c_lflag =
5825         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5826     host->c_line = target->c_line;
5827 
5828     memset(host->c_cc, 0, sizeof(host->c_cc));
5829     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5830     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5831     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5832     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5833     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5834     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5835     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5836     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5837     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5838     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5839     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5840     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5841     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5842     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5843     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5844     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5845     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5846 }
5847 
5848 static void host_to_target_termios (void *dst, const void *src)
5849 {
5850     struct target_termios *target = dst;
5851     const struct host_termios *host = src;
5852 
5853     target->c_iflag =
5854         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5855     target->c_oflag =
5856         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5857     target->c_cflag =
5858         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5859     target->c_lflag =
5860         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5861     target->c_line = host->c_line;
5862 
5863     memset(target->c_cc, 0, sizeof(target->c_cc));
5864     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5865     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5866     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5867     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5868     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5869     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5870     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5871     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5872     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5873     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5874     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5875     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5876     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5877     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5878     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5879     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5880     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5881 }
5882 
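/*
 * Converter pair for the termios ioctls: the thunk layer uses these instead
 * of a generic field-by-field copy because the flag bits and c_cc indices
 * differ between host and target (registered via STRUCT_SPECIAL in
 * syscall_types.h, not shown here).
 */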
5883 static const StructEntry struct_termios_def = {
5884     .convert = { host_to_target_termios, target_to_host_termios },
5885     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5886     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5887     .print = print_termios,
5888 };
5889 
5890 /* If the host does not provide these bits, they may be safely discarded. */
5891 #ifndef MAP_SYNC
5892 #define MAP_SYNC 0
5893 #endif
5894 #ifndef MAP_UNINITIALIZED
5895 #define MAP_UNINITIALIZED 0
5896 #endif
5897 
5898 static const bitmask_transtbl mmap_flags_tbl[] = {
5899     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5900     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5901       MAP_ANONYMOUS, MAP_ANONYMOUS },
5902     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5903       MAP_GROWSDOWN, MAP_GROWSDOWN },
5904     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5905       MAP_DENYWRITE, MAP_DENYWRITE },
5906     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5907       MAP_EXECUTABLE, MAP_EXECUTABLE },
5908     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5909     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5910       MAP_NORESERVE, MAP_NORESERVE },
5911     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5912     /* MAP_STACK had been ignored by the kernel for quite some time.
5913        Recognize it for the target insofar as we do not want to pass
5914        it through to the host.  */
5915     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5916     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5917     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5918     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5919       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5920     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5921       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5922 };
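/*
 * Each entry above pairs a target mask/value with the corresponding host
 * mask/value: target_to_host_bitmask() ORs in the host bits whenever the
 * masked target flags match the target value, and host_to_target_bitmask()
 * does the reverse.  For the simple one-to-one flags listed here the mask
 * and value fields are identical.
 */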
5923 
5924 /*
5925  * Arrange for legacy / undefined architecture specific flags to be
5926  * ignored by mmap handling code.
5927  */
5928 #ifndef TARGET_MAP_32BIT
5929 #define TARGET_MAP_32BIT 0
5930 #endif
5931 #ifndef TARGET_MAP_HUGE_2MB
5932 #define TARGET_MAP_HUGE_2MB 0
5933 #endif
5934 #ifndef TARGET_MAP_HUGE_1GB
5935 #define TARGET_MAP_HUGE_1GB 0
5936 #endif
5937 
5938 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5939                         int target_flags, int fd, off_t offset)
5940 {
5941     /*
5942      * The historical set of flags that all mmap types implicitly support.
5943      */
5944     enum {
5945         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5946                                | TARGET_MAP_PRIVATE
5947                                | TARGET_MAP_FIXED
5948                                | TARGET_MAP_ANONYMOUS
5949                                | TARGET_MAP_DENYWRITE
5950                                | TARGET_MAP_EXECUTABLE
5951                                | TARGET_MAP_UNINITIALIZED
5952                                | TARGET_MAP_GROWSDOWN
5953                                | TARGET_MAP_LOCKED
5954                                | TARGET_MAP_NORESERVE
5955                                | TARGET_MAP_POPULATE
5956                                | TARGET_MAP_NONBLOCK
5957                                | TARGET_MAP_STACK
5958                                | TARGET_MAP_HUGETLB
5959                                | TARGET_MAP_32BIT
5960                                | TARGET_MAP_HUGE_2MB
5961                                | TARGET_MAP_HUGE_1GB
5962     };
5963     int host_flags;
5964 
5965     switch (target_flags & TARGET_MAP_TYPE) {
5966     case TARGET_MAP_PRIVATE:
5967         host_flags = MAP_PRIVATE;
5968         break;
5969     case TARGET_MAP_SHARED:
5970         host_flags = MAP_SHARED;
5971         break;
5972     case TARGET_MAP_SHARED_VALIDATE:
5973         /*
5974          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5975          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5976          */
5977         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5978             return -TARGET_EOPNOTSUPP;
5979         }
5980         host_flags = MAP_SHARED_VALIDATE;
5981         if (target_flags & TARGET_MAP_SYNC) {
5982             host_flags |= MAP_SYNC;
5983         }
5984         break;
5985     default:
5986         return -TARGET_EINVAL;
5987     }
5988     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5989 
5990     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5991 }
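/*
 * Worked example: a guest mmap with MAP_PRIVATE | MAP_ANONYMOUS takes the
 * TARGET_MAP_PRIVATE arm of the switch (host_flags = MAP_PRIVATE) and then
 * picks up MAP_ANONYMOUS from mmap_flags_tbl, so the host mapping is made
 * with MAP_PRIVATE | MAP_ANONYMOUS.  Target bits outside the table are
 * silently dropped, except in the MAP_SHARED_VALIDATE case above, which
 * rejects anything beyond the legacy mask plus MAP_SYNC.
 */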
5992 
5993 /*
5994  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5995  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5996  */
5997 #if defined(TARGET_I386)
5998 
5999 /* NOTE: there is really one LDT for all the threads */
6000 static uint8_t *ldt_table;
6001 
6002 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6003 {
6004     int size;
6005     void *p;
6006 
6007     if (!ldt_table)
6008         return 0;
6009     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6010     if (size > bytecount)
6011         size = bytecount;
6012     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6013     if (!p)
6014         return -TARGET_EFAULT;
6015     /* ??? Should this be byteswapped?  */
6016     memcpy(p, ldt_table, size);
6017     unlock_user(p, ptr, size);
6018     return size;
6019 }
6020 
6021 /* XXX: add locking support */
6022 static abi_long write_ldt(CPUX86State *env,
6023                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6024 {
6025     struct target_modify_ldt_ldt_s ldt_info;
6026     struct target_modify_ldt_ldt_s *target_ldt_info;
6027     int seg_32bit, contents, read_exec_only, limit_in_pages;
6028     int seg_not_present, useable, lm;
6029     uint32_t *lp, entry_1, entry_2;
6030 
6031     if (bytecount != sizeof(ldt_info))
6032         return -TARGET_EINVAL;
6033     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6034         return -TARGET_EFAULT;
6035     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6036     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6037     ldt_info.limit = tswap32(target_ldt_info->limit);
6038     ldt_info.flags = tswap32(target_ldt_info->flags);
6039     unlock_user_struct(target_ldt_info, ptr, 0);
6040 
6041     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6042         return -TARGET_EINVAL;
6043     seg_32bit = ldt_info.flags & 1;
6044     contents = (ldt_info.flags >> 1) & 3;
6045     read_exec_only = (ldt_info.flags >> 3) & 1;
6046     limit_in_pages = (ldt_info.flags >> 4) & 1;
6047     seg_not_present = (ldt_info.flags >> 5) & 1;
6048     useable = (ldt_info.flags >> 6) & 1;
6049 #ifdef TARGET_ABI32
6050     lm = 0;
6051 #else
6052     lm = (ldt_info.flags >> 7) & 1;
6053 #endif
6054     if (contents == 3) {
6055         if (oldmode)
6056             return -TARGET_EINVAL;
6057         if (seg_not_present == 0)
6058             return -TARGET_EINVAL;
6059     }
6060     /* allocate the LDT */
6061     if (!ldt_table) {
6062         env->ldt.base = target_mmap(0,
6063                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6064                                     PROT_READ|PROT_WRITE,
6065                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6066         if (env->ldt.base == -1)
6067             return -TARGET_ENOMEM;
6068         memset(g2h_untagged(env->ldt.base), 0,
6069                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6070         env->ldt.limit = 0xffff;
6071         ldt_table = g2h_untagged(env->ldt.base);
6072     }
6073 
6074     /* NOTE: same code as Linux kernel */
6075     /* Allow LDTs to be cleared by the user. */
6076     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6077         if (oldmode ||
6078             (contents == 0		&&
6079              read_exec_only == 1	&&
6080              seg_32bit == 0		&&
6081              limit_in_pages == 0	&&
6082              seg_not_present == 1	&&
6083              useable == 0 )) {
6084             entry_1 = 0;
6085             entry_2 = 0;
6086             goto install;
6087         }
6088     }
6089 
6090     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6091         (ldt_info.limit & 0x0ffff);
6092     entry_2 = (ldt_info.base_addr & 0xff000000) |
6093         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6094         (ldt_info.limit & 0xf0000) |
6095         ((read_exec_only ^ 1) << 9) |
6096         (contents << 10) |
6097         ((seg_not_present ^ 1) << 15) |
6098         (seg_32bit << 22) |
6099         (limit_in_pages << 23) |
6100         (lm << 21) |
6101         0x7000;
6102     if (!oldmode)
6103         entry_2 |= (useable << 20);
6104 
6105     /* Install the new entry ...  */
6106 install:
6107     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6108     lp[0] = tswap32(entry_1);
6109     lp[1] = tswap32(entry_2);
6110     return 0;
6111 }
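/*
 * The entry_1/entry_2 words built above follow the hardware segment
 * descriptor layout: entry_1 holds base[15:0] and limit[15:0], entry_2
 * holds base[31:24], base[23:16], limit[19:16] and the attribute bits,
 * with the constant 0x7000 setting the code/data-segment bit and DPL=3.
 * The words are stored byteswapped because the LDT lives in guest memory.
 */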
6112 
6113 /* specific and weird i386 syscalls */
6114 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6115                               unsigned long bytecount)
6116 {
6117     abi_long ret;
6118 
6119     switch (func) {
6120     case 0:
6121         ret = read_ldt(ptr, bytecount);
6122         break;
6123     case 1:
6124         ret = write_ldt(env, ptr, bytecount, 1);
6125         break;
6126     case 0x11:
6127         ret = write_ldt(env, ptr, bytecount, 0);
6128         break;
6129     default:
6130         ret = -TARGET_ENOSYS;
6131         break;
6132     }
6133     return ret;
6134 }
6135 
6136 #if defined(TARGET_ABI32)
6137 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6138 {
6139     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6140     struct target_modify_ldt_ldt_s ldt_info;
6141     struct target_modify_ldt_ldt_s *target_ldt_info;
6142     int seg_32bit, contents, read_exec_only, limit_in_pages;
6143     int seg_not_present, useable, lm;
6144     uint32_t *lp, entry_1, entry_2;
6145     int i;
6146 
6147     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6148     if (!target_ldt_info)
6149         return -TARGET_EFAULT;
6150     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6151     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6152     ldt_info.limit = tswap32(target_ldt_info->limit);
6153     ldt_info.flags = tswap32(target_ldt_info->flags);
6154     if (ldt_info.entry_number == -1) {
6155         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6156             if (gdt_table[i] == 0) {
6157                 ldt_info.entry_number = i;
6158                 target_ldt_info->entry_number = tswap32(i);
6159                 break;
6160             }
6161         }
6162     }
6163     unlock_user_struct(target_ldt_info, ptr, 1);
6164 
6165     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6166         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6167            return -TARGET_EINVAL;
6168     seg_32bit = ldt_info.flags & 1;
6169     contents = (ldt_info.flags >> 1) & 3;
6170     read_exec_only = (ldt_info.flags >> 3) & 1;
6171     limit_in_pages = (ldt_info.flags >> 4) & 1;
6172     seg_not_present = (ldt_info.flags >> 5) & 1;
6173     useable = (ldt_info.flags >> 6) & 1;
6174 #ifdef TARGET_ABI32
6175     lm = 0;
6176 #else
6177     lm = (ldt_info.flags >> 7) & 1;
6178 #endif
6179 
6180     if (contents == 3) {
6181         if (seg_not_present == 0)
6182             return -TARGET_EINVAL;
6183     }
6184 
6185     /* NOTE: same code as Linux kernel */
6186     /* Allow LDTs to be cleared by the user. */
6187     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6188         if ((contents == 0             &&
6189              read_exec_only == 1       &&
6190              seg_32bit == 0            &&
6191              limit_in_pages == 0       &&
6192              seg_not_present == 1      &&
6193              useable == 0 )) {
6194             entry_1 = 0;
6195             entry_2 = 0;
6196             goto install;
6197         }
6198     }
6199 
6200     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6201         (ldt_info.limit & 0x0ffff);
6202     entry_2 = (ldt_info.base_addr & 0xff000000) |
6203         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6204         (ldt_info.limit & 0xf0000) |
6205         ((read_exec_only ^ 1) << 9) |
6206         (contents << 10) |
6207         ((seg_not_present ^ 1) << 15) |
6208         (seg_32bit << 22) |
6209         (limit_in_pages << 23) |
6210         (useable << 20) |
6211         (lm << 21) |
6212         0x7000;
6213 
6214     /* Install the new entry ...  */
6215 install:
6216     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6217     lp[0] = tswap32(entry_1);
6218     lp[1] = tswap32(entry_2);
6219     return 0;
6220 }
6221 
6222 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6223 {
6224     struct target_modify_ldt_ldt_s *target_ldt_info;
6225     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6226     uint32_t base_addr, limit, flags;
6227     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6228     int seg_not_present, useable, lm;
6229     uint32_t *lp, entry_1, entry_2;
6230 
6231     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6232     if (!target_ldt_info)
6233         return -TARGET_EFAULT;
6234     idx = tswap32(target_ldt_info->entry_number);
6235     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6236         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6237         unlock_user_struct(target_ldt_info, ptr, 1);
6238         return -TARGET_EINVAL;
6239     }
6240     lp = (uint32_t *)(gdt_table + idx);
6241     entry_1 = tswap32(lp[0]);
6242     entry_2 = tswap32(lp[1]);
6243 
6244     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6245     contents = (entry_2 >> 10) & 3;
6246     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6247     seg_32bit = (entry_2 >> 22) & 1;
6248     limit_in_pages = (entry_2 >> 23) & 1;
6249     useable = (entry_2 >> 20) & 1;
6250 #ifdef TARGET_ABI32
6251     lm = 0;
6252 #else
6253     lm = (entry_2 >> 21) & 1;
6254 #endif
6255     flags = (seg_32bit << 0) | (contents << 1) |
6256         (read_exec_only << 3) | (limit_in_pages << 4) |
6257         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6258     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6259     base_addr = (entry_1 >> 16) |
6260         (entry_2 & 0xff000000) |
6261         ((entry_2 & 0xff) << 16);
6262     target_ldt_info->base_addr = tswapal(base_addr);
6263     target_ldt_info->limit = tswap32(limit);
6264     target_ldt_info->flags = tswap32(flags);
6265     unlock_user_struct(target_ldt_info, ptr, 1);
6266     return 0;
6267 }
6268 
6269 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6270 {
6271     return -TARGET_ENOSYS;
6272 }
6273 #else
6274 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6275 {
6276     abi_long ret = 0;
6277     abi_ulong val;
6278     int idx;
6279 
6280     switch(code) {
6281     case TARGET_ARCH_SET_GS:
6282     case TARGET_ARCH_SET_FS:
6283         if (code == TARGET_ARCH_SET_GS)
6284             idx = R_GS;
6285         else
6286             idx = R_FS;
6287         cpu_x86_load_seg(env, idx, 0);
6288         env->segs[idx].base = addr;
6289         break;
6290     case TARGET_ARCH_GET_GS:
6291     case TARGET_ARCH_GET_FS:
6292         if (code == TARGET_ARCH_GET_GS)
6293             idx = R_GS;
6294         else
6295             idx = R_FS;
6296         val = env->segs[idx].base;
6297         if (put_user(val, addr, abi_ulong))
6298             ret = -TARGET_EFAULT;
6299         break;
6300     default:
6301         ret = -TARGET_EINVAL;
6302         break;
6303     }
6304     return ret;
6305 }
6306 #endif /* defined(TARGET_ABI32) */
6307 #endif /* defined(TARGET_I386) */
6308 
6309 /*
6310  * These constants are generic.  Supply any that are missing from the host.
6311  */
6312 #ifndef PR_SET_NAME
6313 # define PR_SET_NAME    15
6314 # define PR_GET_NAME    16
6315 #endif
6316 #ifndef PR_SET_FP_MODE
6317 # define PR_SET_FP_MODE 45
6318 # define PR_GET_FP_MODE 46
6319 # define PR_FP_MODE_FR   (1 << 0)
6320 # define PR_FP_MODE_FRE  (1 << 1)
6321 #endif
6322 #ifndef PR_SVE_SET_VL
6323 # define PR_SVE_SET_VL  50
6324 # define PR_SVE_GET_VL  51
6325 # define PR_SVE_VL_LEN_MASK  0xffff
6326 # define PR_SVE_VL_INHERIT   (1 << 17)
6327 #endif
6328 #ifndef PR_PAC_RESET_KEYS
6329 # define PR_PAC_RESET_KEYS  54
6330 # define PR_PAC_APIAKEY   (1 << 0)
6331 # define PR_PAC_APIBKEY   (1 << 1)
6332 # define PR_PAC_APDAKEY   (1 << 2)
6333 # define PR_PAC_APDBKEY   (1 << 3)
6334 # define PR_PAC_APGAKEY   (1 << 4)
6335 #endif
6336 #ifndef PR_SET_TAGGED_ADDR_CTRL
6337 # define PR_SET_TAGGED_ADDR_CTRL 55
6338 # define PR_GET_TAGGED_ADDR_CTRL 56
6339 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6340 #endif
6341 #ifndef PR_SET_IO_FLUSHER
6342 # define PR_SET_IO_FLUSHER 57
6343 # define PR_GET_IO_FLUSHER 58
6344 #endif
6345 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6346 # define PR_SET_SYSCALL_USER_DISPATCH 59
6347 #endif
6348 #ifndef PR_SME_SET_VL
6349 # define PR_SME_SET_VL  63
6350 # define PR_SME_GET_VL  64
6351 # define PR_SME_VL_LEN_MASK  0xffff
6352 # define PR_SME_VL_INHERIT   (1 << 17)
6353 #endif
6354 
6355 #include "target_prctl.h"
6356 
6357 static abi_long do_prctl_inval0(CPUArchState *env)
6358 {
6359     return -TARGET_EINVAL;
6360 }
6361 
6362 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6363 {
6364     return -TARGET_EINVAL;
6365 }
6366 
6367 #ifndef do_prctl_get_fp_mode
6368 #define do_prctl_get_fp_mode do_prctl_inval0
6369 #endif
6370 #ifndef do_prctl_set_fp_mode
6371 #define do_prctl_set_fp_mode do_prctl_inval1
6372 #endif
6373 #ifndef do_prctl_sve_get_vl
6374 #define do_prctl_sve_get_vl do_prctl_inval0
6375 #endif
6376 #ifndef do_prctl_sve_set_vl
6377 #define do_prctl_sve_set_vl do_prctl_inval1
6378 #endif
6379 #ifndef do_prctl_reset_keys
6380 #define do_prctl_reset_keys do_prctl_inval1
6381 #endif
6382 #ifndef do_prctl_set_tagged_addr_ctrl
6383 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6384 #endif
6385 #ifndef do_prctl_get_tagged_addr_ctrl
6386 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6387 #endif
6388 #ifndef do_prctl_get_unalign
6389 #define do_prctl_get_unalign do_prctl_inval1
6390 #endif
6391 #ifndef do_prctl_set_unalign
6392 #define do_prctl_set_unalign do_prctl_inval1
6393 #endif
6394 #ifndef do_prctl_sme_get_vl
6395 #define do_prctl_sme_get_vl do_prctl_inval0
6396 #endif
6397 #ifndef do_prctl_sme_set_vl
6398 #define do_prctl_sme_set_vl do_prctl_inval1
6399 #endif
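/*
 * Any hook not provided by the target's target_prctl.h therefore collapses
 * to one of the stubs above, and the corresponding prctl option fails with
 * EINVAL; for example, PR_SVE_SET_VL only succeeds on targets that supply
 * do_prctl_sve_set_vl.
 */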
6400 
6401 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6402                          abi_long arg3, abi_long arg4, abi_long arg5)
6403 {
6404     abi_long ret;
6405 
6406     switch (option) {
6407     case PR_GET_PDEATHSIG:
6408         {
6409             int deathsig;
6410             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6411                                   arg3, arg4, arg5));
6412             if (!is_error(ret) &&
6413                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6414                 return -TARGET_EFAULT;
6415             }
6416             return ret;
6417         }
6418     case PR_SET_PDEATHSIG:
6419         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6420                                arg3, arg4, arg5));
6421     case PR_GET_NAME:
6422         {
6423             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6424             if (!name) {
6425                 return -TARGET_EFAULT;
6426             }
6427             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6428                                   arg3, arg4, arg5));
6429             unlock_user(name, arg2, 16);
6430             return ret;
6431         }
6432     case PR_SET_NAME:
6433         {
6434             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6435             if (!name) {
6436                 return -TARGET_EFAULT;
6437             }
6438             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6439                                   arg3, arg4, arg5));
6440             unlock_user(name, arg2, 0);
6441             return ret;
6442         }
6443     case PR_GET_FP_MODE:
6444         return do_prctl_get_fp_mode(env);
6445     case PR_SET_FP_MODE:
6446         return do_prctl_set_fp_mode(env, arg2);
6447     case PR_SVE_GET_VL:
6448         return do_prctl_sve_get_vl(env);
6449     case PR_SVE_SET_VL:
6450         return do_prctl_sve_set_vl(env, arg2);
6451     case PR_SME_GET_VL:
6452         return do_prctl_sme_get_vl(env);
6453     case PR_SME_SET_VL:
6454         return do_prctl_sme_set_vl(env, arg2);
6455     case PR_PAC_RESET_KEYS:
6456         if (arg3 || arg4 || arg5) {
6457             return -TARGET_EINVAL;
6458         }
6459         return do_prctl_reset_keys(env, arg2);
6460     case PR_SET_TAGGED_ADDR_CTRL:
6461         if (arg3 || arg4 || arg5) {
6462             return -TARGET_EINVAL;
6463         }
6464         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6465     case PR_GET_TAGGED_ADDR_CTRL:
6466         if (arg2 || arg3 || arg4 || arg5) {
6467             return -TARGET_EINVAL;
6468         }
6469         return do_prctl_get_tagged_addr_ctrl(env);
6470 
6471     case PR_GET_UNALIGN:
6472         return do_prctl_get_unalign(env, arg2);
6473     case PR_SET_UNALIGN:
6474         return do_prctl_set_unalign(env, arg2);
6475 
6476     case PR_CAP_AMBIENT:
6477     case PR_CAPBSET_READ:
6478     case PR_CAPBSET_DROP:
6479     case PR_GET_DUMPABLE:
6480     case PR_SET_DUMPABLE:
6481     case PR_GET_KEEPCAPS:
6482     case PR_SET_KEEPCAPS:
6483     case PR_GET_SECUREBITS:
6484     case PR_SET_SECUREBITS:
6485     case PR_GET_TIMING:
6486     case PR_SET_TIMING:
6487     case PR_GET_TIMERSLACK:
6488     case PR_SET_TIMERSLACK:
6489     case PR_MCE_KILL:
6490     case PR_MCE_KILL_GET:
6491     case PR_GET_NO_NEW_PRIVS:
6492     case PR_SET_NO_NEW_PRIVS:
6493     case PR_GET_IO_FLUSHER:
6494     case PR_SET_IO_FLUSHER:
6495     case PR_SET_CHILD_SUBREAPER:
6496     case PR_GET_SPECULATION_CTRL:
6497     case PR_SET_SPECULATION_CTRL:
6498         /* Some prctl options have no pointer arguments and can be passed through. */
6499         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6500 
6501     case PR_GET_CHILD_SUBREAPER:
6502         {
6503             int val;
6504             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6505                                   arg3, arg4, arg5));
6506             if (!is_error(ret) && put_user_s32(val, arg2)) {
6507                 return -TARGET_EFAULT;
6508             }
6509             return ret;
6510         }
6511 
6512     case PR_GET_TID_ADDRESS:
6513         {
6514             TaskState *ts = get_task_state(env_cpu(env));
6515             return put_user_ual(ts->child_tidptr, arg2);
6516         }
6517 
6518     case PR_GET_FPEXC:
6519     case PR_SET_FPEXC:
6520         /* Was used for SPE on PowerPC. */
6521         return -TARGET_EINVAL;
6522 
6523     case PR_GET_ENDIAN:
6524     case PR_SET_ENDIAN:
6525     case PR_GET_FPEMU:
6526     case PR_SET_FPEMU:
6527     case PR_SET_MM:
6528     case PR_GET_SECCOMP:
6529     case PR_SET_SECCOMP:
6530     case PR_SET_SYSCALL_USER_DISPATCH:
6531     case PR_GET_THP_DISABLE:
6532     case PR_SET_THP_DISABLE:
6533     case PR_GET_TSC:
6534     case PR_SET_TSC:
6535         /* Disable to prevent the target disabling stuff we need. */
6536         return -TARGET_EINVAL;
6537 
6538     default:
6539         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6540                       option);
6541         return -TARGET_EINVAL;
6542     }
6543 }
6544 
6545 #define NEW_STACK_SIZE 0x40000
6546 
6547 
6548 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6549 typedef struct {
6550     CPUArchState *env;
6551     pthread_mutex_t mutex;
6552     pthread_cond_t cond;
6553     pthread_t thread;
6554     uint32_t tid;
6555     abi_ulong child_tidptr;
6556     abi_ulong parent_tidptr;
6557     sigset_t sigmask;
6558 } new_thread_info;
6559 
6560 static void *clone_func(void *arg)
6561 {
6562     new_thread_info *info = arg;
6563     CPUArchState *env;
6564     CPUState *cpu;
6565     TaskState *ts;
6566 
6567     rcu_register_thread();
6568     tcg_register_thread();
6569     env = info->env;
6570     cpu = env_cpu(env);
6571     thread_cpu = cpu;
6572     ts = get_task_state(cpu);
6573     info->tid = sys_gettid();
6574     task_settid(ts);
6575     if (info->child_tidptr)
6576         put_user_u32(info->tid, info->child_tidptr);
6577     if (info->parent_tidptr)
6578         put_user_u32(info->tid, info->parent_tidptr);
6579     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6580     /* Enable signals.  */
6581     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6582     /* Signal to the parent that we're ready.  */
6583     pthread_mutex_lock(&info->mutex);
6584     pthread_cond_broadcast(&info->cond);
6585     pthread_mutex_unlock(&info->mutex);
6586     /* Wait until the parent has finished initializing the tls state.  */
6587     pthread_mutex_lock(&clone_lock);
6588     pthread_mutex_unlock(&clone_lock);
6589     cpu_loop(env);
6590     /* never exits */
6591     return NULL;
6592 }
6593 
6594 /* do_fork() must return host values and target errnos (unlike most
6595    do_*() functions). */
6596 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6597                    abi_ulong parent_tidptr, target_ulong newtls,
6598                    abi_ulong child_tidptr)
6599 {
6600     CPUState *cpu = env_cpu(env);
6601     int ret;
6602     TaskState *ts;
6603     CPUState *new_cpu;
6604     CPUArchState *new_env;
6605     sigset_t sigmask;
6606 
6607     flags &= ~CLONE_IGNORED_FLAGS;
6608 
6609     /* Emulate vfork() with fork() */
6610     if (flags & CLONE_VFORK)
6611         flags &= ~(CLONE_VFORK | CLONE_VM);
6612 
6613     if (flags & CLONE_VM) {
6614         TaskState *parent_ts = get_task_state(cpu);
6615         new_thread_info info;
6616         pthread_attr_t attr;
6617 
6618         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6619             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6620             return -TARGET_EINVAL;
6621         }
6622 
6623         ts = g_new0(TaskState, 1);
6624         init_task_state(ts);
6625 
6626         /* Grab a mutex so that thread setup appears atomic.  */
6627         pthread_mutex_lock(&clone_lock);
6628 
6629         /*
6630          * If this is our first additional thread, we need to ensure we
6631          * generate code for parallel execution and flush old translations.
6632          * Do this now so that the copy gets CF_PARALLEL too.
6633          */
6634         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6635             tcg_cflags_set(cpu, CF_PARALLEL);
6636             tb_flush(cpu);
6637         }
6638 
6639         /* we create a new CPU instance. */
6640         new_env = cpu_copy(env);
6641         /* Init regs that differ from the parent.  */
6642         cpu_clone_regs_child(new_env, newsp, flags);
6643         cpu_clone_regs_parent(env, flags);
6644         new_cpu = env_cpu(new_env);
6645         new_cpu->opaque = ts;
6646         ts->bprm = parent_ts->bprm;
6647         ts->info = parent_ts->info;
6648         ts->signal_mask = parent_ts->signal_mask;
6649 
6650         if (flags & CLONE_CHILD_CLEARTID) {
6651             ts->child_tidptr = child_tidptr;
6652         }
6653 
6654         if (flags & CLONE_SETTLS) {
6655             cpu_set_tls (new_env, newtls);
6656         }
6657 
6658         memset(&info, 0, sizeof(info));
6659         pthread_mutex_init(&info.mutex, NULL);
6660         pthread_mutex_lock(&info.mutex);
6661         pthread_cond_init(&info.cond, NULL);
6662         info.env = new_env;
6663         if (flags & CLONE_CHILD_SETTID) {
6664             info.child_tidptr = child_tidptr;
6665         }
6666         if (flags & CLONE_PARENT_SETTID) {
6667             info.parent_tidptr = parent_tidptr;
6668         }
6669 
6670         ret = pthread_attr_init(&attr);
6671         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6672         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6673         /* It is not safe to deliver signals until the child has finished
6674            initializing, so temporarily block all signals.  */
6675         sigfillset(&sigmask);
6676         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6677         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6678 
6679         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6680         /* TODO: Free new CPU state if thread creation failed.  */
6681 
6682         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6683         pthread_attr_destroy(&attr);
6684         if (ret == 0) {
6685             /* Wait for the child to initialize.  */
6686             pthread_cond_wait(&info.cond, &info.mutex);
6687             ret = info.tid;
6688         } else {
6689             ret = -1;
6690         }
6691         pthread_mutex_unlock(&info.mutex);
6692         pthread_cond_destroy(&info.cond);
6693         pthread_mutex_destroy(&info.mutex);
6694         pthread_mutex_unlock(&clone_lock);
6695     } else {
6696         /* if no CLONE_VM, we consider it a fork */
6697         if (flags & CLONE_INVALID_FORK_FLAGS) {
6698             return -TARGET_EINVAL;
6699         }
6700 
6701         /* We can't support custom termination signals */
6702         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6703             return -TARGET_EINVAL;
6704         }
6705 
6706 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6707         if (flags & CLONE_PIDFD) {
6708             return -TARGET_EINVAL;
6709         }
6710 #endif
6711 
6712         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6713         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6714             return -TARGET_EINVAL;
6715         }
6716 
6717         if (block_signals()) {
6718             return -QEMU_ERESTARTSYS;
6719         }
6720 
6721         fork_start();
6722         ret = fork();
6723         if (ret == 0) {
6724             /* Child Process.  */
6725             cpu_clone_regs_child(env, newsp, flags);
6726             fork_end(ret);
6727             /* There is a race condition here.  The parent process could
6728                theoretically read the TID in the child process before the child
6729                tid is set.  This would require using either ptrace
6730                (not implemented) or having *_tidptr to point at a shared memory
6731                mapping.  We can't repeat the spinlock hack used above because
6732                the child process gets its own copy of the lock.  */
6733             if (flags & CLONE_CHILD_SETTID)
6734                 put_user_u32(sys_gettid(), child_tidptr);
6735             if (flags & CLONE_PARENT_SETTID)
6736                 put_user_u32(sys_gettid(), parent_tidptr);
6737             ts = get_task_state(cpu);
6738             if (flags & CLONE_SETTLS)
6739                 cpu_set_tls (env, newtls);
6740             if (flags & CLONE_CHILD_CLEARTID)
6741                 ts->child_tidptr = child_tidptr;
6742         } else {
6743             cpu_clone_regs_parent(env, flags);
6744             if (flags & CLONE_PIDFD) {
6745                 int pid_fd = 0;
6746 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6747                 int pid_child = ret;
6748                 pid_fd = pidfd_open(pid_child, 0);
6749                 if (pid_fd >= 0) {
6750                     qemu_set_cloexec(pid_fd);
6751                 } else {
6752                     pid_fd = 0;
6753                 }
6754 #endif
6755                 put_user_u32(pid_fd, parent_tidptr);
6756             }
6757             fork_end(ret);
6758         }
6759         g_assert(!cpu_in_exclusive_context(cpu));
6760     }
6761     return ret;
6762 }
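/*
 * Note on return values: in the CLONE_VM (thread) path the value handed
 * back to the guest is the host TID of the new thread, published by
 * clone_func() via info.tid, while the plain fork path returns whatever
 * fork() returned; both are host values, matching the comment above.
 */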
6763 
6764 /* warning: doesn't handle Linux-specific flags... */
6765 static int target_to_host_fcntl_cmd(int cmd)
6766 {
6767     int ret;
6768 
6769     switch(cmd) {
6770     case TARGET_F_DUPFD:
6771     case TARGET_F_GETFD:
6772     case TARGET_F_SETFD:
6773     case TARGET_F_GETFL:
6774     case TARGET_F_SETFL:
6775     case TARGET_F_OFD_GETLK:
6776     case TARGET_F_OFD_SETLK:
6777     case TARGET_F_OFD_SETLKW:
6778         ret = cmd;
6779         break;
6780     case TARGET_F_GETLK:
6781         ret = F_GETLK;
6782         break;
6783     case TARGET_F_SETLK:
6784         ret = F_SETLK;
6785         break;
6786     case TARGET_F_SETLKW:
6787         ret = F_SETLKW;
6788         break;
6789     case TARGET_F_GETOWN:
6790         ret = F_GETOWN;
6791         break;
6792     case TARGET_F_SETOWN:
6793         ret = F_SETOWN;
6794         break;
6795     case TARGET_F_GETSIG:
6796         ret = F_GETSIG;
6797         break;
6798     case TARGET_F_SETSIG:
6799         ret = F_SETSIG;
6800         break;
6801 #if TARGET_ABI_BITS == 32
6802     case TARGET_F_GETLK64:
6803         ret = F_GETLK;
6804         break;
6805     case TARGET_F_SETLK64:
6806         ret = F_SETLK;
6807         break;
6808     case TARGET_F_SETLKW64:
6809         ret = F_SETLKW;
6810         break;
6811 #endif
6812     case TARGET_F_SETLEASE:
6813         ret = F_SETLEASE;
6814         break;
6815     case TARGET_F_GETLEASE:
6816         ret = F_GETLEASE;
6817         break;
6818 #ifdef F_DUPFD_CLOEXEC
6819     case TARGET_F_DUPFD_CLOEXEC:
6820         ret = F_DUPFD_CLOEXEC;
6821         break;
6822 #endif
6823     case TARGET_F_NOTIFY:
6824         ret = F_NOTIFY;
6825         break;
6826 #ifdef F_GETOWN_EX
6827     case TARGET_F_GETOWN_EX:
6828         ret = F_GETOWN_EX;
6829         break;
6830 #endif
6831 #ifdef F_SETOWN_EX
6832     case TARGET_F_SETOWN_EX:
6833         ret = F_SETOWN_EX;
6834         break;
6835 #endif
6836 #ifdef F_SETPIPE_SZ
6837     case TARGET_F_SETPIPE_SZ:
6838         ret = F_SETPIPE_SZ;
6839         break;
6840     case TARGET_F_GETPIPE_SZ:
6841         ret = F_GETPIPE_SZ;
6842         break;
6843 #endif
6844 #ifdef F_ADD_SEALS
6845     case TARGET_F_ADD_SEALS:
6846         ret = F_ADD_SEALS;
6847         break;
6848     case TARGET_F_GET_SEALS:
6849         ret = F_GET_SEALS;
6850         break;
6851 #endif
6852     default:
6853         ret = -TARGET_EINVAL;
6854         break;
6855     }
6856 
6857 #if defined(__powerpc64__)
6858     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6859      * 14, which are not supported by the kernel. The glibc fcntl call actually
6860      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6861      * the syscall directly, adjust to what is supported by the kernel.
6862      */
6863     if (ret >= F_GETLK && ret <= F_SETLKW) {
6864         ret -= F_GETLK - 5;
6865     }
6866 #endif
6867 
6868     return ret;
6869 }
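/*
 * On 32-bit ABIs the F_*LK64 commands above are mapped to the host's plain
 * F_GETLK/F_SETLK/F_SETLKW; this works because the QEMU build uses a
 * 64-bit off_t, so the host struct flock can already describe 64-bit
 * ranges (presumably via 64-bit file offsets on 32-bit hosts).
 */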
6870 
6871 #define FLOCK_TRANSTBL \
6872     switch (type) { \
6873     TRANSTBL_CONVERT(F_RDLCK); \
6874     TRANSTBL_CONVERT(F_WRLCK); \
6875     TRANSTBL_CONVERT(F_UNLCK); \
6876     }
6877 
6878 static int target_to_host_flock(int type)
6879 {
6880 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6881     FLOCK_TRANSTBL
6882 #undef  TRANSTBL_CONVERT
6883     return -TARGET_EINVAL;
6884 }
6885 
6886 static int host_to_target_flock(int type)
6887 {
6888 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6889     FLOCK_TRANSTBL
6890 #undef  TRANSTBL_CONVERT
6891     /* if we don't know how to convert the value coming
6892      * from the host, we copy it to the target field as-is
6893      */
6894     return type;
6895 }
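/*
 * For reference, FLOCK_TRANSTBL expands to a plain switch; in
 * target_to_host_flock() above it becomes:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 */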
6896 
6897 static inline abi_long copy_from_user_flock(struct flock *fl,
6898                                             abi_ulong target_flock_addr)
6899 {
6900     struct target_flock *target_fl;
6901     int l_type;
6902 
6903     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6904         return -TARGET_EFAULT;
6905     }
6906 
6907     __get_user(l_type, &target_fl->l_type);
6908     l_type = target_to_host_flock(l_type);
6909     if (l_type < 0) {
6910         return l_type;
6911     }
6912     fl->l_type = l_type;
6913     __get_user(fl->l_whence, &target_fl->l_whence);
6914     __get_user(fl->l_start, &target_fl->l_start);
6915     __get_user(fl->l_len, &target_fl->l_len);
6916     __get_user(fl->l_pid, &target_fl->l_pid);
6917     unlock_user_struct(target_fl, target_flock_addr, 0);
6918     return 0;
6919 }
6920 
6921 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6922                                           const struct flock *fl)
6923 {
6924     struct target_flock *target_fl;
6925     short l_type;
6926 
6927     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6928         return -TARGET_EFAULT;
6929     }
6930 
6931     l_type = host_to_target_flock(fl->l_type);
6932     __put_user(l_type, &target_fl->l_type);
6933     __put_user(fl->l_whence, &target_fl->l_whence);
6934     __put_user(fl->l_start, &target_fl->l_start);
6935     __put_user(fl->l_len, &target_fl->l_len);
6936     __put_user(fl->l_pid, &target_fl->l_pid);
6937     unlock_user_struct(target_fl, target_flock_addr, 1);
6938     return 0;
6939 }
6940 
6941 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6942 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6943 
6944 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6945 struct target_oabi_flock64 {
6946     abi_short l_type;
6947     abi_short l_whence;
6948     abi_llong l_start;
6949     abi_llong l_len;
6950     abi_int   l_pid;
6951 } QEMU_PACKED;
6952 
6953 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6954                                                    abi_ulong target_flock_addr)
6955 {
6956     struct target_oabi_flock64 *target_fl;
6957     int l_type;
6958 
6959     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6960         return -TARGET_EFAULT;
6961     }
6962 
6963     __get_user(l_type, &target_fl->l_type);
6964     l_type = target_to_host_flock(l_type);
6965     if (l_type < 0) {
6966         return l_type;
6967     }
6968     fl->l_type = l_type;
6969     __get_user(fl->l_whence, &target_fl->l_whence);
6970     __get_user(fl->l_start, &target_fl->l_start);
6971     __get_user(fl->l_len, &target_fl->l_len);
6972     __get_user(fl->l_pid, &target_fl->l_pid);
6973     unlock_user_struct(target_fl, target_flock_addr, 0);
6974     return 0;
6975 }
6976 
6977 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6978                                                  const struct flock *fl)
6979 {
6980     struct target_oabi_flock64 *target_fl;
6981     short l_type;
6982 
6983     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6984         return -TARGET_EFAULT;
6985     }
6986 
6987     l_type = host_to_target_flock(fl->l_type);
6988     __put_user(l_type, &target_fl->l_type);
6989     __put_user(fl->l_whence, &target_fl->l_whence);
6990     __put_user(fl->l_start, &target_fl->l_start);
6991     __put_user(fl->l_len, &target_fl->l_len);
6992     __put_user(fl->l_pid, &target_fl->l_pid);
6993     unlock_user_struct(target_fl, target_flock_addr, 1);
6994     return 0;
6995 }
6996 #endif
6997 
6998 static inline abi_long copy_from_user_flock64(struct flock *fl,
6999                                               abi_ulong target_flock_addr)
7000 {
7001     struct target_flock64 *target_fl;
7002     int l_type;
7003 
7004     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7005         return -TARGET_EFAULT;
7006     }
7007 
7008     __get_user(l_type, &target_fl->l_type);
7009     l_type = target_to_host_flock(l_type);
7010     if (l_type < 0) {
7011         return l_type;
7012     }
7013     fl->l_type = l_type;
7014     __get_user(fl->l_whence, &target_fl->l_whence);
7015     __get_user(fl->l_start, &target_fl->l_start);
7016     __get_user(fl->l_len, &target_fl->l_len);
7017     __get_user(fl->l_pid, &target_fl->l_pid);
7018     unlock_user_struct(target_fl, target_flock_addr, 0);
7019     return 0;
7020 }
7021 
7022 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7023                                             const struct flock *fl)
7024 {
7025     struct target_flock64 *target_fl;
7026     short l_type;
7027 
7028     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7029         return -TARGET_EFAULT;
7030     }
7031 
7032     l_type = host_to_target_flock(fl->l_type);
7033     __put_user(l_type, &target_fl->l_type);
7034     __put_user(fl->l_whence, &target_fl->l_whence);
7035     __put_user(fl->l_start, &target_fl->l_start);
7036     __put_user(fl->l_len, &target_fl->l_len);
7037     __put_user(fl->l_pid, &target_fl->l_pid);
7038     unlock_user_struct(target_fl, target_flock_addr, 1);
7039     return 0;
7040 }
7041 
7042 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7043 {
7044     struct flock fl;
7045 #ifdef F_GETOWN_EX
7046     struct f_owner_ex fox;
7047     struct target_f_owner_ex *target_fox;
7048 #endif
7049     abi_long ret;
7050     int host_cmd = target_to_host_fcntl_cmd(cmd);
7051 
7052     if (host_cmd == -TARGET_EINVAL)
7053 	    return host_cmd;
7054 
7055     switch(cmd) {
7056     case TARGET_F_GETLK:
7057         ret = copy_from_user_flock(&fl, arg);
7058         if (ret) {
7059             return ret;
7060         }
7061         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7062         if (ret == 0) {
7063             ret = copy_to_user_flock(arg, &fl);
7064         }
7065         break;
7066 
7067     case TARGET_F_SETLK:
7068     case TARGET_F_SETLKW:
7069         ret = copy_from_user_flock(&fl, arg);
7070         if (ret) {
7071             return ret;
7072         }
7073         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7074         break;
7075 
7076     case TARGET_F_GETLK64:
7077     case TARGET_F_OFD_GETLK:
7078         ret = copy_from_user_flock64(&fl, arg);
7079         if (ret) {
7080             return ret;
7081         }
7082         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7083         if (ret == 0) {
7084             ret = copy_to_user_flock64(arg, &fl);
7085         }
7086         break;
7087     case TARGET_F_SETLK64:
7088     case TARGET_F_SETLKW64:
7089     case TARGET_F_OFD_SETLK:
7090     case TARGET_F_OFD_SETLKW:
7091         ret = copy_from_user_flock64(&fl, arg);
7092         if (ret) {
7093             return ret;
7094         }
7095         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7096         break;
7097 
7098     case TARGET_F_GETFL:
7099         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7100         if (ret >= 0) {
7101             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7102             /* tell 32-bit guests that the fd has large-file support on 64-bit hosts: */
7103             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7104                 ret |= TARGET_O_LARGEFILE;
7105             }
7106         }
7107         break;
7108 
7109     case TARGET_F_SETFL:
7110         ret = get_errno(safe_fcntl(fd, host_cmd,
7111                                    target_to_host_bitmask(arg,
7112                                                           fcntl_flags_tbl)));
7113         break;
7114 
7115 #ifdef F_GETOWN_EX
7116     case TARGET_F_GETOWN_EX:
7117         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7118         if (ret >= 0) {
7119             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7120                 return -TARGET_EFAULT;
7121             target_fox->type = tswap32(fox.type);
7122             target_fox->pid = tswap32(fox.pid);
7123             unlock_user_struct(target_fox, arg, 1);
7124         }
7125         break;
7126 #endif
7127 
7128 #ifdef F_SETOWN_EX
7129     case TARGET_F_SETOWN_EX:
7130         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7131             return -TARGET_EFAULT;
7132         fox.type = tswap32(target_fox->type);
7133         fox.pid = tswap32(target_fox->pid);
7134         unlock_user_struct(target_fox, arg, 0);
7135         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7136         break;
7137 #endif
7138 
7139     case TARGET_F_SETSIG:
7140         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7141         break;
7142 
7143     case TARGET_F_GETSIG:
7144         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7145         break;
7146 
7147     case TARGET_F_SETOWN:
7148     case TARGET_F_GETOWN:
7149     case TARGET_F_SETLEASE:
7150     case TARGET_F_GETLEASE:
7151     case TARGET_F_SETPIPE_SZ:
7152     case TARGET_F_GETPIPE_SZ:
7153     case TARGET_F_ADD_SEALS:
7154     case TARGET_F_GET_SEALS:
7155         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7156         break;
7157 
7158     default:
7159         ret = get_errno(safe_fcntl(fd, cmd, arg));
7160         break;
7161     }
7162     return ret;
7163 }
7164 
7165 #ifdef USE_UID16
7166 
7167 static inline int high2lowuid(int uid)
7168 {
7169     if (uid > 65535)
7170         return 65534;
7171     else
7172         return uid;
7173 }
7174 
7175 static inline int high2lowgid(int gid)
7176 {
7177     if (gid > 65535)
7178         return 65534;
7179     else
7180         return gid;
7181 }
7182 
7183 static inline int low2highuid(int uid)
7184 {
7185     if ((int16_t)uid == -1)
7186         return -1;
7187     else
7188         return uid;
7189 }
7190 
7191 static inline int low2highgid(int gid)
7192 {
7193     if ((int16_t)gid == -1)
7194         return -1;
7195     else
7196         return gid;
7197 }
7198 static inline int tswapid(int id)
7199 {
7200     return tswap16(id);
7201 }
7202 
7203 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7204 
7205 #else /* !USE_UID16 */
7206 static inline int high2lowuid(int uid)
7207 {
7208     return uid;
7209 }
7210 static inline int high2lowgid(int gid)
7211 {
7212     return gid;
7213 }
7214 static inline int low2highuid(int uid)
7215 {
7216     return uid;
7217 }
7218 static inline int low2highgid(int gid)
7219 {
7220     return gid;
7221 }
7222 static inline int tswapid(int id)
7223 {
7224     return tswap32(id);
7225 }
7226 
7227 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7228 
7229 #endif /* USE_UID16 */
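/*
 * Example of the UID16 handling above: a host uid of 100000 is reported to
 * a 16-bit-uid guest as 65534 (the traditional overflow value), while the
 * sentinel -1 passed by the guest is widened back to -1 so that the
 * "don't change" semantics of the set*uid() family are preserved.
 */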
7230 
7231 /* We must do direct syscalls for setting UID/GID, because we want to
7232  * implement the Linux system call semantics of "change only for this thread",
7233  * not the libc/POSIX semantics of "change for all threads in process".
7234  * (See http://ewontfix.com/17/ for more details.)
7235  * We use the 32-bit version of the syscalls if present; if it is not
7236  * then either the host architecture supports 32-bit UIDs natively with
7237  * the standard syscall, or the 16-bit UID is the best we can do.
7238  */
7239 #ifdef __NR_setuid32
7240 #define __NR_sys_setuid __NR_setuid32
7241 #else
7242 #define __NR_sys_setuid __NR_setuid
7243 #endif
7244 #ifdef __NR_setgid32
7245 #define __NR_sys_setgid __NR_setgid32
7246 #else
7247 #define __NR_sys_setgid __NR_setgid
7248 #endif
7249 #ifdef __NR_setresuid32
7250 #define __NR_sys_setresuid __NR_setresuid32
7251 #else
7252 #define __NR_sys_setresuid __NR_setresuid
7253 #endif
7254 #ifdef __NR_setresgid32
7255 #define __NR_sys_setresgid __NR_setresgid32
7256 #else
7257 #define __NR_sys_setresgid __NR_setresgid
7258 #endif
7259 #ifdef __NR_setgroups32
7260 #define __NR_sys_setgroups __NR_setgroups32
7261 #else
7262 #define __NR_sys_setgroups __NR_setgroups
7263 #endif
7264 #ifdef __NR_sys_setreuid32
7265 #define __NR_sys_setreuid __NR_setreuid32
7266 #else
7267 #define __NR_sys_setreuid __NR_setreuid
7268 #endif
7269 #ifdef __NR_sys_setregid32
7270 #define __NR_sys_setregid __NR_setregid32
7271 #else
7272 #define __NR_sys_setregid __NR_setregid
7273 #endif
7274 
7275 _syscall1(int, sys_setuid, uid_t, uid)
7276 _syscall1(int, sys_setgid, gid_t, gid)
7277 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7278 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7279 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7280 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7281 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7282 
7283 void syscall_init(void)
7284 {
7285     IOCTLEntry *ie;
7286     const argtype *arg_type;
7287     int size;
7288 
7289     thunk_init(STRUCT_MAX);
7290 
7291 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7292 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7293 #include "syscall_types.h"
7294 #undef STRUCT
7295 #undef STRUCT_SPECIAL
7296 
7297     /* we patch the ioctl size if necessary. We rely on the fact that
7298        no ioctl has all the bits at '1' in the size field */
7299     ie = ioctl_entries;
7300     while (ie->target_cmd != 0) {
7301         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7302             TARGET_IOC_SIZEMASK) {
7303             arg_type = ie->arg_type;
7304             if (arg_type[0] != TYPE_PTR) {
7305                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7306                         ie->target_cmd);
7307                 exit(1);
7308             }
7309             arg_type++;
7310             size = thunk_type_size(arg_type, 0);
7311             ie->target_cmd = (ie->target_cmd &
7312                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7313                 (size << TARGET_IOC_SIZESHIFT);
7314         }
7315 
7316         /* automatic consistency check if same arch */
7317 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7318     (defined(__x86_64__) && defined(TARGET_X86_64))
7319         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7320             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7321                     ie->name, ie->target_cmd, ie->host_cmd);
7322         }
7323 #endif
7324         ie++;
7325     }
7326 }
7327 
7328 #ifdef TARGET_NR_truncate64
7329 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7330                                          abi_long arg2,
7331                                          abi_long arg3,
7332                                          abi_long arg4)
7333 {
7334     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7335         arg2 = arg3;
7336         arg3 = arg4;
7337     }
7338     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7339 }
7340 #endif
7341 
7342 #ifdef TARGET_NR_ftruncate64
7343 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7344                                           abi_long arg2,
7345                                           abi_long arg3,
7346                                           abi_long arg4)
7347 {
7348     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7349         arg2 = arg3;
7350         arg3 = arg4;
7351     }
7352     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7353 }
7354 #endif
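/*
 * The regpairs_aligned() shuffling in the two wrappers above handles ABIs
 * (e.g. 32-bit ARM EABI) where a 64-bit syscall argument must start in an
 * even-numbered register: the kernel interface inserts a padding argument,
 * so the low/high halves of the offset arrive one slot later than usual.
 */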
7355 
7356 #if defined(TARGET_NR_timer_settime) || \
7357     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7358 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7359                                                  abi_ulong target_addr)
7360 {
7361     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7362                                 offsetof(struct target_itimerspec,
7363                                          it_interval)) ||
7364         target_to_host_timespec(&host_its->it_value, target_addr +
7365                                 offsetof(struct target_itimerspec,
7366                                          it_value))) {
7367         return -TARGET_EFAULT;
7368     }
7369 
7370     return 0;
7371 }
7372 #endif
7373 
7374 #if defined(TARGET_NR_timer_settime64) || \
7375     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7376 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7377                                                    abi_ulong target_addr)
7378 {
7379     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7380                                   offsetof(struct target__kernel_itimerspec,
7381                                            it_interval)) ||
7382         target_to_host_timespec64(&host_its->it_value, target_addr +
7383                                   offsetof(struct target__kernel_itimerspec,
7384                                            it_value))) {
7385         return -TARGET_EFAULT;
7386     }
7387 
7388     return 0;
7389 }
7390 #endif
7391 
7392 #if ((defined(TARGET_NR_timerfd_gettime) || \
7393       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7394       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7395 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7396                                                  struct itimerspec *host_its)
7397 {
7398     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7399                                                        it_interval),
7400                                 &host_its->it_interval) ||
7401         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7402                                                        it_value),
7403                                 &host_its->it_value)) {
7404         return -TARGET_EFAULT;
7405     }
7406     return 0;
7407 }
7408 #endif
7409 
7410 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7411       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7412       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7413 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7414                                                    struct itimerspec *host_its)
7415 {
7416     if (host_to_target_timespec64(target_addr +
7417                                   offsetof(struct target__kernel_itimerspec,
7418                                            it_interval),
7419                                   &host_its->it_interval) ||
7420         host_to_target_timespec64(target_addr +
7421                                   offsetof(struct target__kernel_itimerspec,
7422                                            it_value),
7423                                   &host_its->it_value)) {
7424         return -TARGET_EFAULT;
7425     }
7426     return 0;
7427 }
7428 #endif
7429 
7430 #if defined(TARGET_NR_adjtimex) || \
7431     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7432 static inline abi_long target_to_host_timex(struct timex *host_tx,
7433                                             abi_long target_addr)
7434 {
7435     struct target_timex *target_tx;
7436 
7437     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7438         return -TARGET_EFAULT;
7439     }
7440 
7441     __get_user(host_tx->modes, &target_tx->modes);
7442     __get_user(host_tx->offset, &target_tx->offset);
7443     __get_user(host_tx->freq, &target_tx->freq);
7444     __get_user(host_tx->maxerror, &target_tx->maxerror);
7445     __get_user(host_tx->esterror, &target_tx->esterror);
7446     __get_user(host_tx->status, &target_tx->status);
7447     __get_user(host_tx->constant, &target_tx->constant);
7448     __get_user(host_tx->precision, &target_tx->precision);
7449     __get_user(host_tx->tolerance, &target_tx->tolerance);
7450     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7451     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7452     __get_user(host_tx->tick, &target_tx->tick);
7453     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7454     __get_user(host_tx->jitter, &target_tx->jitter);
7455     __get_user(host_tx->shift, &target_tx->shift);
7456     __get_user(host_tx->stabil, &target_tx->stabil);
7457     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7458     __get_user(host_tx->calcnt, &target_tx->calcnt);
7459     __get_user(host_tx->errcnt, &target_tx->errcnt);
7460     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7461     __get_user(host_tx->tai, &target_tx->tai);
7462 
7463     unlock_user_struct(target_tx, target_addr, 0);
7464     return 0;
7465 }
7466 
7467 static inline abi_long host_to_target_timex(abi_long target_addr,
7468                                             struct timex *host_tx)
7469 {
7470     struct target_timex *target_tx;
7471 
7472     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7473         return -TARGET_EFAULT;
7474     }
7475 
7476     __put_user(host_tx->modes, &target_tx->modes);
7477     __put_user(host_tx->offset, &target_tx->offset);
7478     __put_user(host_tx->freq, &target_tx->freq);
7479     __put_user(host_tx->maxerror, &target_tx->maxerror);
7480     __put_user(host_tx->esterror, &target_tx->esterror);
7481     __put_user(host_tx->status, &target_tx->status);
7482     __put_user(host_tx->constant, &target_tx->constant);
7483     __put_user(host_tx->precision, &target_tx->precision);
7484     __put_user(host_tx->tolerance, &target_tx->tolerance);
7485     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7486     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7487     __put_user(host_tx->tick, &target_tx->tick);
7488     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7489     __put_user(host_tx->jitter, &target_tx->jitter);
7490     __put_user(host_tx->shift, &target_tx->shift);
7491     __put_user(host_tx->stabil, &target_tx->stabil);
7492     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7493     __put_user(host_tx->calcnt, &target_tx->calcnt);
7494     __put_user(host_tx->errcnt, &target_tx->errcnt);
7495     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7496     __put_user(host_tx->tai, &target_tx->tai);
7497 
7498     unlock_user_struct(target_tx, target_addr, 1);
7499     return 0;
7500 }
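
/*
 * Illustrative sketch only (assumed wiring, not the dispatcher itself):
 * the pair of helpers above is meant to bracket a host adjtimex(2) call,
 * converting the guest struct in and writing the updated result back out.
 * The parameter name "target_tx_addr" is made up for the example.
 */
static inline abi_long example_adjtimex(abi_long target_tx_addr)
{
    struct timex host_buf;
    abi_long ret;

    if (target_to_host_timex(&host_buf, target_tx_addr) != 0) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(adjtimex(&host_buf));
    if (!is_error(ret) && host_to_target_timex(target_tx_addr, &host_buf)) {
        return -TARGET_EFAULT;
    }
    return ret;
}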
7501 #endif
7502 
7503 
7504 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7505 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7506                                               abi_long target_addr)
7507 {
7508     struct target__kernel_timex *target_tx;
7509 
7510     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7511                                  offsetof(struct target__kernel_timex,
7512                                           time))) {
7513         return -TARGET_EFAULT;
7514     }
7515 
7516     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7517         return -TARGET_EFAULT;
7518     }
7519 
7520     __get_user(host_tx->modes, &target_tx->modes);
7521     __get_user(host_tx->offset, &target_tx->offset);
7522     __get_user(host_tx->freq, &target_tx->freq);
7523     __get_user(host_tx->maxerror, &target_tx->maxerror);
7524     __get_user(host_tx->esterror, &target_tx->esterror);
7525     __get_user(host_tx->status, &target_tx->status);
7526     __get_user(host_tx->constant, &target_tx->constant);
7527     __get_user(host_tx->precision, &target_tx->precision);
7528     __get_user(host_tx->tolerance, &target_tx->tolerance);
7529     __get_user(host_tx->tick, &target_tx->tick);
7530     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7531     __get_user(host_tx->jitter, &target_tx->jitter);
7532     __get_user(host_tx->shift, &target_tx->shift);
7533     __get_user(host_tx->stabil, &target_tx->stabil);
7534     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7535     __get_user(host_tx->calcnt, &target_tx->calcnt);
7536     __get_user(host_tx->errcnt, &target_tx->errcnt);
7537     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7538     __get_user(host_tx->tai, &target_tx->tai);
7539 
7540     unlock_user_struct(target_tx, target_addr, 0);
7541     return 0;
7542 }
7543 
7544 static inline abi_long host_to_target_timex64(abi_long target_addr,
7545                                               struct timex *host_tx)
7546 {
7547     struct target__kernel_timex *target_tx;
7548 
7549     if (copy_to_user_timeval64(target_addr +
7550                                offsetof(struct target__kernel_timex, time),
7551                                &host_tx->time)) {
7552         return -TARGET_EFAULT;
7553     }
7554 
7555     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7556         return -TARGET_EFAULT;
7557     }
7558 
7559     __put_user(host_tx->modes, &target_tx->modes);
7560     __put_user(host_tx->offset, &target_tx->offset);
7561     __put_user(host_tx->freq, &target_tx->freq);
7562     __put_user(host_tx->maxerror, &target_tx->maxerror);
7563     __put_user(host_tx->esterror, &target_tx->esterror);
7564     __put_user(host_tx->status, &target_tx->status);
7565     __put_user(host_tx->constant, &target_tx->constant);
7566     __put_user(host_tx->precision, &target_tx->precision);
7567     __put_user(host_tx->tolerance, &target_tx->tolerance);
7568     __put_user(host_tx->tick, &target_tx->tick);
7569     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7570     __put_user(host_tx->jitter, &target_tx->jitter);
7571     __put_user(host_tx->shift, &target_tx->shift);
7572     __put_user(host_tx->stabil, &target_tx->stabil);
7573     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7574     __put_user(host_tx->calcnt, &target_tx->calcnt);
7575     __put_user(host_tx->errcnt, &target_tx->errcnt);
7576     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7577     __put_user(host_tx->tai, &target_tx->tai);
7578 
7579     unlock_user_struct(target_tx, target_addr, 1);
7580     return 0;
7581 }
7582 #endif
7583 
7584 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7585 #define sigev_notify_thread_id _sigev_un._tid
7586 #endif
7587 
7588 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7589                                                abi_ulong target_addr)
7590 {
7591     struct target_sigevent *target_sevp;
7592 
7593     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7594         return -TARGET_EFAULT;
7595     }
7596 
7597     /* This union is awkward on 64 bit systems because it has a 32 bit
7598      * integer and a pointer in it; we follow the conversion approach
7599      * used for handling sigval types in signal.c so the guest should get
7600      * the correct value back even if we did a 64 bit byteswap and it's
7601      * using the 32 bit integer.
7602      */
7603     host_sevp->sigev_value.sival_ptr =
7604         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7605     host_sevp->sigev_signo =
7606         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7607     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7608     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7609 
7610     unlock_user_struct(target_sevp, target_addr, 1);
7611     return 0;
7612 }
7613 
7614 #if defined(TARGET_NR_mlockall)
7615 static inline int target_to_host_mlockall_arg(int arg)
7616 {
7617     int result = 0;
7618 
7619     if (arg & TARGET_MCL_CURRENT) {
7620         result |= MCL_CURRENT;
7621     }
7622     if (arg & TARGET_MCL_FUTURE) {
7623         result |= MCL_FUTURE;
7624     }
7625 #ifdef MCL_ONFAULT
7626     if (arg & TARGET_MCL_ONFAULT) {
7627         result |= MCL_ONFAULT;
7628     }
7629 #endif
7630 
7631     return result;
7632 }
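
/*
 * Usage sketch (illustrative): the guest flag word cannot simply be passed
 * through, because the numeric values of the MCL_* constants may differ
 * between the guest ABI and the host, so each known bit is translated
 * individually above.
 */
static inline abi_long example_mlockall(abi_long guest_flags)
{
    return get_errno(mlockall(target_to_host_mlockall_arg(guest_flags)));
}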
7633 #endif
7634 
7635 static inline int target_to_host_msync_arg(abi_long arg)
7636 {
7637     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7638            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7639            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7640            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7641 }
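
/*
 * Worked example (illustrative): the three known MS_* flags are remapped,
 * while any other bit is passed through untouched, presumably so the host
 * msync() can reject it with EINVAL rather than have it silently dropped.
 */
static inline int example_msync_flags(void)
{
    /* MS_SYNC is remapped; the undefined 0x80 bit survives unchanged. */
    return target_to_host_msync_arg(TARGET_MS_SYNC | 0x80);
}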
7642 
7643 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7644      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7645      defined(TARGET_NR_newfstatat))
7646 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7647                                              abi_ulong target_addr,
7648                                              struct stat *host_st)
7649 {
7650 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7651     if (cpu_env->eabi) {
7652         struct target_eabi_stat64 *target_st;
7653 
7654         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7655             return -TARGET_EFAULT;
7656         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7657         __put_user(host_st->st_dev, &target_st->st_dev);
7658         __put_user(host_st->st_ino, &target_st->st_ino);
7659 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7660         __put_user(host_st->st_ino, &target_st->__st_ino);
7661 #endif
7662         __put_user(host_st->st_mode, &target_st->st_mode);
7663         __put_user(host_st->st_nlink, &target_st->st_nlink);
7664         __put_user(host_st->st_uid, &target_st->st_uid);
7665         __put_user(host_st->st_gid, &target_st->st_gid);
7666         __put_user(host_st->st_rdev, &target_st->st_rdev);
7667         __put_user(host_st->st_size, &target_st->st_size);
7668         __put_user(host_st->st_blksize, &target_st->st_blksize);
7669         __put_user(host_st->st_blocks, &target_st->st_blocks);
7670         __put_user(host_st->st_atime, &target_st->target_st_atime);
7671         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7672         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7673 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7674         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7675         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7676         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7677 #endif
7678         unlock_user_struct(target_st, target_addr, 1);
7679     } else
7680 #endif
7681     {
7682 #if defined(TARGET_HAS_STRUCT_STAT64)
7683         struct target_stat64 *target_st;
7684 #else
7685         struct target_stat *target_st;
7686 #endif
7687 
7688         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7689             return -TARGET_EFAULT;
7690         memset(target_st, 0, sizeof(*target_st));
7691         __put_user(host_st->st_dev, &target_st->st_dev);
7692         __put_user(host_st->st_ino, &target_st->st_ino);
7693 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7694         __put_user(host_st->st_ino, &target_st->__st_ino);
7695 #endif
7696         __put_user(host_st->st_mode, &target_st->st_mode);
7697         __put_user(host_st->st_nlink, &target_st->st_nlink);
7698         __put_user(host_st->st_uid, &target_st->st_uid);
7699         __put_user(host_st->st_gid, &target_st->st_gid);
7700         __put_user(host_st->st_rdev, &target_st->st_rdev);
7701         /* XXX: better use of kernel struct */
7702         __put_user(host_st->st_size, &target_st->st_size);
7703         __put_user(host_st->st_blksize, &target_st->st_blksize);
7704         __put_user(host_st->st_blocks, &target_st->st_blocks);
7705         __put_user(host_st->st_atime, &target_st->target_st_atime);
7706         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7707         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7708 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7709         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7710         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7711         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7712 #endif
7713         unlock_user_struct(target_st, target_addr, 1);
7714     }
7715 
7716     return 0;
7717 }
7718 #endif
7719 
7720 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7721 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7722                                             abi_ulong target_addr)
7723 {
7724     struct target_statx *target_stx;
7725 
7726     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7727         return -TARGET_EFAULT;
7728     }
7729     memset(target_stx, 0, sizeof(*target_stx));
7730 
7731     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7732     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7733     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7734     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7735     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7736     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7737     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7738     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7739     __put_user(host_stx->stx_size, &target_stx->stx_size);
7740     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7741     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7742     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7743     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7744     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7745     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7746     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7747     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7748     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7749     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7750     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7751     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7752     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7753     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7754 
7755     unlock_user_struct(target_stx, target_addr, 1);
7756 
7757     return 0;
7758 }
7759 #endif
7760 
7761 static int do_sys_futex(int *uaddr, int op, int val,
7762                          const struct timespec *timeout, int *uaddr2,
7763                          int val3)
7764 {
7765 #if HOST_LONG_BITS == 64
7766 #if defined(__NR_futex)
7767     /* the host always has a 64-bit time_t and defines no _time64 variant */
7768     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7769 
7770 #endif
7771 #else /* HOST_LONG_BITS == 64 */
7772 #if defined(__NR_futex_time64)
7773     if (sizeof(timeout->tv_sec) == 8) {
7774         /* _time64 function on 32bit arch */
7775         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7776     }
7777 #endif
7778 #if defined(__NR_futex)
7779     /* old function on 32bit arch */
7780     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7781 #endif
7782 #endif /* HOST_LONG_BITS == 64 */
7783     g_assert_not_reached();
7784 }
7785 
7786 static int do_safe_futex(int *uaddr, int op, int val,
7787                          const struct timespec *timeout, int *uaddr2,
7788                          int val3)
7789 {
7790 #if HOST_LONG_BITS == 64
7791 #if defined(__NR_futex)
7792     /* the host always has a 64-bit time_t and defines no _time64 variant */
7793     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7794 #endif
7795 #else /* HOST_LONG_BITS == 64 */
7796 #if defined(__NR_futex_time64)
7797     if (sizeof(timeout->tv_sec) == 8) {
7798         /* _time64 function on 32bit arch */
7799         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7800                                            val3));
7801     }
7802 #endif
7803 #if defined(__NR_futex)
7804     /* old function on 32bit arch */
7805     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7806 #endif
7807 #endif /* HOST_LONG_BITS == 64 */
7808     return -TARGET_ENOSYS;
7809 }
7810 
7811 /* ??? Using host futex calls even when target atomic operations
7812    are not really atomic probably breaks things.  However, implementing
7813    futexes locally would make futexes shared between multiple processes
7814    tricky.  In any case they're probably useless, because guest atomic
7815    operations won't work either.  */
7816 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7817 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7818                     int op, int val, target_ulong timeout,
7819                     target_ulong uaddr2, int val3)
7820 {
7821     struct timespec ts, *pts = NULL;
7822     void *haddr2 = NULL;
7823     int base_op;
7824 
7825     /* We assume FUTEX_* constants are the same on both host and target. */
7826 #ifdef FUTEX_CMD_MASK
7827     base_op = op & FUTEX_CMD_MASK;
7828 #else
7829     base_op = op;
7830 #endif
7831     switch (base_op) {
7832     case FUTEX_WAIT:
7833     case FUTEX_WAIT_BITSET:
7834         val = tswap32(val);
7835         break;
7836     case FUTEX_WAIT_REQUEUE_PI:
7837         val = tswap32(val);
7838         haddr2 = g2h(cpu, uaddr2);
7839         break;
7840     case FUTEX_LOCK_PI:
7841     case FUTEX_LOCK_PI2:
7842         break;
7843     case FUTEX_WAKE:
7844     case FUTEX_WAKE_BITSET:
7845     case FUTEX_TRYLOCK_PI:
7846     case FUTEX_UNLOCK_PI:
7847         timeout = 0;
7848         break;
7849     case FUTEX_FD:
7850         val = target_to_host_signal(val);
7851         timeout = 0;
7852         break;
7853     case FUTEX_CMP_REQUEUE:
7854     case FUTEX_CMP_REQUEUE_PI:
7855         val3 = tswap32(val3);
7856         /* fall through */
7857     case FUTEX_REQUEUE:
7858     case FUTEX_WAKE_OP:
7859         /*
7860          * For these, the 4th argument is not TIMEOUT, but VAL2.
7861          * But the prototype of do_safe_futex takes a pointer, so
7862          * insert casts to satisfy the compiler.  We do not need
7863          * to tswap VAL2 since it's not compared to guest memory.
7864           */
7865         pts = (struct timespec *)(uintptr_t)timeout;
7866         timeout = 0;
7867         haddr2 = g2h(cpu, uaddr2);
7868         break;
7869     default:
7870         return -TARGET_ENOSYS;
7871     }
7872     if (timeout) {
7873         pts = &ts;
7874         if (time64
7875             ? target_to_host_timespec64(pts, timeout)
7876             : target_to_host_timespec(pts, timeout)) {
7877             return -TARGET_EFAULT;
7878         }
7879     }
7880     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7881 }
7882 #endif
7883 
7884 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7885 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7886                                      abi_long handle, abi_long mount_id,
7887                                      abi_long flags)
7888 {
7889     struct file_handle *target_fh;
7890     struct file_handle *fh;
7891     int mid = 0;
7892     abi_long ret;
7893     char *name;
7894     unsigned int size, total_size;
7895 
7896     if (get_user_s32(size, handle)) {
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     name = lock_user_string(pathname);
7901     if (!name) {
7902         return -TARGET_EFAULT;
7903     }
7904 
7905     total_size = sizeof(struct file_handle) + size;
7906     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7907     if (!target_fh) {
7908         unlock_user(name, pathname, 0);
7909         return -TARGET_EFAULT;
7910     }
7911 
7912     fh = g_malloc0(total_size);
7913     fh->handle_bytes = size;
7914 
7915     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7916     unlock_user(name, pathname, 0);
7917 
7918     /* man name_to_handle_at(2):
7919      * Other than the use of the handle_bytes field, the caller should treat
7920      * the file_handle structure as an opaque data type
7921      */
7922 
7923     memcpy(target_fh, fh, total_size);
7924     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7925     target_fh->handle_type = tswap32(fh->handle_type);
7926     g_free(fh);
7927     unlock_user(target_fh, handle, total_size);
7928 
7929     if (put_user_s32(mid, mount_id)) {
7930         return -TARGET_EFAULT;
7931     }
7932 
7933     return ret;
7934 
7935 }
7936 #endif
7937 
7938 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7939 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7940                                      abi_long flags)
7941 {
7942     struct file_handle *target_fh;
7943     struct file_handle *fh;
7944     unsigned int size, total_size;
7945     abi_long ret;
7946 
7947     if (get_user_s32(size, handle)) {
7948         return -TARGET_EFAULT;
7949     }
7950 
7951     total_size = sizeof(struct file_handle) + size;
7952     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7953     if (!target_fh) {
7954         return -TARGET_EFAULT;
7955     }
7956 
7957     fh = g_memdup(target_fh, total_size);
7958     fh->handle_bytes = size;
7959     fh->handle_type = tswap32(target_fh->handle_type);
7960 
7961     ret = get_errno(open_by_handle_at(mount_fd, fh,
7962                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7963 
7964     g_free(fh);
7965 
7966     unlock_user(target_fh, handle, total_size);
7967 
7968     return ret;
7969 }
7970 #endif
7971 
7972 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7973 
7974 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7975 {
7976     int host_flags;
7977     target_sigset_t *target_mask;
7978     sigset_t host_mask;
7979     abi_long ret;
7980 
7981     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7982         return -TARGET_EINVAL;
7983     }
7984     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7985         return -TARGET_EFAULT;
7986     }
7987 
7988     target_to_host_sigset(&host_mask, target_mask);
7989 
7990     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7991 
7992     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7993     if (ret >= 0) {
7994         fd_trans_register(ret, &target_signalfd_trans);
7995     }
7996 
7997     unlock_user_struct(target_mask, mask, 0);
7998 
7999     return ret;
8000 }
8001 #endif
8002 
8003 /* Map host to target signal numbers for the wait family of syscalls.
8004    Assume all other status bits are the same.  */
8005 int host_to_target_waitstatus(int status)
8006 {
8007     if (WIFSIGNALED(status)) {
8008         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8009     }
8010     if (WIFSTOPPED(status)) {
8011         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8012                | (status & 0xff);
8013     }
8014     return status;
8015 }
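
/*
 * Worked example (illustrative): for a child terminated by host SIGUSR1
 * the raw wait status equals the signal number, so WIFSIGNALED() is true
 * and only the low 7 bits are rewritten; the core-dump bit (0x80) and the
 * exit-status byte, when present, pass through unchanged.
 */
static inline int example_waitstatus(void)
{
    return host_to_target_waitstatus(SIGUSR1); /* killed by SIGUSR1, no core */
}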
8016 
8017 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8018 {
8019     CPUState *cpu = env_cpu(cpu_env);
8020     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8021     int i;
8022 
8023     for (i = 0; i < bprm->argc; i++) {
8024         size_t len = strlen(bprm->argv[i]) + 1;
8025 
8026         if (write(fd, bprm->argv[i], len) != len) {
8027             return -1;
8028         }
8029     }
8030 
8031     return 0;
8032 }
8033 
8034 struct open_self_maps_data {
8035     TaskState *ts;
8036     IntervalTreeRoot *host_maps;
8037     int fd;
8038     bool smaps;
8039 };
8040 
8041 /*
8042  * Subroutine to output one line of /proc/self/maps,
8043  * or one region of /proc/self/smaps.
8044  */
8045 
8046 #ifdef TARGET_HPPA
8047 # define test_stack(S, E, L)  (E == L)
8048 #else
8049 # define test_stack(S, E, L)  (S == L)
8050 #endif
8051 
8052 static void open_self_maps_4(const struct open_self_maps_data *d,
8053                              const MapInfo *mi, abi_ptr start,
8054                              abi_ptr end, unsigned flags)
8055 {
8056     const struct image_info *info = d->ts->info;
8057     const char *path = mi->path;
8058     uint64_t offset;
8059     int fd = d->fd;
8060     int count;
8061 
8062     if (test_stack(start, end, info->stack_limit)) {
8063         path = "[stack]";
8064     } else if (start == info->brk) {
8065         path = "[heap]";
8066     } else if (start == info->vdso) {
8067         path = "[vdso]";
8068 #ifdef TARGET_X86_64
8069     } else if (start == TARGET_VSYSCALL_PAGE) {
8070         path = "[vsyscall]";
8071 #endif
8072     }
8073 
8074     /* Except for the null device (MAP_ANON), adjust offset for this fragment. */
8075     offset = mi->offset;
8076     if (mi->dev) {
8077         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8078         offset += hstart - mi->itree.start;
8079     }
8080 
8081     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8082                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8083                     start, end,
8084                     (flags & PAGE_READ) ? 'r' : '-',
8085                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8086                     (flags & PAGE_EXEC) ? 'x' : '-',
8087                     mi->is_priv ? 'p' : 's',
8088                     offset, major(mi->dev), minor(mi->dev),
8089                     (uint64_t)mi->inode);
8090     if (path) {
8091         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8092     } else {
8093         dprintf(fd, "\n");
8094     }
8095 
8096     if (d->smaps) {
8097         unsigned long size = end - start;
8098         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8099         unsigned long size_kb = size >> 10;
8100 
8101         dprintf(fd, "Size:                  %lu kB\n"
8102                 "KernelPageSize:        %lu kB\n"
8103                 "MMUPageSize:           %lu kB\n"
8104                 "Rss:                   0 kB\n"
8105                 "Pss:                   0 kB\n"
8106                 "Pss_Dirty:             0 kB\n"
8107                 "Shared_Clean:          0 kB\n"
8108                 "Shared_Dirty:          0 kB\n"
8109                 "Private_Clean:         0 kB\n"
8110                 "Private_Dirty:         0 kB\n"
8111                 "Referenced:            0 kB\n"
8112                 "Anonymous:             %lu kB\n"
8113                 "LazyFree:              0 kB\n"
8114                 "AnonHugePages:         0 kB\n"
8115                 "ShmemPmdMapped:        0 kB\n"
8116                 "FilePmdMapped:         0 kB\n"
8117                 "Shared_Hugetlb:        0 kB\n"
8118                 "Private_Hugetlb:       0 kB\n"
8119                 "Swap:                  0 kB\n"
8120                 "SwapPss:               0 kB\n"
8121                 "Locked:                0 kB\n"
8122                 "THPeligible:    0\n"
8123                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8124                 size_kb, page_size_kb, page_size_kb,
8125                 (flags & PAGE_ANON ? size_kb : 0),
8126                 (flags & PAGE_READ) ? " rd" : "",
8127                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8128                 (flags & PAGE_EXEC) ? " ex" : "",
8129                 mi->is_priv ? "" : " sh",
8130                 (flags & PAGE_READ) ? " mr" : "",
8131                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8132                 (flags & PAGE_EXEC) ? " me" : "",
8133                 mi->is_priv ? "" : " ms");
8134     }
8135 }
8136 
8137 /*
8138  * Callback for walk_memory_regions, when read_self_maps() fails.
8139  * Proceed without the benefit of host /proc/self/maps cross-check.
8140  */
8141 static int open_self_maps_3(void *opaque, vaddr guest_start,
8142                             vaddr guest_end, int flags)
8143 {
8144     static const MapInfo mi = { .is_priv = true };
8145 
8146     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8147     return 0;
8148 }
8149 
8150 /*
8151  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8152  */
8153 static int open_self_maps_2(void *opaque, vaddr guest_start,
8154                             vaddr guest_end, int flags)
8155 {
8156     const struct open_self_maps_data *d = opaque;
8157     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8158     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8159 
8160 #ifdef TARGET_X86_64
8161     /*
8162      * Because of the extremely high position of the page within the guest
8163      * virtual address space, this is not backed by host memory at all.
8164      * Therefore the loop below would fail.  This is the only instance
8165      * of not having host backing memory.
8166      */
8167     if (guest_start == TARGET_VSYSCALL_PAGE) {
8168         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8169     }
8170 #endif
8171 
8172     while (1) {
8173         IntervalTreeNode *n =
8174             interval_tree_iter_first(d->host_maps, host_start, host_start);
8175         MapInfo *mi = container_of(n, MapInfo, itree);
8176         uintptr_t this_hlast = MIN(host_last, n->last);
8177         target_ulong this_gend = h2g(this_hlast) + 1;
8178 
8179         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8180 
8181         if (this_hlast == host_last) {
8182             return 0;
8183         }
8184         host_start = this_hlast + 1;
8185         guest_start = h2g(host_start);
8186     }
8187 }
8188 
8189 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8190 {
8191     struct open_self_maps_data d = {
8192         .ts = get_task_state(env_cpu(env)),
8193         .fd = fd,
8194         .smaps = smaps
8195     };
8196 
8197     mmap_lock();
8198     d.host_maps = read_self_maps();
8199     if (d.host_maps) {
8200         walk_memory_regions(&d, open_self_maps_2);
8201         free_self_maps(d.host_maps);
8202     } else {
8203         walk_memory_regions(&d, open_self_maps_3);
8204     }
8205     mmap_unlock();
8206     return 0;
8207 }
8208 
8209 static int open_self_maps(CPUArchState *cpu_env, int fd)
8210 {
8211     return open_self_maps_1(cpu_env, fd, false);
8212 }
8213 
8214 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8215 {
8216     return open_self_maps_1(cpu_env, fd, true);
8217 }
8218 
8219 static int open_self_stat(CPUArchState *cpu_env, int fd)
8220 {
8221     CPUState *cpu = env_cpu(cpu_env);
8222     TaskState *ts = get_task_state(cpu);
8223     g_autoptr(GString) buf = g_string_new(NULL);
8224     int i;
8225 
8226     for (i = 0; i < 44; i++) {
8227         if (i == 0) {
8228             /* pid */
8229             g_string_printf(buf, FMT_pid " ", getpid());
8230         } else if (i == 1) {
8231             /* app name */
8232             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8233             bin = bin ? bin + 1 : ts->bprm->argv[0];
8234             g_string_printf(buf, "(%.15s) ", bin);
8235         } else if (i == 2) {
8236             /* task state */
8237             g_string_assign(buf, "R "); /* we are running right now */
8238         } else if (i == 3) {
8239             /* ppid */
8240             g_string_printf(buf, FMT_pid " ", getppid());
8241         } else if (i == 4) {
8242             /* pgid */
8243             g_string_printf(buf, FMT_pid " ", getpgrp());
8244         } else if (i == 19) {
8245             /* num_threads */
8246             int cpus = 0;
8247             WITH_RCU_READ_LOCK_GUARD() {
8248                 CPUState *cpu_iter;
8249                 CPU_FOREACH(cpu_iter) {
8250                     cpus++;
8251                 }
8252             }
8253             g_string_printf(buf, "%d ", cpus);
8254         } else if (i == 21) {
8255             /* starttime */
8256             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8257         } else if (i == 27) {
8258             /* stack bottom */
8259             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8260         } else {
8261             /* for the rest, there is MasterCard */
8262             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8263         }
8264 
8265         if (write(fd, buf->str, buf->len) != buf->len) {
8266             return -1;
8267         }
8268     }
8269 
8270     return 0;
8271 }
8272 
8273 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8274 {
8275     CPUState *cpu = env_cpu(cpu_env);
8276     TaskState *ts = get_task_state(cpu);
8277     abi_ulong auxv = ts->info->saved_auxv;
8278     abi_ulong len = ts->info->auxv_len;
8279     char *ptr;
8280 
8281     /*
8282      * The auxiliary vector is stored on the target process stack.
8283      * Read in the whole auxv vector and copy it to the file.
8284      */
8285     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8286     if (ptr != NULL) {
8287         while (len > 0) {
8288             ssize_t r;
8289             r = write(fd, ptr, len);
8290             if (r <= 0) {
8291                 break;
8292             }
8293             len -= r;
8294             ptr += r;
8295         }
8296         lseek(fd, 0, SEEK_SET);
8297         unlock_user(ptr, auxv, len);
8298     }
8299 
8300     return 0;
8301 }
8302 
8303 static int is_proc_myself(const char *filename, const char *entry)
8304 {
8305     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8306         filename += strlen("/proc/");
8307         if (!strncmp(filename, "self/", strlen("self/"))) {
8308             filename += strlen("self/");
8309         } else if (*filename >= '1' && *filename <= '9') {
8310             char myself[80];
8311             snprintf(myself, sizeof(myself), "%d/", getpid());
8312             if (!strncmp(filename, myself, strlen(myself))) {
8313                 filename += strlen(myself);
8314             } else {
8315                 return 0;
8316             }
8317         } else {
8318             return 0;
8319         }
8320         if (!strcmp(filename, entry)) {
8321             return 1;
8322         }
8323     }
8324     return 0;
8325 }
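
/*
 * Usage sketch (illustrative): both the "self" spelling and the numeric
 * PID of the current QEMU process are accepted; anything else is not.
 *
 *   is_proc_myself("/proc/self/exe", "exe")   -> 1
 *   is_proc_myself("/proc/<pid>/exe", "exe")  -> 1   (own getpid() only)
 *   is_proc_myself("/proc/self/maps", "exe")  -> 0
 *   is_proc_myself("/tmp/exe", "exe")         -> 0
 */
static inline int example_is_proc_myself(void)
{
    return is_proc_myself("/proc/self/exe", "exe");
}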
8326 
8327 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8328                       const char *fmt, int code)
8329 {
8330     if (logfile) {
8331         CPUState *cs = env_cpu(env);
8332 
8333         fprintf(logfile, fmt, code);
8334         fprintf(logfile, "Failing executable: %s\n", exec_path);
8335         cpu_dump_state(cs, logfile, 0);
8336         open_self_maps(env, fileno(logfile));
8337     }
8338 }
8339 
8340 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8341 {
8342     /* dump to console */
8343     excp_dump_file(stderr, env, fmt, code);
8344 
8345     /* dump to log file */
8346     if (qemu_log_separate()) {
8347         FILE *logfile = qemu_log_trylock();
8348 
8349         excp_dump_file(logfile, env, fmt, code);
8350         qemu_log_unlock(logfile);
8351     }
8352 }
8353 
8354 #include "target_proc.h"
8355 
8356 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8357     defined(HAVE_ARCH_PROC_CPUINFO) || \
8358     defined(HAVE_ARCH_PROC_HARDWARE)
8359 static int is_proc(const char *filename, const char *entry)
8360 {
8361     return strcmp(filename, entry) == 0;
8362 }
8363 #endif
8364 
8365 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8366 static int open_net_route(CPUArchState *cpu_env, int fd)
8367 {
8368     FILE *fp;
8369     char *line = NULL;
8370     size_t len = 0;
8371     ssize_t read;
8372 
8373     fp = fopen("/proc/net/route", "r");
8374     if (fp == NULL) {
8375         return -1;
8376     }
8377 
8378     /* read header */
8379 
8380     read = getline(&line, &len, fp);
8381     dprintf(fd, "%s", line);
8382 
8383     /* read routes */
8384 
8385     while ((read = getline(&line, &len, fp)) != -1) {
8386         char iface[16];
8387         uint32_t dest, gw, mask;
8388         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8389         int fields;
8390 
8391         fields = sscanf(line,
8392                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8393                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8394                         &mask, &mtu, &window, &irtt);
8395         if (fields != 11) {
8396             continue;
8397         }
8398         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8399                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8400                 metric, tswap32(mask), mtu, window, irtt);
8401     }
8402 
8403     free(line);
8404     fclose(fp);
8405 
8406     return 0;
8407 }
8408 #endif
8409 
8410 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8411                               const char *fname, int flags, mode_t mode,
8412                               int openat2_resolve, bool safe)
8413 {
8414     g_autofree char *proc_name = NULL;
8415     const char *pathname;
8416     struct fake_open {
8417         const char *filename;
8418         int (*fill)(CPUArchState *cpu_env, int fd);
8419         int (*cmp)(const char *s1, const char *s2);
8420     };
8421     const struct fake_open *fake_open;
8422     static const struct fake_open fakes[] = {
8423         { "maps", open_self_maps, is_proc_myself },
8424         { "smaps", open_self_smaps, is_proc_myself },
8425         { "stat", open_self_stat, is_proc_myself },
8426         { "auxv", open_self_auxv, is_proc_myself },
8427         { "cmdline", open_self_cmdline, is_proc_myself },
8428 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8429         { "/proc/net/route", open_net_route, is_proc },
8430 #endif
8431 #if defined(HAVE_ARCH_PROC_CPUINFO)
8432         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8433 #endif
8434 #if defined(HAVE_ARCH_PROC_HARDWARE)
8435         { "/proc/hardware", open_hardware, is_proc },
8436 #endif
8437         { NULL, NULL, NULL }
8438     };
8439 
8440     /* if this is a file from the /proc filesystem, expand it to the full name */
8441     proc_name = realpath(fname, NULL);
8442     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8443         pathname = proc_name;
8444     } else {
8445         pathname = fname;
8446     }
8447 
8448     if (is_proc_myself(pathname, "exe")) {
8449         /* Honor openat2 resolve flags */
8450         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8451             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8452             errno = ELOOP;
8453             return -1;
8454         }
8455         if (safe) {
8456             return safe_openat(dirfd, exec_path, flags, mode);
8457         } else {
8458             return openat(dirfd, exec_path, flags, mode);
8459         }
8460     }
8461 
8462     for (fake_open = fakes; fake_open->filename; fake_open++) {
8463         if (fake_open->cmp(pathname, fake_open->filename)) {
8464             break;
8465         }
8466     }
8467 
8468     if (fake_open->filename) {
8469         const char *tmpdir;
8470         char filename[PATH_MAX];
8471         int fd, r;
8472 
8473         fd = memfd_create("qemu-open", 0);
8474         if (fd < 0) {
8475             if (errno != ENOSYS) {
8476                 return fd;
8477             }
8478             /* create temporary file to map stat to */
8479             /* fall back to a temporary file to hold the synthesized contents */
8480             if (!tmpdir)
8481                 tmpdir = "/tmp";
8482             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8483             fd = mkstemp(filename);
8484             if (fd < 0) {
8485                 return fd;
8486             }
8487             unlink(filename);
8488         }
8489 
8490         if ((r = fake_open->fill(cpu_env, fd))) {
8491             int e = errno;
8492             close(fd);
8493             errno = e;
8494             return r;
8495         }
8496         lseek(fd, 0, SEEK_SET);
8497 
8498         return fd;
8499     }
8500 
8501     return -2;
8502 }
8503 
8504 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8505                     int flags, mode_t mode, bool safe)
8506 {
8507     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8508     if (fd > -2) {
8509         return fd;
8510     }
8511 
8512     if (safe) {
8513         return safe_openat(dirfd, path(pathname), flags, mode);
8514     } else {
8515         return openat(dirfd, path(pathname), flags, mode);
8516     }
8517 }
8518 
8519 
8520 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8521                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8522                       abi_ulong guest_size)
8523 {
8524     struct open_how_ver0 how = {0};
8525     char *pathname;
8526     int ret;
8527 
8528     if (guest_size < sizeof(struct target_open_how_ver0)) {
8529         return -TARGET_EINVAL;
8530     }
8531     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8532     if (ret) {
8533         if (ret == -TARGET_E2BIG) {
8534             qemu_log_mask(LOG_UNIMP,
8535                           "Unimplemented openat2 open_how size: "
8536                           TARGET_ABI_FMT_lu "\n", guest_size);
8537         }
8538         return ret;
8539     }
8540     pathname = lock_user_string(guest_pathname);
8541     if (!pathname) {
8542         return -TARGET_EFAULT;
8543     }
8544 
8545     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8546     how.mode = tswap64(how.mode);
8547     how.resolve = tswap64(how.resolve);
8548     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8549                                 how.resolve, true);
8550     if (fd > -2) {
8551         ret = get_errno(fd);
8552     } else {
8553         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8554                                      sizeof(struct open_how_ver0)));
8555     }
8556 
8557     fd_trans_unregister(ret);
8558     unlock_user(pathname, guest_pathname, 0);
8559     return ret;
8560 }
8561 
8562 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8563 {
8564     ssize_t ret;
8565 
8566     if (!pathname || !buf) {
8567         errno = EFAULT;
8568         return -1;
8569     }
8570 
8571     if (!bufsiz) {
8572         /* Short circuit this for the magic exe check. */
8573         errno = EINVAL;
8574         return -1;
8575     }
8576 
8577     if (is_proc_myself((const char *)pathname, "exe")) {
8578         /*
8579          * Don't worry about sign mismatch as earlier mapping
8580          * logic would have thrown a bad address error.
8581          */
8582         ret = MIN(strlen(exec_path), bufsiz);
8583         /* We cannot NUL terminate the string. */
8584         memcpy(buf, exec_path, ret);
8585     } else {
8586         ret = readlink(path(pathname), buf, bufsiz);
8587     }
8588 
8589     return ret;
8590 }
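
/*
 * Illustrative call (not from the dispatcher): reading the magic
 * /proc/self/exe link yields the emulated binary's path, truncated to the
 * buffer and, as with readlink(2), not NUL-terminated.
 */
static inline ssize_t example_readlink_exe(char *buf, size_t bufsiz)
{
    return do_guest_readlink("/proc/self/exe", buf, bufsiz);
}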
8591 
8592 static int do_execv(CPUArchState *cpu_env, int dirfd,
8593                     abi_long pathname, abi_long guest_argp,
8594                     abi_long guest_envp, int flags, bool is_execveat)
8595 {
8596     int ret;
8597     char **argp, **envp;
8598     int argc, envc;
8599     abi_ulong gp;
8600     abi_ulong addr;
8601     char **q;
8602     void *p;
8603 
8604     argc = 0;
8605 
8606     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8607         if (get_user_ual(addr, gp)) {
8608             return -TARGET_EFAULT;
8609         }
8610         if (!addr) {
8611             break;
8612         }
8613         argc++;
8614     }
8615     envc = 0;
8616     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8617         if (get_user_ual(addr, gp)) {
8618             return -TARGET_EFAULT;
8619         }
8620         if (!addr) {
8621             break;
8622         }
8623         envc++;
8624     }
8625 
8626     argp = g_new0(char *, argc + 1);
8627     envp = g_new0(char *, envc + 1);
8628 
8629     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8630         if (get_user_ual(addr, gp)) {
8631             goto execve_efault;
8632         }
8633         if (!addr) {
8634             break;
8635         }
8636         *q = lock_user_string(addr);
8637         if (!*q) {
8638             goto execve_efault;
8639         }
8640     }
8641     *q = NULL;
8642 
8643     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8644         if (get_user_ual(addr, gp)) {
8645             goto execve_efault;
8646         }
8647         if (!addr) {
8648             break;
8649         }
8650         *q = lock_user_string(addr);
8651         if (!*q) {
8652             goto execve_efault;
8653         }
8654     }
8655     *q = NULL;
8656 
8657     /*
8658      * Although execve() is not an interruptible syscall it is
8659      * a special case where we must use the safe_syscall wrapper:
8660      * if we allow a signal to happen before we make the host
8661      * syscall then we will 'lose' it, because at the point of
8662      * execve the process leaves QEMU's control. So we use the
8663      * safe syscall wrapper to ensure that we either take the
8664      * signal as a guest signal, or else it does not happen
8665      * before the execve completes and makes it the other
8666      * program's problem.
8667      */
8668     p = lock_user_string(pathname);
8669     if (!p) {
8670         goto execve_efault;
8671     }
8672 
8673     const char *exe = p;
8674     if (is_proc_myself(p, "exe")) {
8675         exe = exec_path;
8676     }
8677     ret = is_execveat
8678         ? safe_execveat(dirfd, exe, argp, envp, flags)
8679         : safe_execve(exe, argp, envp);
8680     ret = get_errno(ret);
8681 
8682     unlock_user(p, pathname, 0);
8683 
8684     goto execve_end;
8685 
8686 execve_efault:
8687     ret = -TARGET_EFAULT;
8688 
8689 execve_end:
8690     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8691         if (get_user_ual(addr, gp) || !addr) {
8692             break;
8693         }
8694         unlock_user(*q, addr, 0);
8695     }
8696     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8697         if (get_user_ual(addr, gp) || !addr) {
8698             break;
8699         }
8700         unlock_user(*q, addr, 0);
8701     }
8702 
8703     g_free(argp);
8704     g_free(envp);
8705     return ret;
8706 }
8707 
8708 #define TIMER_MAGIC 0x0caf0000
8709 #define TIMER_MAGIC_MASK 0xffff0000
8710 
8711 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8712 static target_timer_t get_timer_id(abi_long arg)
8713 {
8714     target_timer_t timerid = arg;
8715 
8716     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8717         return -TARGET_EINVAL;
8718     }
8719 
8720     timerid &= 0xffff;
8721 
8722     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8723         return -TARGET_EINVAL;
8724     }
8725 
8726     return timerid;
8727 }
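
/*
 * Illustrative counterpart of get_timer_id() (assumed helper; the inverse
 * encoding presumably lives where timer_create is handled): the 16-bit
 * slot index is handed out with TIMER_MAGIC in the upper bits so that
 * arbitrary values passed back by a guest are rejected instead of indexing
 * g_posix_timers out of bounds.
 */
static inline abi_long example_make_timer_id(unsigned int index)
{
    return TIMER_MAGIC | (index & 0xffff);
}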
8728 
8729 static int target_to_host_cpu_mask(unsigned long *host_mask,
8730                                    size_t host_size,
8731                                    abi_ulong target_addr,
8732                                    size_t target_size)
8733 {
8734     unsigned target_bits = sizeof(abi_ulong) * 8;
8735     unsigned host_bits = sizeof(*host_mask) * 8;
8736     abi_ulong *target_mask;
8737     unsigned i, j;
8738 
8739     assert(host_size >= target_size);
8740 
8741     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8742     if (!target_mask) {
8743         return -TARGET_EFAULT;
8744     }
8745     memset(host_mask, 0, host_size);
8746 
8747     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8748         unsigned bit = i * target_bits;
8749         abi_ulong val;
8750 
8751         __get_user(val, &target_mask[i]);
8752         for (j = 0; j < target_bits; j++, bit++) {
8753             if (val & (1UL << j)) {
8754                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8755             }
8756         }
8757     }
8758 
8759     unlock_user(target_mask, target_addr, 0);
8760     return 0;
8761 }
8762 
8763 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8764                                    size_t host_size,
8765                                    abi_ulong target_addr,
8766                                    size_t target_size)
8767 {
8768     unsigned target_bits = sizeof(abi_ulong) * 8;
8769     unsigned host_bits = sizeof(*host_mask) * 8;
8770     abi_ulong *target_mask;
8771     unsigned i, j;
8772 
8773     assert(host_size >= target_size);
8774 
8775     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8776     if (!target_mask) {
8777         return -TARGET_EFAULT;
8778     }
8779 
8780     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8781         unsigned bit = i * target_bits;
8782         abi_ulong val = 0;
8783 
8784         for (j = 0; j < target_bits; j++, bit++) {
8785             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8786                 val |= 1UL << j;
8787             }
8788         }
8789         __put_user(val, &target_mask[i]);
8790     }
8791 
8792     unlock_user(target_mask, target_addr, target_size);
8793     return 0;
8794 }
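
/*
 * Usage sketch (illustrative, with made-up local names): a guest
 * sched_getaffinity() needs a host mask at least as large as the guest
 * buffer; the host fills it and the helper above copies it back bit by
 * bit so CPU numbering survives differences in word size and byte order.
 */
static inline abi_long example_getaffinity(int pid, abi_ulong guest_addr,
                                           size_t guest_size)
{
    unsigned long host_mask[128 / sizeof(unsigned long)];
    abi_long ret;

    if (guest_size > sizeof(host_mask)) {
        return -TARGET_EINVAL;
    }
    ret = get_errno(sched_getaffinity(pid, sizeof(host_mask),
                                      (cpu_set_t *)host_mask));
    if (!is_error(ret) &&
        host_to_target_cpu_mask(host_mask, sizeof(host_mask),
                                guest_addr, guest_size)) {
        return -TARGET_EFAULT;
    }
    return ret;
}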
8795 
8796 #ifdef TARGET_NR_getdents
8797 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8798 {
8799     g_autofree void *hdirp = NULL;
8800     void *tdirp;
8801     int hlen, hoff, toff;
8802     int hreclen, treclen;
8803     off_t prev_diroff = 0;
8804 
8805     hdirp = g_try_malloc(count);
8806     if (!hdirp) {
8807         return -TARGET_ENOMEM;
8808     }
8809 
8810 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8811     hlen = sys_getdents(dirfd, hdirp, count);
8812 #else
8813     hlen = sys_getdents64(dirfd, hdirp, count);
8814 #endif
8815 
8816     hlen = get_errno(hlen);
8817     if (is_error(hlen)) {
8818         return hlen;
8819     }
8820 
8821     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8822     if (!tdirp) {
8823         return -TARGET_EFAULT;
8824     }
8825 
8826     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8827 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8828         struct linux_dirent *hde = hdirp + hoff;
8829 #else
8830         struct linux_dirent64 *hde = hdirp + hoff;
8831 #endif
8832         struct target_dirent *tde = tdirp + toff;
8833         int namelen;
8834         uint8_t type;
8835 
8836         namelen = strlen(hde->d_name);
8837         hreclen = hde->d_reclen;
8838         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8839         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8840 
8841         if (toff + treclen > count) {
8842             /*
8843              * If the host struct is smaller than the target struct, or
8844              * requires less alignment and thus packs into less space,
8845              * then the host can return more entries than we can pass
8846              * on to the guest.
8847              */
8848             if (toff == 0) {
8849                 toff = -TARGET_EINVAL; /* result buffer is too small */
8850                 break;
8851             }
8852             /*
8853              * Return what we have, resetting the file pointer to the
8854              * location of the first record not returned.
8855              */
8856             lseek(dirfd, prev_diroff, SEEK_SET);
8857             break;
8858         }
8859 
8860         prev_diroff = hde->d_off;
8861         tde->d_ino = tswapal(hde->d_ino);
8862         tde->d_off = tswapal(hde->d_off);
8863         tde->d_reclen = tswap16(treclen);
8864         memcpy(tde->d_name, hde->d_name, namelen + 1);
8865 
8866         /*
8867          * The getdents type is in what was formerly a padding byte at the
8868          * end of the structure.
8869          */
8870 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8871         type = *((uint8_t *)hde + hreclen - 1);
8872 #else
8873         type = hde->d_type;
8874 #endif
8875         *((uint8_t *)tde + treclen - 1) = type;
8876     }
8877 
8878     unlock_user(tdirp, arg2, toff);
8879     return toff;
8880 }
8881 #endif /* TARGET_NR_getdents */
8882 
8883 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8884 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8885 {
8886     g_autofree void *hdirp = NULL;
8887     void *tdirp;
8888     int hlen, hoff, toff;
8889     int hreclen, treclen;
8890     off_t prev_diroff = 0;
8891 
8892     hdirp = g_try_malloc(count);
8893     if (!hdirp) {
8894         return -TARGET_ENOMEM;
8895     }
8896 
8897     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8898     if (is_error(hlen)) {
8899         return hlen;
8900     }
8901 
8902     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8903     if (!tdirp) {
8904         return -TARGET_EFAULT;
8905     }
8906 
8907     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8908         struct linux_dirent64 *hde = hdirp + hoff;
8909         struct target_dirent64 *tde = tdirp + toff;
8910         int namelen;
8911 
8912         namelen = strlen(hde->d_name) + 1;
8913         hreclen = hde->d_reclen;
8914         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8915         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8916 
8917         if (toff + treclen > count) {
8918             /*
8919              * If the host struct is smaller than the target struct, or
8920              * requires less alignment and thus packs into less space,
8921              * then the host can return more entries than we can pass
8922              * on to the guest.
8923              */
8924             if (toff == 0) {
8925                 toff = -TARGET_EINVAL; /* result buffer is too small */
8926                 break;
8927             }
8928             /*
8929              * Return what we have, resetting the file pointer to the
8930              * location of the first record not returned.
8931              */
8932             lseek(dirfd, prev_diroff, SEEK_SET);
8933             break;
8934         }
8935 
8936         prev_diroff = hde->d_off;
8937         tde->d_ino = tswap64(hde->d_ino);
8938         tde->d_off = tswap64(hde->d_off);
8939         tde->d_reclen = tswap16(treclen);
8940         tde->d_type = hde->d_type;
8941         memcpy(tde->d_name, hde->d_name, namelen);
8942     }
8943 
8944     unlock_user(tdirp, arg2, toff);
8945     return toff;
8946 }
8947 #endif /* TARGET_NR_getdents64 */
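/*
 * Illustrative guest-side sketch of why the lseek() back to prev_diroff in
 * the converters above is invisible to applications: directory readers call
 * getdents64() in a loop until it returns 0, so any records that did not fit
 * in one conversion pass are simply returned by the next call from the
 * restored file position.  Names and buffer size are assumptions of the
 * example.
 *
 *     char buf[4096];
 *     for (;;) {
 *         long n = syscall(SYS_getdents64, dirfd, buf, sizeof(buf));
 *         if (n <= 0) {
 *             break;    (0 means end of directory, negative means error)
 *         }
 *         ... walk the struct linux_dirent64 records in buf[0..n) ...
 *     }
 */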
8948 
8949 #if defined(TARGET_NR_riscv_hwprobe)
8950 
8951 #define RISCV_HWPROBE_KEY_MVENDORID     0
8952 #define RISCV_HWPROBE_KEY_MARCHID       1
8953 #define RISCV_HWPROBE_KEY_MIMPID        2
8954 
8955 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8956 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8957 
8958 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8959 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8960 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8961 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8962 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8963 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8964 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8965 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8966 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8967 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8968 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8969 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8970 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8971 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8972 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8973 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8974 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8975 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8976 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8977 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8978 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8979 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8980 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8981 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8982 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8983 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8984 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8985 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8986 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8987 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8988 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8989 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8990 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8991 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8992 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8993 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8994 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8995 
8996 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8997 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8998 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8999 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9000 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9001 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9002 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9003 
9004 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9005 
9006 struct riscv_hwprobe {
9007     abi_llong  key;
9008     abi_ullong value;
9009 };
9010 
9011 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9012                                     struct riscv_hwprobe *pair,
9013                                     size_t pair_count)
9014 {
9015     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9016 
9017     for (; pair_count > 0; pair_count--, pair++) {
9018         abi_llong key;
9019         abi_ullong value;
9020         __put_user(0, &pair->value);
9021         __get_user(key, &pair->key);
9022         switch (key) {
9023         case RISCV_HWPROBE_KEY_MVENDORID:
9024             __put_user(cfg->mvendorid, &pair->value);
9025             break;
9026         case RISCV_HWPROBE_KEY_MARCHID:
9027             __put_user(cfg->marchid, &pair->value);
9028             break;
9029         case RISCV_HWPROBE_KEY_MIMPID:
9030             __put_user(cfg->mimpid, &pair->value);
9031             break;
9032         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9033             value = riscv_has_ext(env, RVI) &&
9034                     riscv_has_ext(env, RVM) &&
9035                     riscv_has_ext(env, RVA) ?
9036                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9037             __put_user(value, &pair->value);
9038             break;
9039         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9040             value = riscv_has_ext(env, RVF) &&
9041                     riscv_has_ext(env, RVD) ?
9042                     RISCV_HWPROBE_IMA_FD : 0;
9043             value |= riscv_has_ext(env, RVC) ?
9044                      RISCV_HWPROBE_IMA_C : 0;
9045             value |= riscv_has_ext(env, RVV) ?
9046                      RISCV_HWPROBE_IMA_V : 0;
9047             value |= cfg->ext_zba ?
9048                      RISCV_HWPROBE_EXT_ZBA : 0;
9049             value |= cfg->ext_zbb ?
9050                      RISCV_HWPROBE_EXT_ZBB : 0;
9051             value |= cfg->ext_zbs ?
9052                      RISCV_HWPROBE_EXT_ZBS : 0;
9053             value |= cfg->ext_zicboz ?
9054                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9055             value |= cfg->ext_zbc ?
9056                      RISCV_HWPROBE_EXT_ZBC : 0;
9057             value |= cfg->ext_zbkb ?
9058                      RISCV_HWPROBE_EXT_ZBKB : 0;
9059             value |= cfg->ext_zbkc ?
9060                      RISCV_HWPROBE_EXT_ZBKC : 0;
9061             value |= cfg->ext_zbkx ?
9062                      RISCV_HWPROBE_EXT_ZBKX : 0;
9063             value |= cfg->ext_zknd ?
9064                      RISCV_HWPROBE_EXT_ZKND : 0;
9065             value |= cfg->ext_zkne ?
9066                      RISCV_HWPROBE_EXT_ZKNE : 0;
9067             value |= cfg->ext_zknh ?
9068                      RISCV_HWPROBE_EXT_ZKNH : 0;
9069             value |= cfg->ext_zksed ?
9070                      RISCV_HWPROBE_EXT_ZKSED : 0;
9071             value |= cfg->ext_zksh ?
9072                      RISCV_HWPROBE_EXT_ZKSH : 0;
9073             value |= cfg->ext_zkt ?
9074                      RISCV_HWPROBE_EXT_ZKT : 0;
9075             value |= cfg->ext_zvbb ?
9076                      RISCV_HWPROBE_EXT_ZVBB : 0;
9077             value |= cfg->ext_zvbc ?
9078                      RISCV_HWPROBE_EXT_ZVBC : 0;
9079             value |= cfg->ext_zvkb ?
9080                      RISCV_HWPROBE_EXT_ZVKB : 0;
9081             value |= cfg->ext_zvkg ?
9082                      RISCV_HWPROBE_EXT_ZVKG : 0;
9083             value |= cfg->ext_zvkned ?
9084                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9085             value |= cfg->ext_zvknha ?
9086                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9087             value |= cfg->ext_zvknhb ?
9088                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9089             value |= cfg->ext_zvksed ?
9090                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9091             value |= cfg->ext_zvksh ?
9092                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9093             value |= cfg->ext_zvkt ?
9094                      RISCV_HWPROBE_EXT_ZVKT : 0;
9095             value |= cfg->ext_zfh ?
9096                      RISCV_HWPROBE_EXT_ZFH : 0;
9097             value |= cfg->ext_zfhmin ?
9098                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9099             value |= cfg->ext_zihintntl ?
9100                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9101             value |= cfg->ext_zvfh ?
9102                      RISCV_HWPROBE_EXT_ZVFH : 0;
9103             value |= cfg->ext_zvfhmin ?
9104                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9105             value |= cfg->ext_zfa ?
9106                      RISCV_HWPROBE_EXT_ZFA : 0;
9107             value |= cfg->ext_ztso ?
9108                      RISCV_HWPROBE_EXT_ZTSO : 0;
9109             value |= cfg->ext_zacas ?
9110                      RISCV_HWPROBE_EXT_ZACAS : 0;
9111             value |= cfg->ext_zicond ?
9112                      RISCV_HWPROBE_EXT_ZICOND : 0;
9113             __put_user(value, &pair->value);
9114             break;
9115         case RISCV_HWPROBE_KEY_CPUPERF_0:
9116             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9117             break;
9118         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9119             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9120             __put_user(value, &pair->value);
9121             break;
9122         default:
9123             __put_user(-1, &pair->key);
9124             break;
9125         }
9126     }
9127 }
9128 
9129 /*
9130  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9131  * If the cpumask_t has no bits set: -EINVAL.
9132  * Otherwise the cpumask_t contains some bit set: 0.
9133  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9134  * nor bound the search by cpumask_size().
9135  */
9136 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9137 {
9138     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9139     int ret = -TARGET_EFAULT;
9140 
9141     if (p) {
9142         ret = -TARGET_EINVAL;
9143         /*
9144          * Since we only care about the empty/non-empty state of the cpumask_t,
9145          * not the individual bits, we do not need to repartition the bits
9146          * from target abi_ulong to host unsigned long.
9147          *
9148          * Note that the kernel does not round up cpusetsize to a multiple of
9149          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9150          * it copies exactly cpusetsize bytes into a zeroed buffer.
9151          */
9152         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9153             if (p[i]) {
9154                 ret = 0;
9155                 break;
9156             }
9157         }
9158         unlock_user(p, target_cpus, 0);
9159     }
9160     return ret;
9161 }
9162 
9163 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9164                                  abi_long arg2, abi_long arg3,
9165                                  abi_long arg4, abi_long arg5)
9166 {
9167     int ret;
9168     struct riscv_hwprobe *host_pairs;
9169 
9170     /* flags must be 0 */
9171     if (arg5 != 0) {
9172         return -TARGET_EINVAL;
9173     }
9174 
9175     /* check cpu_set */
9176     if (arg3 != 0) {
9177         ret = nonempty_cpu_set(arg3, arg4);
9178         if (ret != 0) {
9179             return ret;
9180         }
9181     } else if (arg4 != 0) {
9182         return -TARGET_EINVAL;
9183     }
9184 
9185     /* no pairs */
9186     if (arg2 == 0) {
9187         return 0;
9188     }
9189 
9190     host_pairs = lock_user(VERIFY_WRITE, arg1,
9191                            sizeof(*host_pairs) * (size_t)arg2, 0);
9192     if (host_pairs == NULL) {
9193         return -TARGET_EFAULT;
9194     }
9195     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9196     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9197     return 0;
9198 }
9199 #endif /* TARGET_NR_riscv_hwprobe */
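/*
 * Illustrative guest-side sketch of the syscall emulated above.  The use of
 * __NR_riscv_hwprobe from recent kernel headers is an assumption of this
 * example, as are the variable names; flags must be 0, and a zero-length,
 * NULL cpu set means "any cpu".
 *
 *     #include <sys/syscall.h>
 *     #include <unistd.h>
 *
 *     struct riscv_hwprobe pairs[2] = {
 *         { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *         { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *     };
 *     long rc = syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 *     if (rc == 0 && (pairs[1].value & RISCV_HWPROBE_IMA_V)) {
 *         ... the vector extension is available ...
 *     }
 *
 * Keys that the emulation does not recognise come back with key set to -1,
 * matching the default case in risc_hwprobe_fill_pairs().
 */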
9200 
9201 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9202 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9203 #endif
9204 
9205 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9206 #define __NR_sys_open_tree __NR_open_tree
9207 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9208           unsigned int, __flags)
9209 #endif
9210 
9211 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9212 #define __NR_sys_move_mount __NR_move_mount
9213 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9214            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9215 #endif
9216 
9217 /* This is an internal helper for do_syscall, giving it a single
9218  * return point so that actions, such as logging of syscall results,
9219  * can be performed in one place.
9220  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9221  */
9222 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9223                             abi_long arg2, abi_long arg3, abi_long arg4,
9224                             abi_long arg5, abi_long arg6, abi_long arg7,
9225                             abi_long arg8)
9226 {
9227     CPUState *cpu = env_cpu(cpu_env);
9228     abi_long ret;
9229 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9230     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9231     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9232     || defined(TARGET_NR_statx)
9233     struct stat st;
9234 #endif
9235 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9236     || defined(TARGET_NR_fstatfs)
9237     struct statfs stfs;
9238 #endif
9239     void *p;
9240 
9241     switch(num) {
9242     case TARGET_NR_exit:
9243         /* In old applications this may be used to implement _exit(2).
9244            However, in threaded applications it is used for thread termination,
9245            and _exit_group is used for application termination.
9246            Do thread termination if we have more than one thread.  */
9247 
9248         if (block_signals()) {
9249             return -QEMU_ERESTARTSYS;
9250         }
9251 
9252         pthread_mutex_lock(&clone_lock);
9253 
9254         if (CPU_NEXT(first_cpu)) {
9255             TaskState *ts = get_task_state(cpu);
9256 
9257             if (ts->child_tidptr) {
9258                 put_user_u32(0, ts->child_tidptr);
9259                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9260                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9261             }
9262 
9263             object_unparent(OBJECT(cpu));
9264             object_unref(OBJECT(cpu));
9265             /*
9266              * At this point the CPU should be unrealized and removed
9267              * from cpu lists. We can clean up the rest of the thread
9268              * data without the lock held.
9269              */
9270 
9271             pthread_mutex_unlock(&clone_lock);
9272 
9273             thread_cpu = NULL;
9274             g_free(ts);
9275             rcu_unregister_thread();
9276             pthread_exit(NULL);
9277         }
9278 
9279         pthread_mutex_unlock(&clone_lock);
9280         preexit_cleanup(cpu_env, arg1);
9281         _exit(arg1);
9282         return 0; /* avoid warning */
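    /*
     * Illustrative guest-side sketch of the distinction handled above (raw
     * syscall(2) invocations; the example is an assumption, not part of the
     * original file):
     *
     *     syscall(SYS_exit, 0);        ends only the calling thread while
     *                                  other threads exist (multi-CPU branch)
     *     syscall(SYS_exit_group, 0);  always ends the whole process
     *
     * Modern libcs implement _exit()/exit() via exit_group and reserve the
     * plain exit syscall for thread teardown, which is why the multi-thread
     * branch wakes any joiner through child_tidptr before exiting.
     */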
9283     case TARGET_NR_read:
9284         if (arg2 == 0 && arg3 == 0) {
9285             return get_errno(safe_read(arg1, 0, 0));
9286         } else {
9287             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9288                 return -TARGET_EFAULT;
9289             ret = get_errno(safe_read(arg1, p, arg3));
9290             if (ret >= 0 &&
9291                 fd_trans_host_to_target_data(arg1)) {
9292                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9293             }
9294             unlock_user(p, arg2, ret);
9295         }
9296         return ret;
9297     case TARGET_NR_write:
9298         if (arg2 == 0 && arg3 == 0) {
9299             return get_errno(safe_write(arg1, 0, 0));
9300         }
9301         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9302             return -TARGET_EFAULT;
9303         if (fd_trans_target_to_host_data(arg1)) {
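            /*
             * The translation callback may rewrite the data in place, so
             * operate on a private copy rather than on the guest buffer
             * locked read-only above.
             */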
9304             void *copy = g_malloc(arg3);
9305             memcpy(copy, p, arg3);
9306             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9307             if (ret >= 0) {
9308                 ret = get_errno(safe_write(arg1, copy, ret));
9309             }
9310             g_free(copy);
9311         } else {
9312             ret = get_errno(safe_write(arg1, p, arg3));
9313         }
9314         unlock_user(p, arg2, 0);
9315         return ret;
9316 
9317 #ifdef TARGET_NR_open
9318     case TARGET_NR_open:
9319         if (!(p = lock_user_string(arg1)))
9320             return -TARGET_EFAULT;
9321         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9322                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9323                                   arg3, true));
9324         fd_trans_unregister(ret);
9325         unlock_user(p, arg1, 0);
9326         return ret;
9327 #endif
9328     case TARGET_NR_openat:
9329         if (!(p = lock_user_string(arg2)))
9330             return -TARGET_EFAULT;
9331         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9332                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9333                                   arg4, true));
9334         fd_trans_unregister(ret);
9335         unlock_user(p, arg2, 0);
9336         return ret;
9337     case TARGET_NR_openat2:
9338         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9339         return ret;
9340 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9341     case TARGET_NR_name_to_handle_at:
9342         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9343         return ret;
9344 #endif
9345 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9346     case TARGET_NR_open_by_handle_at:
9347         ret = do_open_by_handle_at(arg1, arg2, arg3);
9348         fd_trans_unregister(ret);
9349         return ret;
9350 #endif
9351 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9352     case TARGET_NR_pidfd_open:
9353         return get_errno(pidfd_open(arg1, arg2));
9354 #endif
9355 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9356     case TARGET_NR_pidfd_send_signal:
9357         {
9358             siginfo_t uinfo, *puinfo;
9359 
9360             if (arg3) {
9361                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9362                 if (!p) {
9363                     return -TARGET_EFAULT;
9364                 }
9365                 target_to_host_siginfo(&uinfo, p);
9366                 unlock_user(p, arg3, 0);
9367                 puinfo = &uinfo;
9368             } else {
9369                 puinfo = NULL;
9370             }
9371             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9372                                               puinfo, arg4));
9373         }
9374         return ret;
9375 #endif
9376 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9377     case TARGET_NR_pidfd_getfd:
9378         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9379 #endif
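    /*
     * Illustrative guest-side sketch of the pidfd calls forwarded above
     * (error handling omitted; names are assumptions of the example):
     *
     *     int pfd = syscall(SYS_pidfd_open, child_pid, 0);
     *     syscall(SYS_pidfd_send_signal, pfd, SIGTERM, NULL, 0);
     *     int dupfd = syscall(SYS_pidfd_getfd, pfd, remote_fd, 0);
     *
     * Only pidfd_send_signal carries a siginfo and a signal number, which is
     * why it is the one case above that needs target-to-host conversion.
     */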
9380     case TARGET_NR_close:
9381         fd_trans_unregister(arg1);
9382         return get_errno(close(arg1));
9383 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9384     case TARGET_NR_close_range:
9385         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9386         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9387             abi_long fd, maxfd;
9388             maxfd = MIN(arg2, target_fd_max);
9389             for (fd = arg1; fd < maxfd; fd++) {
9390                 fd_trans_unregister(fd);
9391             }
9392         }
9393         return ret;
9394 #endif
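    /*
     * Illustrative guest-side sketch of the two close_range() modes (an
     * example only): with flags == 0 the descriptors are really closed, so
     * their fd translations are dropped above, while CLOSE_RANGE_CLOEXEC
     * merely marks them close-on-exec and leaves the translations in place.
     *
     *     close_range(3, ~0U, 0);                     close every fd >= 3
     *     close_range(3, ~0U, CLOSE_RANGE_CLOEXEC);   keep open, set CLOEXEC
     */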
9395 
9396     case TARGET_NR_brk:
9397         return do_brk(arg1);
9398 #ifdef TARGET_NR_fork
9399     case TARGET_NR_fork:
9400         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9401 #endif
9402 #ifdef TARGET_NR_waitpid
9403     case TARGET_NR_waitpid:
9404         {
9405             int status;
9406             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9407             if (!is_error(ret) && arg2 && ret
9408                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9409                 return -TARGET_EFAULT;
9410         }
9411         return ret;
9412 #endif
9413 #ifdef TARGET_NR_waitid
9414     case TARGET_NR_waitid:
9415         {
9416             struct rusage ru;
9417             siginfo_t info;
9418 
9419             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9420                                         arg4, (arg5 ? &ru : NULL)));
9421             if (!is_error(ret)) {
9422                 if (arg3) {
9423                     p = lock_user(VERIFY_WRITE, arg3,
9424                                   sizeof(target_siginfo_t), 0);
9425                     if (!p) {
9426                         return -TARGET_EFAULT;
9427                     }
9428                     host_to_target_siginfo(p, &info);
9429                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9430                 }
9431                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9432                     return -TARGET_EFAULT;
9433                 }
9434             }
9435         }
9436         return ret;
9437 #endif
9438 #ifdef TARGET_NR_creat /* not on alpha */
9439     case TARGET_NR_creat:
9440         if (!(p = lock_user_string(arg1)))
9441             return -TARGET_EFAULT;
9442         ret = get_errno(creat(p, arg2));
9443         fd_trans_unregister(ret);
9444         unlock_user(p, arg1, 0);
9445         return ret;
9446 #endif
9447 #ifdef TARGET_NR_link
9448     case TARGET_NR_link:
9449         {
9450             void * p2;
9451             p = lock_user_string(arg1);
9452             p2 = lock_user_string(arg2);
9453             if (!p || !p2)
9454                 ret = -TARGET_EFAULT;
9455             else
9456                 ret = get_errno(link(p, p2));
9457             unlock_user(p2, arg2, 0);
9458             unlock_user(p, arg1, 0);
9459         }
9460         return ret;
9461 #endif
9462 #if defined(TARGET_NR_linkat)
9463     case TARGET_NR_linkat:
9464         {
9465             void * p2 = NULL;
9466             if (!arg2 || !arg4)
9467                 return -TARGET_EFAULT;
9468             p  = lock_user_string(arg2);
9469             p2 = lock_user_string(arg4);
9470             if (!p || !p2)
9471                 ret = -TARGET_EFAULT;
9472             else
9473                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9474             unlock_user(p, arg2, 0);
9475             unlock_user(p2, arg4, 0);
9476         }
9477         return ret;
9478 #endif
9479 #ifdef TARGET_NR_unlink
9480     case TARGET_NR_unlink:
9481         if (!(p = lock_user_string(arg1)))
9482             return -TARGET_EFAULT;
9483         ret = get_errno(unlink(p));
9484         unlock_user(p, arg1, 0);
9485         return ret;
9486 #endif
9487 #if defined(TARGET_NR_unlinkat)
9488     case TARGET_NR_unlinkat:
9489         if (!(p = lock_user_string(arg2)))
9490             return -TARGET_EFAULT;
9491         ret = get_errno(unlinkat(arg1, p, arg3));
9492         unlock_user(p, arg2, 0);
9493         return ret;
9494 #endif
9495     case TARGET_NR_execveat:
9496         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9497     case TARGET_NR_execve:
9498         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9499     case TARGET_NR_chdir:
9500         if (!(p = lock_user_string(arg1)))
9501             return -TARGET_EFAULT;
9502         ret = get_errno(chdir(p));
9503         unlock_user(p, arg1, 0);
9504         return ret;
9505 #ifdef TARGET_NR_time
9506     case TARGET_NR_time:
9507         {
9508             time_t host_time;
9509             ret = get_errno(time(&host_time));
9510             if (!is_error(ret)
9511                 && arg1
9512                 && put_user_sal(host_time, arg1))
9513                 return -TARGET_EFAULT;
9514         }
9515         return ret;
9516 #endif
9517 #ifdef TARGET_NR_mknod
9518     case TARGET_NR_mknod:
9519         if (!(p = lock_user_string(arg1)))
9520             return -TARGET_EFAULT;
9521         ret = get_errno(mknod(p, arg2, arg3));
9522         unlock_user(p, arg1, 0);
9523         return ret;
9524 #endif
9525 #if defined(TARGET_NR_mknodat)
9526     case TARGET_NR_mknodat:
9527         if (!(p = lock_user_string(arg2)))
9528             return -TARGET_EFAULT;
9529         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9530         unlock_user(p, arg2, 0);
9531         return ret;
9532 #endif
9533 #ifdef TARGET_NR_chmod
9534     case TARGET_NR_chmod:
9535         if (!(p = lock_user_string(arg1)))
9536             return -TARGET_EFAULT;
9537         ret = get_errno(chmod(p, arg2));
9538         unlock_user(p, arg1, 0);
9539         return ret;
9540 #endif
9541 #ifdef TARGET_NR_lseek
9542     case TARGET_NR_lseek:
9543         return get_errno(lseek(arg1, arg2, arg3));
9544 #endif
9545 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9546     /* Alpha specific */
9547     case TARGET_NR_getxpid:
9548         cpu_env->ir[IR_A4] = getppid();
9549         return get_errno(getpid());
9550 #endif
9551 #ifdef TARGET_NR_getpid
9552     case TARGET_NR_getpid:
9553         return get_errno(getpid());
9554 #endif
9555     case TARGET_NR_mount:
9556         {
9557             /* need to look at the data field */
9558             void *p2, *p3;
9559 
9560             if (arg1) {
9561                 p = lock_user_string(arg1);
9562                 if (!p) {
9563                     return -TARGET_EFAULT;
9564                 }
9565             } else {
9566                 p = NULL;
9567             }
9568 
9569             p2 = lock_user_string(arg2);
9570             if (!p2) {
9571                 if (arg1) {
9572                     unlock_user(p, arg1, 0);
9573                 }
9574                 return -TARGET_EFAULT;
9575             }
9576 
9577             if (arg3) {
9578                 p3 = lock_user_string(arg3);
9579                 if (!p3) {
9580                     if (arg1) {
9581                         unlock_user(p, arg1, 0);
9582                     }
9583                     unlock_user(p2, arg2, 0);
9584                     return -TARGET_EFAULT;
9585                 }
9586             } else {
9587                 p3 = NULL;
9588             }
9589 
9590             /* FIXME - arg5 should be locked, but it isn't clear how to
9591              * do that since it's not guaranteed to be a NULL-terminated
9592              * string.
9593              */
9594             if (!arg5) {
9595                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9596             } else {
9597                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9598             }
9599             ret = get_errno(ret);
9600 
9601             if (arg1) {
9602                 unlock_user(p, arg1, 0);
9603             }
9604             unlock_user(p2, arg2, 0);
9605             if (arg3) {
9606                 unlock_user(p3, arg3, 0);
9607             }
9608         }
9609         return ret;
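    /*
     * Illustrative guest-side sketch of why the mount data argument (arg5)
     * is not locked as a string (the example call is an assumption, not
     * taken from this file): for most filesystems it is a NUL-terminated
     * option string,
     *
     *     mount("none", "/tmp", "tmpfs", 0, "size=64m,mode=1777");
     *
     * but some filesystems pass an opaque binary blob here, so the code
     * above maps the guest pointer with g2h() instead of assuming a length.
     */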
9610 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9611 #if defined(TARGET_NR_umount)
9612     case TARGET_NR_umount:
9613 #endif
9614 #if defined(TARGET_NR_oldumount)
9615     case TARGET_NR_oldumount:
9616 #endif
9617         if (!(p = lock_user_string(arg1)))
9618             return -TARGET_EFAULT;
9619         ret = get_errno(umount(p));
9620         unlock_user(p, arg1, 0);
9621         return ret;
9622 #endif
9623 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9624     case TARGET_NR_move_mount:
9625         {
9626             void *p2, *p4;
9627 
9628             if (!arg2 || !arg4) {
9629                 return -TARGET_EFAULT;
9630             }
9631 
9632             p2 = lock_user_string(arg2);
9633             if (!p2) {
9634                 return -TARGET_EFAULT;
9635             }
9636 
9637             p4 = lock_user_string(arg4);
9638             if (!p4) {
9639                 unlock_user(p2, arg2, 0);
9640                 return -TARGET_EFAULT;
9641             }
9642             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9643 
9644             unlock_user(p2, arg2, 0);
9645             unlock_user(p4, arg4, 0);
9646 
9647             return ret;
9648         }
9649 #endif
9650 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9651     case TARGET_NR_open_tree:
9652         {
9653             void *p2;
9654             int host_flags;
9655 
9656             if (!arg2) {
9657                 return -TARGET_EFAULT;
9658             }
9659 
9660             p2 = lock_user_string(arg2);
9661             if (!p2) {
9662                 return -TARGET_EFAULT;
9663             }
9664 
9665             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9666             if (arg3 & TARGET_O_CLOEXEC) {
9667                 host_flags |= O_CLOEXEC;
9668             }
9669 
9670             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9671 
9672             unlock_user(p2, arg2, 0);
9673 
9674             return ret;
9675         }
9676 #endif
9677 #ifdef TARGET_NR_stime /* not on alpha */
9678     case TARGET_NR_stime:
9679         {
9680             struct timespec ts;
9681             ts.tv_nsec = 0;
9682             if (get_user_sal(ts.tv_sec, arg1)) {
9683                 return -TARGET_EFAULT;
9684             }
9685             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9686         }
9687 #endif
9688 #ifdef TARGET_NR_alarm /* not on alpha */
9689     case TARGET_NR_alarm:
9690         return alarm(arg1);
9691 #endif
9692 #ifdef TARGET_NR_pause /* not on alpha */
9693     case TARGET_NR_pause:
9694         if (!block_signals()) {
9695             sigsuspend(&get_task_state(cpu)->signal_mask);
9696         }
9697         return -TARGET_EINTR;
9698 #endif
9699 #ifdef TARGET_NR_utime
9700     case TARGET_NR_utime:
9701         {
9702             struct utimbuf tbuf, *host_tbuf;
9703             struct target_utimbuf *target_tbuf;
9704             if (arg2) {
9705                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9706                     return -TARGET_EFAULT;
9707                 tbuf.actime = tswapal(target_tbuf->actime);
9708                 tbuf.modtime = tswapal(target_tbuf->modtime);
9709                 unlock_user_struct(target_tbuf, arg2, 0);
9710                 host_tbuf = &tbuf;
9711             } else {
9712                 host_tbuf = NULL;
9713             }
9714             if (!(p = lock_user_string(arg1)))
9715                 return -TARGET_EFAULT;
9716             ret = get_errno(utime(p, host_tbuf));
9717             unlock_user(p, arg1, 0);
9718         }
9719         return ret;
9720 #endif
9721 #ifdef TARGET_NR_utimes
9722     case TARGET_NR_utimes:
9723         {
9724             struct timeval *tvp, tv[2];
9725             if (arg2) {
9726                 if (copy_from_user_timeval(&tv[0], arg2)
9727                     || copy_from_user_timeval(&tv[1],
9728                                               arg2 + sizeof(struct target_timeval)))
9729                     return -TARGET_EFAULT;
9730                 tvp = tv;
9731             } else {
9732                 tvp = NULL;
9733             }
9734             if (!(p = lock_user_string(arg1)))
9735                 return -TARGET_EFAULT;
9736             ret = get_errno(utimes(p, tvp));
9737             unlock_user(p, arg1, 0);
9738         }
9739         return ret;
9740 #endif
9741 #if defined(TARGET_NR_futimesat)
9742     case TARGET_NR_futimesat:
9743         {
9744             struct timeval *tvp, tv[2];
9745             if (arg3) {
9746                 if (copy_from_user_timeval(&tv[0], arg3)
9747                     || copy_from_user_timeval(&tv[1],
9748                                               arg3 + sizeof(struct target_timeval)))
9749                     return -TARGET_EFAULT;
9750                 tvp = tv;
9751             } else {
9752                 tvp = NULL;
9753             }
9754             if (!(p = lock_user_string(arg2))) {
9755                 return -TARGET_EFAULT;
9756             }
9757             ret = get_errno(futimesat(arg1, path(p), tvp));
9758             unlock_user(p, arg2, 0);
9759         }
9760         return ret;
9761 #endif
9762 #ifdef TARGET_NR_access
9763     case TARGET_NR_access:
9764         if (!(p = lock_user_string(arg1))) {
9765             return -TARGET_EFAULT;
9766         }
9767         ret = get_errno(access(path(p), arg2));
9768         unlock_user(p, arg1, 0);
9769         return ret;
9770 #endif
9771 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9772     case TARGET_NR_faccessat:
9773         if (!(p = lock_user_string(arg2))) {
9774             return -TARGET_EFAULT;
9775         }
9776         ret = get_errno(faccessat(arg1, p, arg3, 0));
9777         unlock_user(p, arg2, 0);
9778         return ret;
9779 #endif
9780 #if defined(TARGET_NR_faccessat2)
9781     case TARGET_NR_faccessat2:
9782         if (!(p = lock_user_string(arg2))) {
9783             return -TARGET_EFAULT;
9784         }
9785         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9786         unlock_user(p, arg2, 0);
9787         return ret;
9788 #endif
9789 #ifdef TARGET_NR_nice /* not on alpha */
9790     case TARGET_NR_nice:
9791         return get_errno(nice(arg1));
9792 #endif
9793     case TARGET_NR_sync:
9794         sync();
9795         return 0;
9796 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9797     case TARGET_NR_syncfs:
9798         return get_errno(syncfs(arg1));
9799 #endif
9800     case TARGET_NR_kill:
9801         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9802 #ifdef TARGET_NR_rename
9803     case TARGET_NR_rename:
9804         {
9805             void *p2;
9806             p = lock_user_string(arg1);
9807             p2 = lock_user_string(arg2);
9808             if (!p || !p2)
9809                 ret = -TARGET_EFAULT;
9810             else
9811                 ret = get_errno(rename(p, p2));
9812             unlock_user(p2, arg2, 0);
9813             unlock_user(p, arg1, 0);
9814         }
9815         return ret;
9816 #endif
9817 #if defined(TARGET_NR_renameat)
9818     case TARGET_NR_renameat:
9819         {
9820             void *p2;
9821             p  = lock_user_string(arg2);
9822             p2 = lock_user_string(arg4);
9823             if (!p || !p2)
9824                 ret = -TARGET_EFAULT;
9825             else
9826                 ret = get_errno(renameat(arg1, p, arg3, p2));
9827             unlock_user(p2, arg4, 0);
9828             unlock_user(p, arg2, 0);
9829         }
9830         return ret;
9831 #endif
9832 #if defined(TARGET_NR_renameat2)
9833     case TARGET_NR_renameat2:
9834         {
9835             void *p2;
9836             p  = lock_user_string(arg2);
9837             p2 = lock_user_string(arg4);
9838             if (!p || !p2) {
9839                 ret = -TARGET_EFAULT;
9840             } else {
9841                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9842             }
9843             unlock_user(p2, arg4, 0);
9844             unlock_user(p, arg2, 0);
9845         }
9846         return ret;
9847 #endif
9848 #ifdef TARGET_NR_mkdir
9849     case TARGET_NR_mkdir:
9850         if (!(p = lock_user_string(arg1)))
9851             return -TARGET_EFAULT;
9852         ret = get_errno(mkdir(p, arg2));
9853         unlock_user(p, arg1, 0);
9854         return ret;
9855 #endif
9856 #if defined(TARGET_NR_mkdirat)
9857     case TARGET_NR_mkdirat:
9858         if (!(p = lock_user_string(arg2)))
9859             return -TARGET_EFAULT;
9860         ret = get_errno(mkdirat(arg1, p, arg3));
9861         unlock_user(p, arg2, 0);
9862         return ret;
9863 #endif
9864 #ifdef TARGET_NR_rmdir
9865     case TARGET_NR_rmdir:
9866         if (!(p = lock_user_string(arg1)))
9867             return -TARGET_EFAULT;
9868         ret = get_errno(rmdir(p));
9869         unlock_user(p, arg1, 0);
9870         return ret;
9871 #endif
9872     case TARGET_NR_dup:
9873         ret = get_errno(dup(arg1));
9874         if (ret >= 0) {
9875             fd_trans_dup(arg1, ret);
9876         }
9877         return ret;
9878 #ifdef TARGET_NR_pipe
9879     case TARGET_NR_pipe:
9880         return do_pipe(cpu_env, arg1, 0, 0);
9881 #endif
9882 #ifdef TARGET_NR_pipe2
9883     case TARGET_NR_pipe2:
9884         return do_pipe(cpu_env, arg1,
9885                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9886 #endif
9887     case TARGET_NR_times:
9888         {
9889             struct target_tms *tmsp;
9890             struct tms tms;
9891             ret = get_errno(times(&tms));
9892             if (arg1) {
9893                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9894                 if (!tmsp)
9895                     return -TARGET_EFAULT;
9896                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9897                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9898                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9899                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9900             }
9901             if (!is_error(ret))
9902                 ret = host_to_target_clock_t(ret);
9903         }
9904         return ret;
9905     case TARGET_NR_acct:
9906         if (arg1 == 0) {
9907             ret = get_errno(acct(NULL));
9908         } else {
9909             if (!(p = lock_user_string(arg1))) {
9910                 return -TARGET_EFAULT;
9911             }
9912             ret = get_errno(acct(path(p)));
9913             unlock_user(p, arg1, 0);
9914         }
9915         return ret;
9916 #ifdef TARGET_NR_umount2
9917     case TARGET_NR_umount2:
9918         if (!(p = lock_user_string(arg1)))
9919             return -TARGET_EFAULT;
9920         ret = get_errno(umount2(p, arg2));
9921         unlock_user(p, arg1, 0);
9922         return ret;
9923 #endif
9924     case TARGET_NR_ioctl:
9925         return do_ioctl(arg1, arg2, arg3);
9926 #ifdef TARGET_NR_fcntl
9927     case TARGET_NR_fcntl:
9928         return do_fcntl(arg1, arg2, arg3);
9929 #endif
9930     case TARGET_NR_setpgid:
9931         return get_errno(setpgid(arg1, arg2));
9932     case TARGET_NR_umask:
9933         return get_errno(umask(arg1));
9934     case TARGET_NR_chroot:
9935         if (!(p = lock_user_string(arg1)))
9936             return -TARGET_EFAULT;
9937         ret = get_errno(chroot(p));
9938         unlock_user(p, arg1, 0);
9939         return ret;
9940 #ifdef TARGET_NR_dup2
9941     case TARGET_NR_dup2:
9942         ret = get_errno(dup2(arg1, arg2));
9943         if (ret >= 0) {
9944             fd_trans_dup(arg1, arg2);
9945         }
9946         return ret;
9947 #endif
9948 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9949     case TARGET_NR_dup3:
9950     {
9951         int host_flags;
9952 
9953         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9954             return -TARGET_EINVAL;
9955         }
9956         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9957         ret = get_errno(dup3(arg1, arg2, host_flags));
9958         if (ret >= 0) {
9959             fd_trans_dup(arg1, arg2);
9960         }
9961         return ret;
9962     }
9963 #endif
9964 #ifdef TARGET_NR_getppid /* not on alpha */
9965     case TARGET_NR_getppid:
9966         return get_errno(getppid());
9967 #endif
9968 #ifdef TARGET_NR_getpgrp
9969     case TARGET_NR_getpgrp:
9970         return get_errno(getpgrp());
9971 #endif
9972     case TARGET_NR_setsid:
9973         return get_errno(setsid());
9974 #ifdef TARGET_NR_sigaction
9975     case TARGET_NR_sigaction:
9976         {
9977 #if defined(TARGET_MIPS)
9978             struct target_sigaction act, oact, *pact, *old_act;
9979 
9980             if (arg2) {
9981                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9982                     return -TARGET_EFAULT;
9983                 act._sa_handler = old_act->_sa_handler;
9984                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9985                 act.sa_flags = old_act->sa_flags;
9986                 unlock_user_struct(old_act, arg2, 0);
9987                 pact = &act;
9988             } else {
9989                 pact = NULL;
9990             }
9991 
9992             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9993 
9994             if (!is_error(ret) && arg3) {
9995                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9996                     return -TARGET_EFAULT;
9997                 old_act->_sa_handler = oact._sa_handler;
9998                 old_act->sa_flags = oact.sa_flags;
9999                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
10000                 old_act->sa_mask.sig[1] = 0;
10001                 old_act->sa_mask.sig[2] = 0;
10002                 old_act->sa_mask.sig[3] = 0;
10003                 unlock_user_struct(old_act, arg3, 1);
10004             }
10005 #else
10006             struct target_old_sigaction *old_act;
10007             struct target_sigaction act, oact, *pact;
10008             if (arg2) {
10009                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10010                     return -TARGET_EFAULT;
10011                 act._sa_handler = old_act->_sa_handler;
10012                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10013                 act.sa_flags = old_act->sa_flags;
10014 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10015                 act.sa_restorer = old_act->sa_restorer;
10016 #endif
10017                 unlock_user_struct(old_act, arg2, 0);
10018                 pact = &act;
10019             } else {
10020                 pact = NULL;
10021             }
10022             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10023             if (!is_error(ret) && arg3) {
10024                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10025                     return -TARGET_EFAULT;
10026                 old_act->_sa_handler = oact._sa_handler;
10027                 old_act->sa_mask = oact.sa_mask.sig[0];
10028                 old_act->sa_flags = oact.sa_flags;
10029 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10030                 old_act->sa_restorer = oact.sa_restorer;
10031 #endif
10032                 unlock_user_struct(old_act, arg3, 1);
10033             }
10034 #endif
10035         }
10036         return ret;
10037 #endif
10038     case TARGET_NR_rt_sigaction:
10039         {
10040             /*
10041              * For Alpha and SPARC this is a 5-argument syscall, with
10042              * a 'restorer' parameter which must be copied into the
10043              * sa_restorer field of the sigaction struct.
10044              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10045              * and arg5 is the sigsetsize.
10046              */
10047 #if defined(TARGET_ALPHA)
10048             target_ulong sigsetsize = arg4;
10049             target_ulong restorer = arg5;
10050 #elif defined(TARGET_SPARC)
10051             target_ulong restorer = arg4;
10052             target_ulong sigsetsize = arg5;
10053 #else
10054             target_ulong sigsetsize = arg4;
10055             target_ulong restorer = 0;
10056 #endif
10057             struct target_sigaction *act = NULL;
10058             struct target_sigaction *oact = NULL;
10059 
10060             if (sigsetsize != sizeof(target_sigset_t)) {
10061                 return -TARGET_EINVAL;
10062             }
10063             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10064                 return -TARGET_EFAULT;
10065             }
10066             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10067                 ret = -TARGET_EFAULT;
10068             } else {
10069                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10070                 if (oact) {
10071                     unlock_user_struct(oact, arg3, 1);
10072                 }
10073             }
10074             if (act) {
10075                 unlock_user_struct(act, arg2, 0);
10076             }
10077         }
10078         return ret;
10079 #ifdef TARGET_NR_sgetmask /* not on alpha */
10080     case TARGET_NR_sgetmask:
10081         {
10082             sigset_t cur_set;
10083             abi_ulong target_set;
10084             ret = do_sigprocmask(0, NULL, &cur_set);
10085             if (!ret) {
10086                 host_to_target_old_sigset(&target_set, &cur_set);
10087                 ret = target_set;
10088             }
10089         }
10090         return ret;
10091 #endif
10092 #ifdef TARGET_NR_ssetmask /* not on alpha */
10093     case TARGET_NR_ssetmask:
10094         {
10095             sigset_t set, oset;
10096             abi_ulong target_set = arg1;
10097             target_to_host_old_sigset(&set, &target_set);
10098             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10099             if (!ret) {
10100                 host_to_target_old_sigset(&target_set, &oset);
10101                 ret = target_set;
10102             }
10103         }
10104         return ret;
10105 #endif
10106 #ifdef TARGET_NR_sigprocmask
10107     case TARGET_NR_sigprocmask:
10108         {
10109 #if defined(TARGET_ALPHA)
10110             sigset_t set, oldset;
10111             abi_ulong mask;
10112             int how;
10113 
10114             switch (arg1) {
10115             case TARGET_SIG_BLOCK:
10116                 how = SIG_BLOCK;
10117                 break;
10118             case TARGET_SIG_UNBLOCK:
10119                 how = SIG_UNBLOCK;
10120                 break;
10121             case TARGET_SIG_SETMASK:
10122                 how = SIG_SETMASK;
10123                 break;
10124             default:
10125                 return -TARGET_EINVAL;
10126             }
10127             mask = arg2;
10128             target_to_host_old_sigset(&set, &mask);
10129 
10130             ret = do_sigprocmask(how, &set, &oldset);
10131             if (!is_error(ret)) {
10132                 host_to_target_old_sigset(&mask, &oldset);
10133                 ret = mask;
10134                 cpu_env->ir[IR_V0] = 0; /* force no error */
10135             }
10136 #else
10137             sigset_t set, oldset, *set_ptr;
10138             int how;
10139 
10140             if (arg2) {
10141                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10142                 if (!p) {
10143                     return -TARGET_EFAULT;
10144                 }
10145                 target_to_host_old_sigset(&set, p);
10146                 unlock_user(p, arg2, 0);
10147                 set_ptr = &set;
10148                 switch (arg1) {
10149                 case TARGET_SIG_BLOCK:
10150                     how = SIG_BLOCK;
10151                     break;
10152                 case TARGET_SIG_UNBLOCK:
10153                     how = SIG_UNBLOCK;
10154                     break;
10155                 case TARGET_SIG_SETMASK:
10156                     how = SIG_SETMASK;
10157                     break;
10158                 default:
10159                     return -TARGET_EINVAL;
10160                 }
10161             } else {
10162                 how = 0;
10163                 set_ptr = NULL;
10164             }
10165             ret = do_sigprocmask(how, set_ptr, &oldset);
10166             if (!is_error(ret) && arg3) {
10167                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10168                     return -TARGET_EFAULT;
10169                 host_to_target_old_sigset(p, &oldset);
10170                 unlock_user(p, arg3, sizeof(target_sigset_t));
10171             }
10172 #endif
10173         }
10174         return ret;
10175 #endif
10176     case TARGET_NR_rt_sigprocmask:
10177         {
10178             int how = arg1;
10179             sigset_t set, oldset, *set_ptr;
10180 
10181             if (arg4 != sizeof(target_sigset_t)) {
10182                 return -TARGET_EINVAL;
10183             }
10184 
10185             if (arg2) {
10186                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10187                 if (!p) {
10188                     return -TARGET_EFAULT;
10189                 }
10190                 target_to_host_sigset(&set, p);
10191                 unlock_user(p, arg2, 0);
10192                 set_ptr = &set;
10193                 switch(how) {
10194                 case TARGET_SIG_BLOCK:
10195                     how = SIG_BLOCK;
10196                     break;
10197                 case TARGET_SIG_UNBLOCK:
10198                     how = SIG_UNBLOCK;
10199                     break;
10200                 case TARGET_SIG_SETMASK:
10201                     how = SIG_SETMASK;
10202                     break;
10203                 default:
10204                     return -TARGET_EINVAL;
10205                 }
10206             } else {
10207                 how = 0;
10208                 set_ptr = NULL;
10209             }
10210             ret = do_sigprocmask(how, set_ptr, &oldset);
10211             if (!is_error(ret) && arg3) {
10212                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10213                     return -TARGET_EFAULT;
10214                 host_to_target_sigset(p, &oldset);
10215                 unlock_user(p, arg3, sizeof(target_sigset_t));
10216             }
10217         }
10218         return ret;
10219 #ifdef TARGET_NR_sigpending
10220     case TARGET_NR_sigpending:
10221         {
10222             sigset_t set;
10223             ret = get_errno(sigpending(&set));
10224             if (!is_error(ret)) {
10225                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10226                     return -TARGET_EFAULT;
10227                 host_to_target_old_sigset(p, &set);
10228                 unlock_user(p, arg1, sizeof(target_sigset_t));
10229             }
10230         }
10231         return ret;
10232 #endif
10233     case TARGET_NR_rt_sigpending:
10234         {
10235             sigset_t set;
10236 
10237             /* Yes, this check is >, not != like most. We follow the
10238              * kernel's logic here: it implements NR_sigpending through
10239              * the same code path, and in that case the old_sigset_t is
10240              * smaller in size.
10241              */
10242             if (arg2 > sizeof(target_sigset_t)) {
10243                 return -TARGET_EINVAL;
10244             }
10245 
10246             ret = get_errno(sigpending(&set));
10247             if (!is_error(ret)) {
10248                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10249                     return -TARGET_EFAULT;
10250                 host_to_target_sigset(p, &set);
10251                 unlock_user(p, arg1, sizeof(target_sigset_t));
10252             }
10253         }
10254         return ret;
10255 #ifdef TARGET_NR_sigsuspend
10256     case TARGET_NR_sigsuspend:
10257         {
10258             sigset_t *set;
10259 
10260 #if defined(TARGET_ALPHA)
10261             TaskState *ts = get_task_state(cpu);
10262             /* target_to_host_old_sigset will bswap back */
10263             abi_ulong mask = tswapal(arg1);
10264             set = &ts->sigsuspend_mask;
10265             target_to_host_old_sigset(set, &mask);
10266 #else
10267             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10268             if (ret != 0) {
10269                 return ret;
10270             }
10271 #endif
10272             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10273             finish_sigsuspend_mask(ret);
10274         }
10275         return ret;
10276 #endif
10277     case TARGET_NR_rt_sigsuspend:
10278         {
10279             sigset_t *set;
10280 
10281             ret = process_sigsuspend_mask(&set, arg1, arg2);
10282             if (ret != 0) {
10283                 return ret;
10284             }
10285             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10286             finish_sigsuspend_mask(ret);
10287         }
10288         return ret;
10289 #ifdef TARGET_NR_rt_sigtimedwait
10290     case TARGET_NR_rt_sigtimedwait:
10291         {
10292             sigset_t set;
10293             struct timespec uts, *puts;
10294             siginfo_t uinfo;
10295 
10296             if (arg4 != sizeof(target_sigset_t)) {
10297                 return -TARGET_EINVAL;
10298             }
10299 
10300             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10301                 return -TARGET_EFAULT;
10302             target_to_host_sigset(&set, p);
10303             unlock_user(p, arg1, 0);
10304             if (arg3) {
10305                 puts = &uts;
10306                 if (target_to_host_timespec(puts, arg3)) {
10307                     return -TARGET_EFAULT;
10308                 }
10309             } else {
10310                 puts = NULL;
10311             }
10312             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10313                                                  SIGSET_T_SIZE));
10314             if (!is_error(ret)) {
10315                 if (arg2) {
10316                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10317                                   0);
10318                     if (!p) {
10319                         return -TARGET_EFAULT;
10320                     }
10321                     host_to_target_siginfo(p, &uinfo);
10322                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10323                 }
10324                 ret = host_to_target_signal(ret);
10325             }
10326         }
10327         return ret;
10328 #endif
10329 #ifdef TARGET_NR_rt_sigtimedwait_time64
10330     case TARGET_NR_rt_sigtimedwait_time64:
10331         {
10332             sigset_t set;
10333             struct timespec uts, *puts;
10334             siginfo_t uinfo;
10335 
10336             if (arg4 != sizeof(target_sigset_t)) {
10337                 return -TARGET_EINVAL;
10338             }
10339 
10340             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10341             if (!p) {
10342                 return -TARGET_EFAULT;
10343             }
10344             target_to_host_sigset(&set, p);
10345             unlock_user(p, arg1, 0);
10346             if (arg3) {
10347                 puts = &uts;
10348                 if (target_to_host_timespec64(puts, arg3)) {
10349                     return -TARGET_EFAULT;
10350                 }
10351             } else {
10352                 puts = NULL;
10353             }
10354             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10355                                                  SIGSET_T_SIZE));
10356             if (!is_error(ret)) {
10357                 if (arg2) {
10358                     p = lock_user(VERIFY_WRITE, arg2,
10359                                   sizeof(target_siginfo_t), 0);
10360                     if (!p) {
10361                         return -TARGET_EFAULT;
10362                     }
10363                     host_to_target_siginfo(p, &uinfo);
10364                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10365                 }
10366                 ret = host_to_target_signal(ret);
10367             }
10368         }
10369         return ret;
10370 #endif
10371     case TARGET_NR_rt_sigqueueinfo:
10372         {
10373             siginfo_t uinfo;
10374 
10375             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10376             if (!p) {
10377                 return -TARGET_EFAULT;
10378             }
10379             target_to_host_siginfo(&uinfo, p);
10380             unlock_user(p, arg3, 0);
10381             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10382         }
10383         return ret;
10384     case TARGET_NR_rt_tgsigqueueinfo:
10385         {
10386             siginfo_t uinfo;
10387 
10388             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10389             if (!p) {
10390                 return -TARGET_EFAULT;
10391             }
10392             target_to_host_siginfo(&uinfo, p);
10393             unlock_user(p, arg4, 0);
10394             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10395         }
10396         return ret;
10397 #ifdef TARGET_NR_sigreturn
10398     case TARGET_NR_sigreturn:
10399         if (block_signals()) {
10400             return -QEMU_ERESTARTSYS;
10401         }
10402         return do_sigreturn(cpu_env);
10403 #endif
10404     case TARGET_NR_rt_sigreturn:
10405         if (block_signals()) {
10406             return -QEMU_ERESTARTSYS;
10407         }
10408         return do_rt_sigreturn(cpu_env);
10409     case TARGET_NR_sethostname:
10410         if (!(p = lock_user_string(arg1)))
10411             return -TARGET_EFAULT;
10412         ret = get_errno(sethostname(p, arg2));
10413         unlock_user(p, arg1, 0);
10414         return ret;
10415 #ifdef TARGET_NR_setrlimit
10416     case TARGET_NR_setrlimit:
10417         {
10418             int resource = target_to_host_resource(arg1);
10419             struct target_rlimit *target_rlim;
10420             struct rlimit rlim;
10421             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10422                 return -TARGET_EFAULT;
10423             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10424             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10425             unlock_user_struct(target_rlim, arg2, 0);
10426             /*
10427              * If we just passed through resource limit settings for memory then
10428              * they would also apply to QEMU's own allocations, and QEMU will
10429              * crash or hang or die if its allocations fail. Ideally we would
10430              * track the guest allocations in QEMU and apply the limits ourselves.
10431              * For now, just tell the guest the call succeeded but don't actually
10432              * limit anything.
10433              */
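            /*
             * So, for example, a guest setrlimit(RLIMIT_AS, ...) is reported
             * as successful but has no effect, while non-memory resources
             * such as RLIMIT_NOFILE are passed straight through to the host.
             */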
10434             if (resource != RLIMIT_AS &&
10435                 resource != RLIMIT_DATA &&
10436                 resource != RLIMIT_STACK) {
10437                 return get_errno(setrlimit(resource, &rlim));
10438             } else {
10439                 return 0;
10440             }
10441         }
10442 #endif
10443 #ifdef TARGET_NR_getrlimit
10444     case TARGET_NR_getrlimit:
10445         {
10446             int resource = target_to_host_resource(arg1);
10447             struct target_rlimit *target_rlim;
10448             struct rlimit rlim;
10449 
10450             ret = get_errno(getrlimit(resource, &rlim));
10451             if (!is_error(ret)) {
10452                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10453                     return -TARGET_EFAULT;
10454                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10455                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10456                 unlock_user_struct(target_rlim, arg2, 1);
10457             }
10458         }
10459         return ret;
10460 #endif
10461     case TARGET_NR_getrusage:
10462         {
10463             struct rusage rusage;
10464             ret = get_errno(getrusage(arg1, &rusage));
10465             if (!is_error(ret)) {
10466                 ret = host_to_target_rusage(arg2, &rusage);
10467             }
10468         }
10469         return ret;
10470 #if defined(TARGET_NR_gettimeofday)
10471     case TARGET_NR_gettimeofday:
10472         {
10473             struct timeval tv;
10474             struct timezone tz;
10475 
10476             ret = get_errno(gettimeofday(&tv, &tz));
10477             if (!is_error(ret)) {
10478                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10479                     return -TARGET_EFAULT;
10480                 }
10481                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10482                     return -TARGET_EFAULT;
10483                 }
10484             }
10485         }
10486         return ret;
10487 #endif
10488 #if defined(TARGET_NR_settimeofday)
10489     case TARGET_NR_settimeofday:
10490         {
10491             struct timeval tv, *ptv = NULL;
10492             struct timezone tz, *ptz = NULL;
10493 
10494             if (arg1) {
10495                 if (copy_from_user_timeval(&tv, arg1)) {
10496                     return -TARGET_EFAULT;
10497                 }
10498                 ptv = &tv;
10499             }
10500 
10501             if (arg2) {
10502                 if (copy_from_user_timezone(&tz, arg2)) {
10503                     return -TARGET_EFAULT;
10504                 }
10505                 ptz = &tz;
10506             }
10507 
10508             return get_errno(settimeofday(ptv, ptz));
10509         }
10510 #endif
10511 #if defined(TARGET_NR_select)
10512     case TARGET_NR_select:
10513 #if defined(TARGET_WANT_NI_OLD_SELECT)
10514         /* Some architectures used to implement old_select here,
10515          * but it now returns ENOSYS for them.
10516          */
10517         ret = -TARGET_ENOSYS;
10518 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10519         ret = do_old_select(arg1);
10520 #else
10521         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10522 #endif
10523         return ret;
10524 #endif
10525 #ifdef TARGET_NR_pselect6
10526     case TARGET_NR_pselect6:
10527         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10528 #endif
10529 #ifdef TARGET_NR_pselect6_time64
10530     case TARGET_NR_pselect6_time64:
10531         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10532 #endif
10533 #ifdef TARGET_NR_symlink
10534     case TARGET_NR_symlink:
10535         {
10536             void *p2;
10537             p = lock_user_string(arg1);
10538             p2 = lock_user_string(arg2);
10539             if (!p || !p2)
10540                 ret = -TARGET_EFAULT;
10541             else
10542                 ret = get_errno(symlink(p, p2));
10543             unlock_user(p2, arg2, 0);
10544             unlock_user(p, arg1, 0);
10545         }
10546         return ret;
10547 #endif
10548 #if defined(TARGET_NR_symlinkat)
10549     case TARGET_NR_symlinkat:
10550         {
10551             void *p2;
10552             p  = lock_user_string(arg1);
10553             p2 = lock_user_string(arg3);
10554             if (!p || !p2)
10555                 ret = -TARGET_EFAULT;
10556             else
10557                 ret = get_errno(symlinkat(p, arg2, p2));
10558             unlock_user(p2, arg3, 0);
10559             unlock_user(p, arg1, 0);
10560         }
10561         return ret;
10562 #endif
10563 #ifdef TARGET_NR_readlink
10564     case TARGET_NR_readlink:
10565         {
10566             void *p2;
10567             p = lock_user_string(arg1);
10568             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10569             ret = get_errno(do_guest_readlink(p, p2, arg3));
10570             unlock_user(p2, arg2, ret);
10571             unlock_user(p, arg1, 0);
10572         }
10573         return ret;
10574 #endif
10575 #if defined(TARGET_NR_readlinkat)
10576     case TARGET_NR_readlinkat:
10577         {
10578             void *p2;
10579             p  = lock_user_string(arg2);
10580             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10581             if (!p || !p2) {
10582                 ret = -TARGET_EFAULT;
10583             } else if (!arg4) {
10584                 /* Short circuit this for the magic exe check. */
10585                 ret = -TARGET_EINVAL;
10586             } else if (is_proc_myself((const char *)p, "exe")) {
10587                 /*
10588                  * Don't worry about sign mismatch as earlier mapping
10589                  * logic would have thrown a bad address error.
10590                  */
10591                 ret = MIN(strlen(exec_path), arg4);
10592                 /* As with readlink(2), the result is not NUL terminated. */
10593                 memcpy(p2, exec_path, ret);
10594             } else {
10595                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10596             }
10597             unlock_user(p2, arg3, ret);
10598             unlock_user(p, arg2, 0);
10599         }
10600         return ret;
10601 #endif
10602 #ifdef TARGET_NR_swapon
10603     case TARGET_NR_swapon:
10604         if (!(p = lock_user_string(arg1)))
10605             return -TARGET_EFAULT;
10606         ret = get_errno(swapon(p, arg2));
10607         unlock_user(p, arg1, 0);
10608         return ret;
10609 #endif
10610     case TARGET_NR_reboot:
10611         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10612            /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 and must be ignored in all other cases */
10613            p = lock_user_string(arg4);
10614            if (!p) {
10615                return -TARGET_EFAULT;
10616            }
10617            ret = get_errno(reboot(arg1, arg2, arg3, p));
10618            unlock_user(p, arg4, 0);
10619         } else {
10620            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10621         }
10622         return ret;
10623 #ifdef TARGET_NR_mmap
10624     case TARGET_NR_mmap:
10625 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10626         {
10627             abi_ulong *v;
10628             abi_ulong v1, v2, v3, v4, v5, v6;
10629             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10630                 return -TARGET_EFAULT;
10631             v1 = tswapal(v[0]);
10632             v2 = tswapal(v[1]);
10633             v3 = tswapal(v[2]);
10634             v4 = tswapal(v[3]);
10635             v5 = tswapal(v[4]);
10636             v6 = tswapal(v[5]);
10637             unlock_user(v, arg1, 0);
10638             return do_mmap(v1, v2, v3, v4, v5, v6);
10639         }
10640 #else
10641         /* mmap pointers are always untagged */
10642         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10643 #endif
10644 #endif
10645 #ifdef TARGET_NR_mmap2
10646     case TARGET_NR_mmap2:
10647 #ifndef MMAP_SHIFT
10648 #define MMAP_SHIFT 12
10649 #endif
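        /*
         * The mmap2 offset argument is given in units of 1 << MMAP_SHIFT
         * bytes (4096 by default), so convert it to a byte offset before
         * handing it to do_mmap().
         */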
10650         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10651                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10652 #endif
10653     case TARGET_NR_munmap:
10654         arg1 = cpu_untagged_addr(cpu, arg1);
10655         return get_errno(target_munmap(arg1, arg2));
10656     case TARGET_NR_mprotect:
10657         arg1 = cpu_untagged_addr(cpu, arg1);
10658         {
10659             TaskState *ts = get_task_state(cpu);
10660             /* Special hack to detect libc making the stack executable.  */
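            /*
             * PROT_GROWSDOWN cannot simply be passed through to the host,
             * since the guest stack is not a grows-down mapping there;
             * instead extend the range down to the guest stack limit by
             * hand, roughly mirroring what the kernel would do.
             */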
10661             if ((arg3 & PROT_GROWSDOWN)
10662                 && arg1 >= ts->info->stack_limit
10663                 && arg1 <= ts->info->start_stack) {
10664                 arg3 &= ~PROT_GROWSDOWN;
10665                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10666                 arg1 = ts->info->stack_limit;
10667             }
10668         }
10669         return get_errno(target_mprotect(arg1, arg2, arg3));
10670 #ifdef TARGET_NR_mremap
10671     case TARGET_NR_mremap:
10672         arg1 = cpu_untagged_addr(cpu, arg1);
10673         /* mremap new_addr (arg5) is always untagged */
10674         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10675 #endif
10676         /* ??? msync/mlock/munlock are broken for softmmu.  */
10677 #ifdef TARGET_NR_msync
10678     case TARGET_NR_msync:
10679         return get_errno(msync(g2h(cpu, arg1), arg2,
10680                                target_to_host_msync_arg(arg3)));
10681 #endif
10682 #ifdef TARGET_NR_mlock
10683     case TARGET_NR_mlock:
10684         return get_errno(mlock(g2h(cpu, arg1), arg2));
10685 #endif
10686 #ifdef TARGET_NR_munlock
10687     case TARGET_NR_munlock:
10688         return get_errno(munlock(g2h(cpu, arg1), arg2));
10689 #endif
10690 #ifdef TARGET_NR_mlockall
10691     case TARGET_NR_mlockall:
10692         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10693 #endif
10694 #ifdef TARGET_NR_munlockall
10695     case TARGET_NR_munlockall:
10696         return get_errno(munlockall());
10697 #endif
10698 #ifdef TARGET_NR_truncate
10699     case TARGET_NR_truncate:
10700         if (!(p = lock_user_string(arg1)))
10701             return -TARGET_EFAULT;
10702         ret = get_errno(truncate(p, arg2));
10703         unlock_user(p, arg1, 0);
10704         return ret;
10705 #endif
10706 #ifdef TARGET_NR_ftruncate
10707     case TARGET_NR_ftruncate:
10708         return get_errno(ftruncate(arg1, arg2));
10709 #endif
10710     case TARGET_NR_fchmod:
10711         return get_errno(fchmod(arg1, arg2));
10712 #if defined(TARGET_NR_fchmodat)
10713     case TARGET_NR_fchmodat:
10714         if (!(p = lock_user_string(arg2)))
10715             return -TARGET_EFAULT;
10716         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10717         unlock_user(p, arg2, 0);
10718         return ret;
10719 #endif
10720 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
10721     case TARGET_NR_fchmodat2:
10722         if (!(p = lock_user_string(arg2))) {
10723             return -TARGET_EFAULT;
10724         }
10725         ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4));
10726         unlock_user(p, arg2, 0);
10727         return ret;
10728 #endif
10729     case TARGET_NR_getpriority:
10730         /* Note that negative values are valid for getpriority, so we must
10731            differentiate based on errno settings.  */
10732         errno = 0;
10733         ret = getpriority(arg1, arg2);
10734         if (ret == -1 && errno != 0) {
10735             return -host_to_target_errno(errno);
10736         }
10737 #ifdef TARGET_ALPHA
10738         /* Return value is the unbiased priority.  Signal no error.  */
10739         cpu_env->ir[IR_V0] = 0;
10740 #else
10741         /* Return value is a biased priority (20 - nice value) to avoid negative numbers; e.g. nice -20 is reported as 40.  */
10742         ret = 20 - ret;
10743 #endif
10744         return ret;
10745     case TARGET_NR_setpriority:
10746         return get_errno(setpriority(arg1, arg2, arg3));
10747 #ifdef TARGET_NR_statfs
10748     case TARGET_NR_statfs:
10749         if (!(p = lock_user_string(arg1))) {
10750             return -TARGET_EFAULT;
10751         }
10752         ret = get_errno(statfs(path(p), &stfs));
10753         unlock_user(p, arg1, 0);
10754     convert_statfs:
10755         if (!is_error(ret)) {
10756             struct target_statfs *target_stfs;
10757 
10758             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10759                 return -TARGET_EFAULT;
10760             __put_user(stfs.f_type, &target_stfs->f_type);
10761             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10762             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10763             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10764             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10765             __put_user(stfs.f_files, &target_stfs->f_files);
10766             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10767             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10768             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10769             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10770             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10771 #ifdef _STATFS_F_FLAGS
10772             __put_user(stfs.f_flags, &target_stfs->f_flags);
10773 #else
10774             __put_user(0, &target_stfs->f_flags);
10775 #endif
10776             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10777             unlock_user_struct(target_stfs, arg2, 1);
10778         }
10779         return ret;
10780 #endif
10781 #ifdef TARGET_NR_fstatfs
10782     case TARGET_NR_fstatfs:
10783         ret = get_errno(fstatfs(arg1, &stfs));
10784         goto convert_statfs;
10785 #endif
10786 #ifdef TARGET_NR_statfs64
10787     case TARGET_NR_statfs64:
10788         if (!(p = lock_user_string(arg1))) {
10789             return -TARGET_EFAULT;
10790         }
10791         ret = get_errno(statfs(path(p), &stfs));
10792         unlock_user(p, arg1, 0);
10793     convert_statfs64:
10794         if (!is_error(ret)) {
10795             struct target_statfs64 *target_stfs;
10796 
10797             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10798                 return -TARGET_EFAULT;
10799             __put_user(stfs.f_type, &target_stfs->f_type);
10800             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10801             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10802             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10803             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10804             __put_user(stfs.f_files, &target_stfs->f_files);
10805             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10806             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10807             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10808             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10809             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10810 #ifdef _STATFS_F_FLAGS
10811             __put_user(stfs.f_flags, &target_stfs->f_flags);
10812 #else
10813             __put_user(0, &target_stfs->f_flags);
10814 #endif
10815             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10816             unlock_user_struct(target_stfs, arg3, 1);
10817         }
10818         return ret;
10819     case TARGET_NR_fstatfs64:
10820         ret = get_errno(fstatfs(arg1, &stfs));
10821         goto convert_statfs64;
10822 #endif
10823 #ifdef TARGET_NR_socketcall
10824     case TARGET_NR_socketcall:
10825         return do_socketcall(arg1, arg2);
10826 #endif
10827 #ifdef TARGET_NR_accept
10828     case TARGET_NR_accept:
10829         return do_accept4(arg1, arg2, arg3, 0);
10830 #endif
10831 #ifdef TARGET_NR_accept4
10832     case TARGET_NR_accept4:
10833         return do_accept4(arg1, arg2, arg3, arg4);
10834 #endif
10835 #ifdef TARGET_NR_bind
10836     case TARGET_NR_bind:
10837         return do_bind(arg1, arg2, arg3);
10838 #endif
10839 #ifdef TARGET_NR_connect
10840     case TARGET_NR_connect:
10841         return do_connect(arg1, arg2, arg3);
10842 #endif
10843 #ifdef TARGET_NR_getpeername
10844     case TARGET_NR_getpeername:
10845         return do_getpeername(arg1, arg2, arg3);
10846 #endif
10847 #ifdef TARGET_NR_getsockname
10848     case TARGET_NR_getsockname:
10849         return do_getsockname(arg1, arg2, arg3);
10850 #endif
10851 #ifdef TARGET_NR_getsockopt
10852     case TARGET_NR_getsockopt:
10853         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10854 #endif
10855 #ifdef TARGET_NR_listen
10856     case TARGET_NR_listen:
10857         return get_errno(listen(arg1, arg2));
10858 #endif
10859 #ifdef TARGET_NR_recv
10860     case TARGET_NR_recv:
10861         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10862 #endif
10863 #ifdef TARGET_NR_recvfrom
10864     case TARGET_NR_recvfrom:
10865         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10866 #endif
10867 #ifdef TARGET_NR_recvmsg
10868     case TARGET_NR_recvmsg:
10869         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10870 #endif
10871 #ifdef TARGET_NR_send
10872     case TARGET_NR_send:
10873         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10874 #endif
10875 #ifdef TARGET_NR_sendmsg
10876     case TARGET_NR_sendmsg:
10877         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10878 #endif
10879 #ifdef TARGET_NR_sendmmsg
10880     case TARGET_NR_sendmmsg:
10881         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10882 #endif
10883 #ifdef TARGET_NR_recvmmsg
10884     case TARGET_NR_recvmmsg:
10885         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10886 #endif
10887 #ifdef TARGET_NR_sendto
10888     case TARGET_NR_sendto:
10889         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10890 #endif
10891 #ifdef TARGET_NR_shutdown
10892     case TARGET_NR_shutdown:
10893         return get_errno(shutdown(arg1, arg2));
10894 #endif
10895 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10896     case TARGET_NR_getrandom:
10897         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10898         if (!p) {
10899             return -TARGET_EFAULT;
10900         }
10901         ret = get_errno(getrandom(p, arg2, arg3));
10902         unlock_user(p, arg1, ret);
10903         return ret;
10904 #endif
10905 #ifdef TARGET_NR_socket
10906     case TARGET_NR_socket:
10907         return do_socket(arg1, arg2, arg3);
10908 #endif
10909 #ifdef TARGET_NR_socketpair
10910     case TARGET_NR_socketpair:
10911         return do_socketpair(arg1, arg2, arg3, arg4);
10912 #endif
10913 #ifdef TARGET_NR_setsockopt
10914     case TARGET_NR_setsockopt:
10915         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10916 #endif
10917 #if defined(TARGET_NR_syslog)
10918     case TARGET_NR_syslog:
10919         {
10920             int len = arg3;
10921 
10922             switch (arg1) {
10923             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10924             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10925             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10926             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10927             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10928             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10929             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10930             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10931                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10932             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10933             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10934             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10935                 {
10936                     if (len < 0) {
10937                         return -TARGET_EINVAL;
10938                     }
10939                     if (len == 0) {
10940                         return 0;
10941                     }
10942                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10943                     if (!p) {
10944                         return -TARGET_EFAULT;
10945                     }
10946                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10947                     unlock_user(p, arg2, arg3);
10948                 }
10949                 return ret;
10950             default:
10951                 return -TARGET_EINVAL;
10952             }
10953         }
10954         break;
10955 #endif
10956     case TARGET_NR_setitimer:
10957         {
10958             struct itimerval value, ovalue, *pvalue;
10959 
10960             if (arg2) {
10961                 pvalue = &value;
10962                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10963                     || copy_from_user_timeval(&pvalue->it_value,
10964                                               arg2 + sizeof(struct target_timeval)))
10965                     return -TARGET_EFAULT;
10966             } else {
10967                 pvalue = NULL;
10968             }
10969             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10970             if (!is_error(ret) && arg3) {
10971                 if (copy_to_user_timeval(arg3,
10972                                          &ovalue.it_interval)
10973                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10974                                             &ovalue.it_value))
10975                     return -TARGET_EFAULT;
10976             }
10977         }
10978         return ret;
10979     case TARGET_NR_getitimer:
10980         {
10981             struct itimerval value;
10982 
10983             ret = get_errno(getitimer(arg1, &value));
10984             if (!is_error(ret) && arg2) {
10985                 if (copy_to_user_timeval(arg2,
10986                                          &value.it_interval)
10987                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10988                                             &value.it_value))
10989                     return -TARGET_EFAULT;
10990             }
10991         }
10992         return ret;
10993 #ifdef TARGET_NR_stat
10994     case TARGET_NR_stat:
10995         if (!(p = lock_user_string(arg1))) {
10996             return -TARGET_EFAULT;
10997         }
10998         ret = get_errno(stat(path(p), &st));
10999         unlock_user(p, arg1, 0);
11000         goto do_stat;
11001 #endif
11002 #ifdef TARGET_NR_lstat
11003     case TARGET_NR_lstat:
11004         if (!(p = lock_user_string(arg1))) {
11005             return -TARGET_EFAULT;
11006         }
11007         ret = get_errno(lstat(path(p), &st));
11008         unlock_user(p, arg1, 0);
11009         goto do_stat;
11010 #endif
11011 #ifdef TARGET_NR_fstat
11012     case TARGET_NR_fstat:
11013         {
11014             ret = get_errno(fstat(arg1, &st));
11015 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11016         do_stat:
11017 #endif
11018             if (!is_error(ret)) {
11019                 struct target_stat *target_st;
11020 
11021                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11022                     return -TARGET_EFAULT;
11023                 memset(target_st, 0, sizeof(*target_st));
11024                 __put_user(st.st_dev, &target_st->st_dev);
11025                 __put_user(st.st_ino, &target_st->st_ino);
11026                 __put_user(st.st_mode, &target_st->st_mode);
11027                 __put_user(st.st_uid, &target_st->st_uid);
11028                 __put_user(st.st_gid, &target_st->st_gid);
11029                 __put_user(st.st_nlink, &target_st->st_nlink);
11030                 __put_user(st.st_rdev, &target_st->st_rdev);
11031                 __put_user(st.st_size, &target_st->st_size);
11032                 __put_user(st.st_blksize, &target_st->st_blksize);
11033                 __put_user(st.st_blocks, &target_st->st_blocks);
11034                 __put_user(st.st_atime, &target_st->target_st_atime);
11035                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11036                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11037 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11038                 __put_user(st.st_atim.tv_nsec,
11039                            &target_st->target_st_atime_nsec);
11040                 __put_user(st.st_mtim.tv_nsec,
11041                            &target_st->target_st_mtime_nsec);
11042                 __put_user(st.st_ctim.tv_nsec,
11043                            &target_st->target_st_ctime_nsec);
11044 #endif
11045                 unlock_user_struct(target_st, arg2, 1);
11046             }
11047         }
11048         return ret;
11049 #endif
11050     case TARGET_NR_vhangup:
11051         return get_errno(vhangup());
11052 #ifdef TARGET_NR_syscall
11053     case TARGET_NR_syscall:
11054         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11055                           arg6, arg7, arg8, 0);
11056 #endif
11057 #if defined(TARGET_NR_wait4)
11058     case TARGET_NR_wait4:
11059         {
11060             int status;
11061             abi_long status_ptr = arg2;
11062             struct rusage rusage, *rusage_ptr;
11063             abi_ulong target_rusage = arg4;
11064             abi_long rusage_err;
11065             if (target_rusage)
11066                 rusage_ptr = &rusage;
11067             else
11068                 rusage_ptr = NULL;
11069             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11070             if (!is_error(ret)) {
11071                 if (status_ptr && ret) {
11072                     status = host_to_target_waitstatus(status);
11073                     if (put_user_s32(status, status_ptr))
11074                         return -TARGET_EFAULT;
11075                 }
11076                 if (target_rusage) {
11077                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11078                     if (rusage_err) {
11079                         ret = rusage_err;
11080                     }
11081                 }
11082             }
11083         }
11084         return ret;
11085 #endif
11086 #ifdef TARGET_NR_swapoff
11087     case TARGET_NR_swapoff:
11088         if (!(p = lock_user_string(arg1)))
11089             return -TARGET_EFAULT;
11090         ret = get_errno(swapoff(p));
11091         unlock_user(p, arg1, 0);
11092         return ret;
11093 #endif
11094     case TARGET_NR_sysinfo:
11095         {
11096             struct target_sysinfo *target_value;
11097             struct sysinfo value;
11098             ret = get_errno(sysinfo(&value));
11099             if (!is_error(ret) && arg1)
11100             {
11101                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11102                     return -TARGET_EFAULT;
11103                 __put_user(value.uptime, &target_value->uptime);
11104                 __put_user(value.loads[0], &target_value->loads[0]);
11105                 __put_user(value.loads[1], &target_value->loads[1]);
11106                 __put_user(value.loads[2], &target_value->loads[2]);
11107                 __put_user(value.totalram, &target_value->totalram);
11108                 __put_user(value.freeram, &target_value->freeram);
11109                 __put_user(value.sharedram, &target_value->sharedram);
11110                 __put_user(value.bufferram, &target_value->bufferram);
11111                 __put_user(value.totalswap, &target_value->totalswap);
11112                 __put_user(value.freeswap, &target_value->freeswap);
11113                 __put_user(value.procs, &target_value->procs);
11114                 __put_user(value.totalhigh, &target_value->totalhigh);
11115                 __put_user(value.freehigh, &target_value->freehigh);
11116                 __put_user(value.mem_unit, &target_value->mem_unit);
11117                 unlock_user_struct(target_value, arg1, 1);
11118             }
11119         }
11120         return ret;
11121 #ifdef TARGET_NR_ipc
11122     case TARGET_NR_ipc:
11123         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11124 #endif
11125 #ifdef TARGET_NR_semget
11126     case TARGET_NR_semget:
11127         return get_errno(semget(arg1, arg2, arg3));
11128 #endif
11129 #ifdef TARGET_NR_semop
11130     case TARGET_NR_semop:
11131         return do_semtimedop(arg1, arg2, arg3, 0, false);
11132 #endif
11133 #ifdef TARGET_NR_semtimedop
11134     case TARGET_NR_semtimedop:
11135         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11136 #endif
11137 #ifdef TARGET_NR_semtimedop_time64
11138     case TARGET_NR_semtimedop_time64:
11139         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11140 #endif
11141 #ifdef TARGET_NR_semctl
11142     case TARGET_NR_semctl:
11143         return do_semctl(arg1, arg2, arg3, arg4);
11144 #endif
11145 #ifdef TARGET_NR_msgctl
11146     case TARGET_NR_msgctl:
11147         return do_msgctl(arg1, arg2, arg3);
11148 #endif
11149 #ifdef TARGET_NR_msgget
11150     case TARGET_NR_msgget:
11151         return get_errno(msgget(arg1, arg2));
11152 #endif
11153 #ifdef TARGET_NR_msgrcv
11154     case TARGET_NR_msgrcv:
11155         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11156 #endif
11157 #ifdef TARGET_NR_msgsnd
11158     case TARGET_NR_msgsnd:
11159         return do_msgsnd(arg1, arg2, arg3, arg4);
11160 #endif
11161 #ifdef TARGET_NR_shmget
11162     case TARGET_NR_shmget:
11163         return get_errno(shmget(arg1, arg2, arg3));
11164 #endif
11165 #ifdef TARGET_NR_shmctl
11166     case TARGET_NR_shmctl:
11167         return do_shmctl(arg1, arg2, arg3);
11168 #endif
11169 #ifdef TARGET_NR_shmat
11170     case TARGET_NR_shmat:
11171         return target_shmat(cpu_env, arg1, arg2, arg3);
11172 #endif
11173 #ifdef TARGET_NR_shmdt
11174     case TARGET_NR_shmdt:
11175         return target_shmdt(arg1);
11176 #endif
11177     case TARGET_NR_fsync:
11178         return get_errno(fsync(arg1));
11179     case TARGET_NR_clone:
11180         /* Linux manages to have three different orderings for its
11181          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11182          * match the kernel's CONFIG_CLONE_* settings.
11183          * Microblaze is further special in that it uses a sixth
11184          * implicit argument to clone for the TLS pointer.
11185          */
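        /*
         * Roughly, the raw guest argument orders are:
         *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         * and do_fork() expects (flags, newsp, parent_tidptr, tls, child_tidptr).
         */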
11186 #if defined(TARGET_MICROBLAZE)
11187         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11188 #elif defined(TARGET_CLONE_BACKWARDS)
11189         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11190 #elif defined(TARGET_CLONE_BACKWARDS2)
11191         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11192 #else
11193         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11194 #endif
11195         return ret;
11196 #ifdef __NR_exit_group
11197         /* new thread calls */
11198     case TARGET_NR_exit_group:
11199         preexit_cleanup(cpu_env, arg1);
11200         return get_errno(exit_group(arg1));
11201 #endif
11202     case TARGET_NR_setdomainname:
11203         if (!(p = lock_user_string(arg1)))
11204             return -TARGET_EFAULT;
11205         ret = get_errno(setdomainname(p, arg2));
11206         unlock_user(p, arg1, 0);
11207         return ret;
11208     case TARGET_NR_uname:
11209         /* No need to transcode because we use the Linux syscall.  */
11210         {
11211             struct new_utsname * buf;
11212 
11213             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11214                 return -TARGET_EFAULT;
11215             ret = get_errno(sys_uname(buf));
11216             if (!is_error(ret)) {
11217                 /* Overwrite the native machine name with whatever is being
11218                    emulated. */
11219                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11220                           sizeof(buf->machine));
11221                 /* Allow the user to override the reported release.  */
11222                 if (qemu_uname_release && *qemu_uname_release) {
11223                     g_strlcpy(buf->release, qemu_uname_release,
11224                               sizeof(buf->release));
11225                 }
11226             }
11227             unlock_user_struct(buf, arg1, 1);
11228         }
11229         return ret;
11230 #ifdef TARGET_I386
11231     case TARGET_NR_modify_ldt:
11232         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11233 #if !defined(TARGET_X86_64)
11234     case TARGET_NR_vm86:
11235         return do_vm86(cpu_env, arg1, arg2);
11236 #endif
11237 #endif
11238 #if defined(TARGET_NR_adjtimex)
11239     case TARGET_NR_adjtimex:
11240         {
11241             struct timex host_buf;
11242 
11243             if (target_to_host_timex(&host_buf, arg1) != 0) {
11244                 return -TARGET_EFAULT;
11245             }
11246             ret = get_errno(adjtimex(&host_buf));
11247             if (!is_error(ret)) {
11248                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11249                     return -TARGET_EFAULT;
11250                 }
11251             }
11252         }
11253         return ret;
11254 #endif
11255 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11256     case TARGET_NR_clock_adjtime:
11257         {
11258             struct timex htx;
11259 
11260             if (target_to_host_timex(&htx, arg2) != 0) {
11261                 return -TARGET_EFAULT;
11262             }
11263             ret = get_errno(clock_adjtime(arg1, &htx));
11264             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11265                 return -TARGET_EFAULT;
11266             }
11267         }
11268         return ret;
11269 #endif
11270 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11271     case TARGET_NR_clock_adjtime64:
11272         {
11273             struct timex htx;
11274 
11275             if (target_to_host_timex64(&htx, arg2) != 0) {
11276                 return -TARGET_EFAULT;
11277             }
11278             ret = get_errno(clock_adjtime(arg1, &htx));
11279             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11280                     return -TARGET_EFAULT;
11281             }
11282         }
11283         return ret;
11284 #endif
11285     case TARGET_NR_getpgid:
11286         return get_errno(getpgid(arg1));
11287     case TARGET_NR_fchdir:
11288         return get_errno(fchdir(arg1));
11289     case TARGET_NR_personality:
11290         return get_errno(personality(arg1));
11291 #ifdef TARGET_NR__llseek /* Not on alpha */
11292     case TARGET_NR__llseek:
11293         {
11294             int64_t res;
11295 #if !defined(__NR_llseek)
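            /*
             * Hosts without _llseek (i.e. 64-bit hosts) can reconstruct the
             * 64-bit offset from the two halves and use plain lseek().
             */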
11296             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11297             if (res == -1) {
11298                 ret = get_errno(res);
11299             } else {
11300                 ret = 0;
11301             }
11302 #else
11303             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11304 #endif
11305             if ((ret == 0) && put_user_s64(res, arg4)) {
11306                 return -TARGET_EFAULT;
11307             }
11308         }
11309         return ret;
11310 #endif
11311 #ifdef TARGET_NR_getdents
11312     case TARGET_NR_getdents:
11313         return do_getdents(arg1, arg2, arg3);
11314 #endif /* TARGET_NR_getdents */
11315 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11316     case TARGET_NR_getdents64:
11317         return do_getdents64(arg1, arg2, arg3);
11318 #endif /* TARGET_NR_getdents64 */
11319 #if defined(TARGET_NR__newselect)
11320     case TARGET_NR__newselect:
11321         return do_select(arg1, arg2, arg3, arg4, arg5);
11322 #endif
11323 #ifdef TARGET_NR_poll
11324     case TARGET_NR_poll:
11325         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11326 #endif
11327 #ifdef TARGET_NR_ppoll
11328     case TARGET_NR_ppoll:
11329         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11330 #endif
11331 #ifdef TARGET_NR_ppoll_time64
11332     case TARGET_NR_ppoll_time64:
11333         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11334 #endif
11335     case TARGET_NR_flock:
11336         /* NOTE: the flock operation constants appear to be the same on
11337            every Linux platform, so no translation is needed */
11338         return get_errno(safe_flock(arg1, arg2));
11339     case TARGET_NR_readv:
11340         {
11341             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11342             if (vec != NULL) {
11343                 ret = get_errno(safe_readv(arg1, vec, arg3));
11344                 unlock_iovec(vec, arg2, arg3, 1);
11345             } else {
11346                 ret = -host_to_target_errno(errno);
11347             }
11348         }
11349         return ret;
11350     case TARGET_NR_writev:
11351         {
11352             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11353             if (vec != NULL) {
11354                 ret = get_errno(safe_writev(arg1, vec, arg3));
11355                 unlock_iovec(vec, arg2, arg3, 0);
11356             } else {
11357                 ret = -host_to_target_errno(errno);
11358             }
11359         }
11360         return ret;
11361 #if defined(TARGET_NR_preadv)
11362     case TARGET_NR_preadv:
11363         {
11364             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11365             if (vec != NULL) {
11366                 unsigned long low, high;
11367 
11368                 target_to_host_low_high(arg4, arg5, &low, &high);
11369                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11370                 unlock_iovec(vec, arg2, arg3, 1);
11371             } else {
11372                 ret = -host_to_target_errno(errno);
11373             }
11374         }
11375         return ret;
11376 #endif
11377 #if defined(TARGET_NR_pwritev)
11378     case TARGET_NR_pwritev:
11379         {
11380             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11381             if (vec != NULL) {
11382                 unsigned long low, high;
11383 
11384                 target_to_host_low_high(arg4, arg5, &low, &high);
11385                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11386                 unlock_iovec(vec, arg2, arg3, 0);
11387             } else {
11388                 ret = -host_to_target_errno(errno);
11389             }
11390         }
11391         return ret;
11392 #endif
11393     case TARGET_NR_getsid:
11394         return get_errno(getsid(arg1));
11395 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11396     case TARGET_NR_fdatasync:
11397         return get_errno(fdatasync(arg1));
11398 #endif
11399     case TARGET_NR_sched_getaffinity:
11400         {
11401             unsigned int mask_size;
11402             unsigned long *mask;
11403 
11404             /*
11405              * sched_getaffinity needs multiples of ulong, so we need to
11406              * take care of mismatches between target and host ulong sizes.
11407              */
11408             if (arg2 & (sizeof(abi_ulong) - 1)) {
11409                 return -TARGET_EINVAL;
11410             }
11411             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11412 
11413             mask = alloca(mask_size);
11414             memset(mask, 0, mask_size);
11415             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11416 
11417             if (!is_error(ret)) {
11418                 if (ret > arg2) {
11419                     /* More data was returned than will fit in the caller's buffer.
11420                      * This only happens if sizeof(abi_long) < sizeof(long)
11421                      * and the caller passed us a buffer holding an odd number
11422                      * of abi_longs. If the host kernel is actually using the
11423                      * extra 4 bytes then fail EINVAL; otherwise we can just
11424                      * ignore them and only copy the interesting part.
11425                      */
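                    /*
                     * For example, a 32-bit guest passing a 4 byte buffer on
                     * a 64-bit host: mask_size was rounded up to 8 for the
                     * host syscall, so the kernel may report 8 bytes back
                     * even though the guest buffer only holds 4.
                     */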
11426                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11427                     if (numcpus > arg2 * 8) {
11428                         return -TARGET_EINVAL;
11429                     }
11430                     ret = arg2;
11431                 }
11432 
11433                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11434                     return -TARGET_EFAULT;
11435                 }
11436             }
11437         }
11438         return ret;
11439     case TARGET_NR_sched_setaffinity:
11440         {
11441             unsigned int mask_size;
11442             unsigned long *mask;
11443 
11444             /*
11445              * sched_setaffinity needs multiples of ulong, so we need to
11446              * take care of mismatches between target and host ulong sizes.
11447              */
11448             if (arg2 & (sizeof(abi_ulong) - 1)) {
11449                 return -TARGET_EINVAL;
11450             }
11451             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11452             mask = alloca(mask_size);
11453 
11454             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11455             if (ret) {
11456                 return ret;
11457             }
11458 
11459             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11460         }
11461     case TARGET_NR_getcpu:
11462         {
11463             unsigned cpuid, node;
11464             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11465                                        arg2 ? &node : NULL,
11466                                        NULL));
11467             if (is_error(ret)) {
11468                 return ret;
11469             }
11470             if (arg1 && put_user_u32(cpuid, arg1)) {
11471                 return -TARGET_EFAULT;
11472             }
11473             if (arg2 && put_user_u32(node, arg2)) {
11474                 return -TARGET_EFAULT;
11475             }
11476         }
11477         return ret;
11478     case TARGET_NR_sched_setparam:
11479         {
11480             struct target_sched_param *target_schp;
11481             struct sched_param schp;
11482 
11483             if (arg2 == 0) {
11484                 return -TARGET_EINVAL;
11485             }
11486             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11487                 return -TARGET_EFAULT;
11488             }
11489             schp.sched_priority = tswap32(target_schp->sched_priority);
11490             unlock_user_struct(target_schp, arg2, 0);
11491             return get_errno(sys_sched_setparam(arg1, &schp));
11492         }
11493     case TARGET_NR_sched_getparam:
11494         {
11495             struct target_sched_param *target_schp;
11496             struct sched_param schp;
11497 
11498             if (arg2 == 0) {
11499                 return -TARGET_EINVAL;
11500             }
11501             ret = get_errno(sys_sched_getparam(arg1, &schp));
11502             if (!is_error(ret)) {
11503                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11504                     return -TARGET_EFAULT;
11505                 }
11506                 target_schp->sched_priority = tswap32(schp.sched_priority);
11507                 unlock_user_struct(target_schp, arg2, 1);
11508             }
11509         }
11510         return ret;
11511     case TARGET_NR_sched_setscheduler:
11512         {
11513             struct target_sched_param *target_schp;
11514             struct sched_param schp;
11515             if (arg3 == 0) {
11516                 return -TARGET_EINVAL;
11517             }
11518             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11519                 return -TARGET_EFAULT;
11520             }
11521             schp.sched_priority = tswap32(target_schp->sched_priority);
11522             unlock_user_struct(target_schp, arg3, 0);
11523             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11524         }
11525     case TARGET_NR_sched_getscheduler:
11526         return get_errno(sys_sched_getscheduler(arg1));
11527     case TARGET_NR_sched_getattr:
11528         {
11529             struct target_sched_attr *target_scha;
11530             struct sched_attr scha;
11531             if (arg2 == 0) {
11532                 return -TARGET_EINVAL;
11533             }
11534             if (arg3 > sizeof(scha)) {
11535                 arg3 = sizeof(scha);
11536             }
11537             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11538             if (!is_error(ret)) {
11539                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11540                 if (!target_scha) {
11541                     return -TARGET_EFAULT;
11542                 }
11543                 target_scha->size = tswap32(scha.size);
11544                 target_scha->sched_policy = tswap32(scha.sched_policy);
11545                 target_scha->sched_flags = tswap64(scha.sched_flags);
11546                 target_scha->sched_nice = tswap32(scha.sched_nice);
11547                 target_scha->sched_priority = tswap32(scha.sched_priority);
11548                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11549                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11550                 target_scha->sched_period = tswap64(scha.sched_period);
11551                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11552                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11553                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11554                 }
11555                 unlock_user(target_scha, arg2, arg3);
11556             }
11557             return ret;
11558         }
11559     case TARGET_NR_sched_setattr:
11560         {
11561             struct target_sched_attr *target_scha;
11562             struct sched_attr scha;
11563             uint32_t size;
11564             int zeroed;
11565             if (arg2 == 0) {
11566                 return -TARGET_EINVAL;
11567             }
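            /*
             * The size handling below loosely mirrors the kernel's
             * sched_copy_attr(): a zero size means the oldest known layout,
             * a size smaller than that is rejected with E2BIG (reporting the
             * size we do support), and any trailing bytes beyond the fields
             * we know about must be zero.
             */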
11568             if (get_user_u32(size, arg2)) {
11569                 return -TARGET_EFAULT;
11570             }
11571             if (!size) {
11572                 size = offsetof(struct target_sched_attr, sched_util_min);
11573             }
11574             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11575                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11576                     return -TARGET_EFAULT;
11577                 }
11578                 return -TARGET_E2BIG;
11579             }
11580 
11581             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11582             if (zeroed < 0) {
11583                 return zeroed;
11584             } else if (zeroed == 0) {
11585                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11586                     return -TARGET_EFAULT;
11587                 }
11588                 return -TARGET_E2BIG;
11589             }
11590             if (size > sizeof(struct target_sched_attr)) {
11591                 size = sizeof(struct target_sched_attr);
11592             }
11593 
11594             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11595             if (!target_scha) {
11596                 return -TARGET_EFAULT;
11597             }
11598             scha.size = size;
11599             scha.sched_policy = tswap32(target_scha->sched_policy);
11600             scha.sched_flags = tswap64(target_scha->sched_flags);
11601             scha.sched_nice = tswap32(target_scha->sched_nice);
11602             scha.sched_priority = tswap32(target_scha->sched_priority);
11603             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11604             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11605             scha.sched_period = tswap64(target_scha->sched_period);
11606             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11607                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11608                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11609             }
11610             unlock_user(target_scha, arg2, 0);
11611             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11612         }
11613     case TARGET_NR_sched_yield:
11614         return get_errno(sched_yield());
11615     case TARGET_NR_sched_get_priority_max:
11616         return get_errno(sched_get_priority_max(arg1));
11617     case TARGET_NR_sched_get_priority_min:
11618         return get_errno(sched_get_priority_min(arg1));
11619 #ifdef TARGET_NR_sched_rr_get_interval
11620     case TARGET_NR_sched_rr_get_interval:
11621         {
11622             struct timespec ts;
11623             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11624             if (!is_error(ret)) {
11625                 ret = host_to_target_timespec(arg2, &ts);
11626             }
11627         }
11628         return ret;
11629 #endif
11630 #ifdef TARGET_NR_sched_rr_get_interval_time64
11631     case TARGET_NR_sched_rr_get_interval_time64:
11632         {
11633             struct timespec ts;
11634             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11635             if (!is_error(ret)) {
11636                 ret = host_to_target_timespec64(arg2, &ts);
11637             }
11638         }
11639         return ret;
11640 #endif
11641 #if defined(TARGET_NR_nanosleep)
11642     case TARGET_NR_nanosleep:
11643         {
11644             struct timespec req, rem;
11645             if (target_to_host_timespec(&req, arg1)) {
11646                 return -TARGET_EFAULT;
11647             }
11648             ret = get_errno(safe_nanosleep(&req, &rem));
11649             if (is_error(ret) && arg2) {
11650                 if (host_to_target_timespec(arg2, &rem)) {
11651                     return -TARGET_EFAULT;
11652                 }
11653             }
11654         }
11655         return ret;
11656 #endif
11657     case TARGET_NR_prctl:
11658         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11660 #ifdef TARGET_NR_arch_prctl
11661     case TARGET_NR_arch_prctl:
11662         return do_arch_prctl(cpu_env, arg1, arg2);
11663 #endif
11664 #ifdef TARGET_NR_pread64
11665     case TARGET_NR_pread64:
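        /*
         * On ABIs that require 64-bit syscall arguments in aligned register
         * pairs a padding word precedes the offset, so the offset halves
         * arrive one slot later, in arg5/arg6.
         */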
11666         if (regpairs_aligned(cpu_env, num)) {
11667             arg4 = arg5;
11668             arg5 = arg6;
11669         }
11670         if (arg2 == 0 && arg3 == 0) {
11671             /* Special-case NULL buffer and zero length, which should succeed */
11672             p = 0;
11673         } else {
11674             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11675             if (!p) {
11676                 return -TARGET_EFAULT;
11677             }
11678         }
11679         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11680         unlock_user(p, arg2, ret);
11681         return ret;
11682     case TARGET_NR_pwrite64:
11683         if (regpairs_aligned(cpu_env, num)) {
11684             arg4 = arg5;
11685             arg5 = arg6;
11686         }
11687         if (arg2 == 0 && arg3 == 0) {
11688             /* Special-case NULL buffer and zero length, which should succeed */
11689             p = 0;
11690         } else {
11691             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11692             if (!p) {
11693                 return -TARGET_EFAULT;
11694             }
11695         }
11696         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11697         unlock_user(p, arg2, 0);
11698         return ret;
11699 #endif
11700     case TARGET_NR_getcwd:
11701         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11702             return -TARGET_EFAULT;
11703         ret = get_errno(sys_getcwd1(p, arg2));
11704         unlock_user(p, arg1, ret);
11705         return ret;
11706     case TARGET_NR_capget:
11707     case TARGET_NR_capset:
11708     {
11709         struct target_user_cap_header *target_header;
11710         struct target_user_cap_data *target_data = NULL;
11711         struct __user_cap_header_struct header;
11712         struct __user_cap_data_struct data[2];
11713         struct __user_cap_data_struct *dataptr = NULL;
11714         int i, target_datalen;
11715         int data_items = 1;
11716 
11717         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11718             return -TARGET_EFAULT;
11719         }
11720         header.version = tswap32(target_header->version);
11721         header.pid = tswap32(target_header->pid);
11722 
11723         if (header.version != _LINUX_CAPABILITY_VERSION) {
11724             /* Version 2 and up takes pointer to two user_data structs */
11725             data_items = 2;
11726         }
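        /*
         * With version 2 and later each capability set is a 64-bit value
         * split across the two 32-bit cap_data structs: index 0 carries
         * bits 0-31 and index 1 bits 32-63.
         */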
11727 
11728         target_datalen = sizeof(*target_data) * data_items;
11729 
11730         if (arg2) {
11731             if (num == TARGET_NR_capget) {
11732                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11733             } else {
11734                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11735             }
11736             if (!target_data) {
11737                 unlock_user_struct(target_header, arg1, 0);
11738                 return -TARGET_EFAULT;
11739             }
11740 
11741             if (num == TARGET_NR_capset) {
11742                 for (i = 0; i < data_items; i++) {
11743                     data[i].effective = tswap32(target_data[i].effective);
11744                     data[i].permitted = tswap32(target_data[i].permitted);
11745                     data[i].inheritable = tswap32(target_data[i].inheritable);
11746                 }
11747             }
11748 
11749             dataptr = data;
11750         }
11751 
11752         if (num == TARGET_NR_capget) {
11753             ret = get_errno(capget(&header, dataptr));
11754         } else {
11755             ret = get_errno(capset(&header, dataptr));
11756         }
11757 
11758         /* The kernel always updates version for both capget and capset */
11759         target_header->version = tswap32(header.version);
11760         unlock_user_struct(target_header, arg1, 1);
11761 
11762         if (arg2) {
11763             if (num == TARGET_NR_capget) {
11764                 for (i = 0; i < data_items; i++) {
11765                     target_data[i].effective = tswap32(data[i].effective);
11766                     target_data[i].permitted = tswap32(data[i].permitted);
11767                     target_data[i].inheritable = tswap32(data[i].inheritable);
11768                 }
11769                 unlock_user(target_data, arg2, target_datalen);
11770             } else {
11771                 unlock_user(target_data, arg2, 0);
11772             }
11773         }
11774         return ret;
11775     }
11776     case TARGET_NR_sigaltstack:
11777         return do_sigaltstack(arg1, arg2, cpu_env);
11778 
11779 #ifdef CONFIG_SENDFILE
11780 #ifdef TARGET_NR_sendfile
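          /*
           * sendfile/sendfile64: if the guest passed an offset pointer,
           * copy it in (abi_long for sendfile, 64 bit for sendfile64),
           * call the host sendfile() and write the updated offset back.
           */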
11781     case TARGET_NR_sendfile:
11782     {
11783         off_t *offp = NULL;
11784         off_t off;
11785         if (arg3) {
11786             ret = get_user_sal(off, arg3);
11787             if (is_error(ret)) {
11788                 return ret;
11789             }
11790             offp = &off;
11791         }
11792         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11793         if (!is_error(ret) && arg3) {
11794             abi_long ret2 = put_user_sal(off, arg3);
11795             if (is_error(ret2)) {
11796                 ret = ret2;
11797             }
11798         }
11799         return ret;
11800     }
11801 #endif
11802 #ifdef TARGET_NR_sendfile64
11803     case TARGET_NR_sendfile64:
11804     {
11805         off_t *offp = NULL;
11806         off_t off;
11807         if (arg3) {
11808             ret = get_user_s64(off, arg3);
11809             if (is_error(ret)) {
11810                 return ret;
11811             }
11812             offp = &off;
11813         }
11814         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11815         if (!is_error(ret) && arg3) {
11816             abi_long ret2 = put_user_s64(off, arg3);
11817             if (is_error(ret2)) {
11818                 ret = ret2;
11819             }
11820         }
11821         return ret;
11822     }
11823 #endif
11824 #endif
11825 #ifdef TARGET_NR_vfork
11826     case TARGET_NR_vfork:
11827         return get_errno(do_fork(cpu_env,
11828                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11829                          0, 0, 0, 0));
11830 #endif
11831 #ifdef TARGET_NR_ugetrlimit
11832     case TARGET_NR_ugetrlimit:
11833     {
11834         struct rlimit rlim;
11835         int resource = target_to_host_resource(arg1);
11836         ret = get_errno(getrlimit(resource, &rlim));
11837         if (!is_error(ret)) {
11838             struct target_rlimit *target_rlim;
11839             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11840                 return -TARGET_EFAULT;
11841             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11842             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11843             unlock_user_struct(target_rlim, arg2, 1);
11844         }
11845         return ret;
11846     }
11847 #endif
11848 #ifdef TARGET_NR_truncate64
11849     case TARGET_NR_truncate64:
11850         if (!(p = lock_user_string(arg1)))
11851             return -TARGET_EFAULT;
11852         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11853         unlock_user(p, arg1, 0);
11854         return ret;
11855 #endif
11856 #ifdef TARGET_NR_ftruncate64
11857     case TARGET_NR_ftruncate64:
11858         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11859 #endif
11860 #ifdef TARGET_NR_stat64
11861     case TARGET_NR_stat64:
11862         if (!(p = lock_user_string(arg1))) {
11863             return -TARGET_EFAULT;
11864         }
11865         ret = get_errno(stat(path(p), &st));
11866         unlock_user(p, arg1, 0);
11867         if (!is_error(ret))
11868             ret = host_to_target_stat64(cpu_env, arg2, &st);
11869         return ret;
11870 #endif
11871 #ifdef TARGET_NR_lstat64
11872     case TARGET_NR_lstat64:
11873         if (!(p = lock_user_string(arg1))) {
11874             return -TARGET_EFAULT;
11875         }
11876         ret = get_errno(lstat(path(p), &st));
11877         unlock_user(p, arg1, 0);
11878         if (!is_error(ret))
11879             ret = host_to_target_stat64(cpu_env, arg2, &st);
11880         return ret;
11881 #endif
11882 #ifdef TARGET_NR_fstat64
11883     case TARGET_NR_fstat64:
11884         ret = get_errno(fstat(arg1, &st));
11885         if (!is_error(ret))
11886             ret = host_to_target_stat64(cpu_env, arg2, &st);
11887         return ret;
11888 #endif
11889 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11890 #ifdef TARGET_NR_fstatat64
11891     case TARGET_NR_fstatat64:
11892 #endif
11893 #ifdef TARGET_NR_newfstatat
11894     case TARGET_NR_newfstatat:
11895 #endif
11896         if (!(p = lock_user_string(arg2))) {
11897             return -TARGET_EFAULT;
11898         }
11899         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11900         unlock_user(p, arg2, 0);
11901         if (!is_error(ret))
11902             ret = host_to_target_stat64(cpu_env, arg3, &st);
11903         return ret;
11904 #endif
11905 #if defined(TARGET_NR_statx)
11906     case TARGET_NR_statx:
11907         {
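                  /*
                   * Use the host statx() syscall when it is available;
                   * otherwise (or if the host returns ENOSYS) fall back to
                   * fstatat() and fill in a target_statx from struct stat.
                   */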
11908             struct target_statx *target_stx;
11909             int dirfd = arg1;
11910             int flags = arg3;
11911 
11912             p = lock_user_string(arg2);
11913             if (p == NULL) {
11914                 return -TARGET_EFAULT;
11915             }
11916 #if defined(__NR_statx)
11917             {
11918                 /*
11919                  * It is assumed that struct statx is architecture independent.
11920                  */
11921                 struct target_statx host_stx;
11922                 int mask = arg4;
11923 
11924                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11925                 if (!is_error(ret)) {
11926                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11927                         unlock_user(p, arg2, 0);
11928                         return -TARGET_EFAULT;
11929                     }
11930                 }
11931 
11932                 if (ret != -TARGET_ENOSYS) {
11933                     unlock_user(p, arg2, 0);
11934                     return ret;
11935                 }
11936             }
11937 #endif
11938             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11939             unlock_user(p, arg2, 0);
11940 
11941             if (!is_error(ret)) {
11942                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11943                     return -TARGET_EFAULT;
11944                 }
11945                 memset(target_stx, 0, sizeof(*target_stx));
11946                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11947                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11948                 __put_user(st.st_ino, &target_stx->stx_ino);
11949                 __put_user(st.st_mode, &target_stx->stx_mode);
11950                 __put_user(st.st_uid, &target_stx->stx_uid);
11951                 __put_user(st.st_gid, &target_stx->stx_gid);
11952                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11953                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11954                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11955                 __put_user(st.st_size, &target_stx->stx_size);
11956                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11957                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11958                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11959                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11960                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11961                 unlock_user_struct(target_stx, arg5, 1);
11962             }
11963         }
11964         return ret;
11965 #endif
11966 #ifdef TARGET_NR_lchown
11967     case TARGET_NR_lchown:
11968         if (!(p = lock_user_string(arg1)))
11969             return -TARGET_EFAULT;
11970         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11971         unlock_user(p, arg1, 0);
11972         return ret;
11973 #endif
11974 #ifdef TARGET_NR_getuid
11975     case TARGET_NR_getuid:
11976         return get_errno(high2lowuid(getuid()));
11977 #endif
11978 #ifdef TARGET_NR_getgid
11979     case TARGET_NR_getgid:
11980         return get_errno(high2lowgid(getgid()));
11981 #endif
11982 #ifdef TARGET_NR_geteuid
11983     case TARGET_NR_geteuid:
11984         return get_errno(high2lowuid(geteuid()));
11985 #endif
11986 #ifdef TARGET_NR_getegid
11987     case TARGET_NR_getegid:
11988         return get_errno(high2lowgid(getegid()));
11989 #endif
11990     case TARGET_NR_setreuid:
11991         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11992     case TARGET_NR_setregid:
11993         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11994     case TARGET_NR_getgroups:
11995         { /* the same code as for TARGET_NR_getgroups32 */
11996             int gidsetsize = arg1;
11997             target_id *target_grouplist;
11998             g_autofree gid_t *grouplist = NULL;
11999             int i;
12000 
12001             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12002                 return -TARGET_EINVAL;
12003             }
12004             if (gidsetsize > 0) {
12005                 grouplist = g_try_new(gid_t, gidsetsize);
12006                 if (!grouplist) {
12007                     return -TARGET_ENOMEM;
12008                 }
12009             }
12010             ret = get_errno(getgroups(gidsetsize, grouplist));
12011             if (!is_error(ret) && gidsetsize > 0) {
12012                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12013                                              gidsetsize * sizeof(target_id), 0);
12014                 if (!target_grouplist) {
12015                     return -TARGET_EFAULT;
12016                 }
12017                 for (i = 0; i < ret; i++) {
12018                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12019                 }
12020                 unlock_user(target_grouplist, arg2,
12021                             gidsetsize * sizeof(target_id));
12022             }
12023             return ret;
12024         }
12025     case TARGET_NR_setgroups:
12026         { /* the same code as for TARGET_NR_setgroups32 */
12027             int gidsetsize = arg1;
12028             target_id *target_grouplist;
12029             g_autofree gid_t *grouplist = NULL;
12030             int i;
12031 
12032             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12033                 return -TARGET_EINVAL;
12034             }
12035             if (gidsetsize > 0) {
12036                 grouplist = g_try_new(gid_t, gidsetsize);
12037                 if (!grouplist) {
12038                     return -TARGET_ENOMEM;
12039                 }
12040                 target_grouplist = lock_user(VERIFY_READ, arg2,
12041                                              gidsetsize * sizeof(target_id), 1);
12042                 if (!target_grouplist) {
12043                     return -TARGET_EFAULT;
12044                 }
12045                 for (i = 0; i < gidsetsize; i++) {
12046                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12047                 }
12048                 unlock_user(target_grouplist, arg2,
12049                             gidsetsize * sizeof(target_id));
12050             }
12051             return get_errno(sys_setgroups(gidsetsize, grouplist));
12052         }
12053     case TARGET_NR_fchown:
12054         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12055 #if defined(TARGET_NR_fchownat)
12056     case TARGET_NR_fchownat:
12057         if (!(p = lock_user_string(arg2)))
12058             return -TARGET_EFAULT;
12059         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12060                                  low2highgid(arg4), arg5));
12061         unlock_user(p, arg2, 0);
12062         return ret;
12063 #endif
12064 #ifdef TARGET_NR_setresuid
12065     case TARGET_NR_setresuid:
12066         return get_errno(sys_setresuid(low2highuid(arg1),
12067                                        low2highuid(arg2),
12068                                        low2highuid(arg3)));
12069 #endif
12070 #ifdef TARGET_NR_getresuid
12071     case TARGET_NR_getresuid:
12072         {
12073             uid_t ruid, euid, suid;
12074             ret = get_errno(getresuid(&ruid, &euid, &suid));
12075             if (!is_error(ret)) {
12076                 if (put_user_id(high2lowuid(ruid), arg1)
12077                     || put_user_id(high2lowuid(euid), arg2)
12078                     || put_user_id(high2lowuid(suid), arg3))
12079                     return -TARGET_EFAULT;
12080             }
12081         }
12082         return ret;
12083 #endif
12084 #ifdef TARGET_NR_setresgid
12085     case TARGET_NR_setresgid:
12086         return get_errno(sys_setresgid(low2highgid(arg1),
12087                                        low2highgid(arg2),
12088                                        low2highgid(arg3)));
12089 #endif
12090 #ifdef TARGET_NR_getresgid
12091     case TARGET_NR_getresgid:
12092         {
12093             gid_t rgid, egid, sgid;
12094             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12095             if (!is_error(ret)) {
12096                 if (put_user_id(high2lowgid(rgid), arg1)
12097                     || put_user_id(high2lowgid(egid), arg2)
12098                     || put_user_id(high2lowgid(sgid), arg3))
12099                     return -TARGET_EFAULT;
12100             }
12101         }
12102         return ret;
12103 #endif
12104 #ifdef TARGET_NR_chown
12105     case TARGET_NR_chown:
12106         if (!(p = lock_user_string(arg1)))
12107             return -TARGET_EFAULT;
12108         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12109         unlock_user(p, arg1, 0);
12110         return ret;
12111 #endif
12112     case TARGET_NR_setuid:
12113         return get_errno(sys_setuid(low2highuid(arg1)));
12114     case TARGET_NR_setgid:
12115         return get_errno(sys_setgid(low2highgid(arg1)));
12116     case TARGET_NR_setfsuid:
12117         return get_errno(setfsuid(arg1));
12118     case TARGET_NR_setfsgid:
12119         return get_errno(setfsgid(arg1));
12120 
12121 #ifdef TARGET_NR_lchown32
12122     case TARGET_NR_lchown32:
12123         if (!(p = lock_user_string(arg1)))
12124             return -TARGET_EFAULT;
12125         ret = get_errno(lchown(p, arg2, arg3));
12126         unlock_user(p, arg1, 0);
12127         return ret;
12128 #endif
12129 #ifdef TARGET_NR_getuid32
12130     case TARGET_NR_getuid32:
12131         return get_errno(getuid());
12132 #endif
12133 
12134 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12135     /* Alpha specific: return the effective uid in a4, the real uid as result */
12136     case TARGET_NR_getxuid:
12137         {
12138             uid_t euid;
12139             euid = geteuid();
12140             cpu_env->ir[IR_A4] = euid;
12141         }
12142         return get_errno(getuid());
12143 #endif
12144 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12145     /* Alpha specific: return the effective gid in a4, the real gid as result */
12146     case TARGET_NR_getxgid:
12147         {
12148             gid_t egid;
12149             egid = getegid();
12150             cpu_env->ir[IR_A4] = egid;
12151         }
12152         return get_errno(getgid());
12153 #endif
12154 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12155     /* Alpha specific */
12156     case TARGET_NR_osf_getsysinfo:
12157         ret = -TARGET_EOPNOTSUPP;
12158         switch (arg1) {
12159           case TARGET_GSI_IEEE_FP_CONTROL:
12160             {
12161                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12162                 uint64_t swcr = cpu_env->swcr;
12163 
12164                 swcr &= ~SWCR_STATUS_MASK;
12165                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12166 
12167                 if (put_user_u64(swcr, arg2))
12168                     return -TARGET_EFAULT;
12169                 ret = 0;
12170             }
12171             break;
12172 
12173           /* case GSI_IEEE_STATE_AT_SIGNAL:
12174              -- Not implemented in linux kernel.
12175              case GSI_UACPROC:
12176              -- Retrieves current unaligned access state; not much used.
12177              case GSI_PROC_TYPE:
12178              -- Retrieves implver information; surely not used.
12179              case GSI_GET_HWRPB:
12180              -- Grabs a copy of the HWRPB; surely not used.
12181           */
12182         }
12183         return ret;
12184 #endif
12185 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12186     /* Alpha specific */
12187     case TARGET_NR_osf_setsysinfo:
12188         ret = -TARGET_EOPNOTSUPP;
12189         switch (arg1) {
12190           case TARGET_SSI_IEEE_FP_CONTROL:
12191             {
12192                 uint64_t swcr, fpcr;
12193 
12194                 if (get_user_u64 (swcr, arg2)) {
12195                     return -TARGET_EFAULT;
12196                 }
12197 
12198                 /*
12199                  * The kernel calls swcr_update_status to update the
12200                  * status bits from the fpcr at every point that it
12201                  * could be queried.  Therefore, we store the status
12202                  * bits only in FPCR.
12203                  */
12204                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12205 
12206                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12207                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12208                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12209                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12210                 ret = 0;
12211             }
12212             break;
12213 
12214           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12215             {
12216                 uint64_t exc, fpcr, fex;
12217 
12218                 if (get_user_u64(exc, arg2)) {
12219                     return -TARGET_EFAULT;
12220                 }
12221                 exc &= SWCR_STATUS_MASK;
12222                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12223 
12224                 /* Old exceptions are not signaled.  */
12225                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12226                 fex = exc & ~fex;
12227                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12228                 fex &= (cpu_env)->swcr;
12229 
12230                 /* Update the hardware fpcr.  */
12231                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12232                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12233 
12234                 if (fex) {
12235                     int si_code = TARGET_FPE_FLTUNK;
12236                     target_siginfo_t info;
12237 
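                          /*
                           * Choose an si_code for the newly raised,
                           * trap-enabled exceptions; later checks override
                           * earlier ones, so invalid-operation wins.
                           */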
12238                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12239                         si_code = TARGET_FPE_FLTUND;
12240                     }
12241                     if (fex & SWCR_TRAP_ENABLE_INE) {
12242                         si_code = TARGET_FPE_FLTRES;
12243                     }
12244                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12245                         si_code = TARGET_FPE_FLTUND;
12246                     }
12247                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12248                         si_code = TARGET_FPE_FLTOVF;
12249                     }
12250                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12251                         si_code = TARGET_FPE_FLTDIV;
12252                     }
12253                     if (fex & SWCR_TRAP_ENABLE_INV) {
12254                         si_code = TARGET_FPE_FLTINV;
12255                     }
12256 
12257                     info.si_signo = SIGFPE;
12258                     info.si_errno = 0;
12259                     info.si_code = si_code;
12260                     info._sifields._sigfault._addr = (cpu_env)->pc;
12261                     queue_signal(cpu_env, info.si_signo,
12262                                  QEMU_SI_FAULT, &info);
12263                 }
12264                 ret = 0;
12265             }
12266             break;
12267 
12268           /* case SSI_NVPAIRS:
12269              -- Used with SSIN_UACPROC to enable unaligned accesses.
12270              case SSI_IEEE_STATE_AT_SIGNAL:
12271              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12272              -- Not implemented in linux kernel
12273           */
12274         }
12275         return ret;
12276 #endif
12277 #ifdef TARGET_NR_osf_sigprocmask
12278     /* Alpha specific.  */
12279     case TARGET_NR_osf_sigprocmask:
12280         {
12281             abi_ulong mask;
12282             int how;
12283             sigset_t set, oldset;
12284 
12285             switch(arg1) {
12286             case TARGET_SIG_BLOCK:
12287                 how = SIG_BLOCK;
12288                 break;
12289             case TARGET_SIG_UNBLOCK:
12290                 how = SIG_UNBLOCK;
12291                 break;
12292             case TARGET_SIG_SETMASK:
12293                 how = SIG_SETMASK;
12294                 break;
12295             default:
12296                 return -TARGET_EINVAL;
12297             }
12298             mask = arg2;
12299             target_to_host_old_sigset(&set, &mask);
12300             ret = do_sigprocmask(how, &set, &oldset);
12301             if (!ret) {
12302                 host_to_target_old_sigset(&mask, &oldset);
12303                 ret = mask;
12304             }
12305         }
12306         return ret;
12307 #endif
12308 
12309 #ifdef TARGET_NR_getgid32
12310     case TARGET_NR_getgid32:
12311         return get_errno(getgid());
12312 #endif
12313 #ifdef TARGET_NR_geteuid32
12314     case TARGET_NR_geteuid32:
12315         return get_errno(geteuid());
12316 #endif
12317 #ifdef TARGET_NR_getegid32
12318     case TARGET_NR_getegid32:
12319         return get_errno(getegid());
12320 #endif
12321 #ifdef TARGET_NR_setreuid32
12322     case TARGET_NR_setreuid32:
12323         return get_errno(sys_setreuid(arg1, arg2));
12324 #endif
12325 #ifdef TARGET_NR_setregid32
12326     case TARGET_NR_setregid32:
12327         return get_errno(sys_setregid(arg1, arg2));
12328 #endif
12329 #ifdef TARGET_NR_getgroups32
12330     case TARGET_NR_getgroups32:
12331         { /* the same code as for TARGET_NR_getgroups */
12332             int gidsetsize = arg1;
12333             uint32_t *target_grouplist;
12334             g_autofree gid_t *grouplist = NULL;
12335             int i;
12336 
12337             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12338                 return -TARGET_EINVAL;
12339             }
12340             if (gidsetsize > 0) {
12341                 grouplist = g_try_new(gid_t, gidsetsize);
12342                 if (!grouplist) {
12343                     return -TARGET_ENOMEM;
12344                 }
12345             }
12346             ret = get_errno(getgroups(gidsetsize, grouplist));
12347             if (!is_error(ret) && gidsetsize > 0) {
12348                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12349                                              gidsetsize * 4, 0);
12350                 if (!target_grouplist) {
12351                     return -TARGET_EFAULT;
12352                 }
12353                 for (i = 0; i < ret; i++) {
12354                     target_grouplist[i] = tswap32(grouplist[i]);
12355                 }
12356                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12357             }
12358             return ret;
12359         }
12360 #endif
12361 #ifdef TARGET_NR_setgroups32
12362     case TARGET_NR_setgroups32:
12363         { /* the same code as for TARGET_NR_setgroups */
12364             int gidsetsize = arg1;
12365             uint32_t *target_grouplist;
12366             g_autofree gid_t *grouplist = NULL;
12367             int i;
12368 
12369             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12370                 return -TARGET_EINVAL;
12371             }
12372             if (gidsetsize > 0) {
12373                 grouplist = g_try_new(gid_t, gidsetsize);
12374                 if (!grouplist) {
12375                     return -TARGET_ENOMEM;
12376                 }
12377                 target_grouplist = lock_user(VERIFY_READ, arg2,
12378                                              gidsetsize * 4, 1);
12379                 if (!target_grouplist) {
12380                     return -TARGET_EFAULT;
12381                 }
12382                 for (i = 0; i < gidsetsize; i++) {
12383                     grouplist[i] = tswap32(target_grouplist[i]);
12384                 }
12385                 unlock_user(target_grouplist, arg2, 0);
12386             }
12387             return get_errno(sys_setgroups(gidsetsize, grouplist));
12388         }
12389 #endif
12390 #ifdef TARGET_NR_fchown32
12391     case TARGET_NR_fchown32:
12392         return get_errno(fchown(arg1, arg2, arg3));
12393 #endif
12394 #ifdef TARGET_NR_setresuid32
12395     case TARGET_NR_setresuid32:
12396         return get_errno(sys_setresuid(arg1, arg2, arg3));
12397 #endif
12398 #ifdef TARGET_NR_getresuid32
12399     case TARGET_NR_getresuid32:
12400         {
12401             uid_t ruid, euid, suid;
12402             ret = get_errno(getresuid(&ruid, &euid, &suid));
12403             if (!is_error(ret)) {
12404                 if (put_user_u32(ruid, arg1)
12405                     || put_user_u32(euid, arg2)
12406                     || put_user_u32(suid, arg3))
12407                     return -TARGET_EFAULT;
12408             }
12409         }
12410         return ret;
12411 #endif
12412 #ifdef TARGET_NR_setresgid32
12413     case TARGET_NR_setresgid32:
12414         return get_errno(sys_setresgid(arg1, arg2, arg3));
12415 #endif
12416 #ifdef TARGET_NR_getresgid32
12417     case TARGET_NR_getresgid32:
12418         {
12419             gid_t rgid, egid, sgid;
12420             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12421             if (!is_error(ret)) {
12422                 if (put_user_u32(rgid, arg1)
12423                     || put_user_u32(egid, arg2)
12424                     || put_user_u32(sgid, arg3))
12425                     return -TARGET_EFAULT;
12426             }
12427         }
12428         return ret;
12429 #endif
12430 #ifdef TARGET_NR_chown32
12431     case TARGET_NR_chown32:
12432         if (!(p = lock_user_string(arg1)))
12433             return -TARGET_EFAULT;
12434         ret = get_errno(chown(p, arg2, arg3));
12435         unlock_user(p, arg1, 0);
12436         return ret;
12437 #endif
12438 #ifdef TARGET_NR_setuid32
12439     case TARGET_NR_setuid32:
12440         return get_errno(sys_setuid(arg1));
12441 #endif
12442 #ifdef TARGET_NR_setgid32
12443     case TARGET_NR_setgid32:
12444         return get_errno(sys_setgid(arg1));
12445 #endif
12446 #ifdef TARGET_NR_setfsuid32
12447     case TARGET_NR_setfsuid32:
12448         return get_errno(setfsuid(arg1));
12449 #endif
12450 #ifdef TARGET_NR_setfsgid32
12451     case TARGET_NR_setfsgid32:
12452         return get_errno(setfsgid(arg1));
12453 #endif
12454 #ifdef TARGET_NR_mincore
12455     case TARGET_NR_mincore:
12456         {
12457             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12458             if (!a) {
12459                 return -TARGET_ENOMEM;
12460             }
12461             p = lock_user_string(arg3);
12462             if (!p) {
12463                 ret = -TARGET_EFAULT;
12464             } else {
12465                 ret = get_errno(mincore(a, arg2, p));
12466                 unlock_user(p, arg3, ret);
12467             }
12468             unlock_user(a, arg1, 0);
12469         }
12470         return ret;
12471 #endif
12472 #ifdef TARGET_NR_arm_fadvise64_64
12473     case TARGET_NR_arm_fadvise64_64:
12474         /* arm_fadvise64_64 looks like fadvise64_64 but
12475          * with different argument order: fd, advice, offset, len
12476          * rather than the usual fd, offset, len, advice.
12477          * Note that offset and len are both 64-bit so appear as
12478          * pairs of 32-bit registers.
12479          */
12480         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12481                             target_offset64(arg5, arg6), arg2);
12482         return -host_to_target_errno(ret);
12483 #endif
12484 
12485 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12486 
12487 #ifdef TARGET_NR_fadvise64_64
12488     case TARGET_NR_fadvise64_64:
12489 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12490         /* 6 args: fd, advice, offset (high, low), len (high, low) */
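              /* Rotate so the common call below sees fd, offset, len, advice. */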
12491         ret = arg2;
12492         arg2 = arg3;
12493         arg3 = arg4;
12494         arg4 = arg5;
12495         arg5 = arg6;
12496         arg6 = ret;
12497 #else
12498         /* 6 args: fd, offset (high, low), len (high, low), advice */
12499         if (regpairs_aligned(cpu_env, num)) {
12500             /* offset is in (3,4), len in (5,6) and advice in 7 */
12501             arg2 = arg3;
12502             arg3 = arg4;
12503             arg4 = arg5;
12504             arg5 = arg6;
12505             arg6 = arg7;
12506         }
12507 #endif
12508         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12509                             target_offset64(arg4, arg5), arg6);
12510         return -host_to_target_errno(ret);
12511 #endif
12512 
12513 #ifdef TARGET_NR_fadvise64
12514     case TARGET_NR_fadvise64:
12515         /* 5 args: fd, offset (high, low), len, advice */
12516         if (regpairs_aligned(cpu_env, num)) {
12517             /* offset is in (3,4), len in 5 and advice in 6 */
12518             arg2 = arg3;
12519             arg3 = arg4;
12520             arg4 = arg5;
12521             arg5 = arg6;
12522         }
12523         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12524         return -host_to_target_errno(ret);
12525 #endif
12526 
12527 #else /* not a 32-bit ABI */
12528 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12529 #ifdef TARGET_NR_fadvise64_64
12530     case TARGET_NR_fadvise64_64:
12531 #endif
12532 #ifdef TARGET_NR_fadvise64
12533     case TARGET_NR_fadvise64:
12534 #endif
12535 #ifdef TARGET_S390X
12536         switch (arg4) {
12537         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12538         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12539         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12540         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12541         default: break;
12542         }
12543 #endif
12544         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12545 #endif
12546 #endif /* end of 64-bit ABI fadvise handling */
12547 
12548 #ifdef TARGET_NR_madvise
12549     case TARGET_NR_madvise:
12550         return target_madvise(arg1, arg2, arg3);
12551 #endif
12552 #ifdef TARGET_NR_fcntl64
12553     case TARGET_NR_fcntl64:
12554     {
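              /*
               * fcntl64 supplies the 64-bit flock interface on 32-bit guests.
               * F_GETLK64/F_SETLK64/F_SETLKW64 need their struct flock64
               * translated (ARM OABI packs it differently); every other
               * command is handled by do_fcntl().
               */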
12555         int cmd;
12556         struct flock fl;
12557         from_flock64_fn *copyfrom = copy_from_user_flock64;
12558         to_flock64_fn *copyto = copy_to_user_flock64;
12559 
12560 #ifdef TARGET_ARM
12561         if (!cpu_env->eabi) {
12562             copyfrom = copy_from_user_oabi_flock64;
12563             copyto = copy_to_user_oabi_flock64;
12564         }
12565 #endif
12566 
12567         cmd = target_to_host_fcntl_cmd(arg2);
12568         if (cmd == -TARGET_EINVAL) {
12569             return cmd;
12570         }
12571 
12572         switch(arg2) {
12573         case TARGET_F_GETLK64:
12574             ret = copyfrom(&fl, arg3);
12575             if (ret) {
12576                 break;
12577             }
12578             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12579             if (ret == 0) {
12580                 ret = copyto(arg3, &fl);
12581             }
12582             break;
12583 
12584         case TARGET_F_SETLK64:
12585         case TARGET_F_SETLKW64:
12586             ret = copyfrom(&fl, arg3);
12587             if (ret) {
12588                 break;
12589             }
12590             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12591             break;
12592         default:
12593             ret = do_fcntl(arg1, arg2, arg3);
12594             break;
12595         }
12596         return ret;
12597     }
12598 #endif
12599 #ifdef TARGET_NR_cacheflush
12600     case TARGET_NR_cacheflush:
12601         /* self-modifying code is handled automatically, so nothing needed */
12602         return 0;
12603 #endif
12604 #ifdef TARGET_NR_getpagesize
12605     case TARGET_NR_getpagesize:
12606         return TARGET_PAGE_SIZE;
12607 #endif
12608     case TARGET_NR_gettid:
12609         return get_errno(sys_gettid());
12610 #ifdef TARGET_NR_readahead
12611     case TARGET_NR_readahead:
12612 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12613         if (regpairs_aligned(cpu_env, num)) {
12614             arg2 = arg3;
12615             arg3 = arg4;
12616             arg4 = arg5;
12617         }
12618         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12619 #else
12620         ret = get_errno(readahead(arg1, arg2, arg3));
12621 #endif
12622         return ret;
12623 #endif
12624 #ifdef CONFIG_ATTR
12625 #ifdef TARGET_NR_setxattr
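          /*
           * Extended attribute syscalls: lock the guest name/value buffers,
           * call the corresponding host xattr function and copy any result
           * back.  A NULL/zero-sized value buffer is legal and is used by
           * guests to query the required size.
           */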
12626     case TARGET_NR_listxattr:
12627     case TARGET_NR_llistxattr:
12628     {
12629         void *b = 0;
12630         if (arg2) {
12631             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12632             if (!b) {
12633                 return -TARGET_EFAULT;
12634             }
12635         }
12636         p = lock_user_string(arg1);
12637         if (p) {
12638             if (num == TARGET_NR_listxattr) {
12639                 ret = get_errno(listxattr(p, b, arg3));
12640             } else {
12641                 ret = get_errno(llistxattr(p, b, arg3));
12642             }
12643         } else {
12644             ret = -TARGET_EFAULT;
12645         }
12646         unlock_user(p, arg1, 0);
12647         unlock_user(b, arg2, arg3);
12648         return ret;
12649     }
12650     case TARGET_NR_flistxattr:
12651     {
12652         void *b = 0;
12653         if (arg2) {
12654             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12655             if (!b) {
12656                 return -TARGET_EFAULT;
12657             }
12658         }
12659         ret = get_errno(flistxattr(arg1, b, arg3));
12660         unlock_user(b, arg2, arg3);
12661         return ret;
12662     }
12663     case TARGET_NR_setxattr:
12664     case TARGET_NR_lsetxattr:
12665         {
12666             void *n, *v = 0;
12667             if (arg3) {
12668                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12669                 if (!v) {
12670                     return -TARGET_EFAULT;
12671                 }
12672             }
12673             p = lock_user_string(arg1);
12674             n = lock_user_string(arg2);
12675             if (p && n) {
12676                 if (num == TARGET_NR_setxattr) {
12677                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12678                 } else {
12679                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12680                 }
12681             } else {
12682                 ret = -TARGET_EFAULT;
12683             }
12684             unlock_user(p, arg1, 0);
12685             unlock_user(n, arg2, 0);
12686             unlock_user(v, arg3, 0);
12687         }
12688         return ret;
12689     case TARGET_NR_fsetxattr:
12690         {
12691             void *n, *v = 0;
12692             if (arg3) {
12693                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12694                 if (!v) {
12695                     return -TARGET_EFAULT;
12696                 }
12697             }
12698             n = lock_user_string(arg2);
12699             if (n) {
12700                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12701             } else {
12702                 ret = -TARGET_EFAULT;
12703             }
12704             unlock_user(n, arg2, 0);
12705             unlock_user(v, arg3, 0);
12706         }
12707         return ret;
12708     case TARGET_NR_getxattr:
12709     case TARGET_NR_lgetxattr:
12710         {
12711             void *n, *v = 0;
12712             if (arg3) {
12713                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12714                 if (!v) {
12715                     return -TARGET_EFAULT;
12716                 }
12717             }
12718             p = lock_user_string(arg1);
12719             n = lock_user_string(arg2);
12720             if (p && n) {
12721                 if (num == TARGET_NR_getxattr) {
12722                     ret = get_errno(getxattr(p, n, v, arg4));
12723                 } else {
12724                     ret = get_errno(lgetxattr(p, n, v, arg4));
12725                 }
12726             } else {
12727                 ret = -TARGET_EFAULT;
12728             }
12729             unlock_user(p, arg1, 0);
12730             unlock_user(n, arg2, 0);
12731             unlock_user(v, arg3, arg4);
12732         }
12733         return ret;
12734     case TARGET_NR_fgetxattr:
12735         {
12736             void *n, *v = 0;
12737             if (arg3) {
12738                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12739                 if (!v) {
12740                     return -TARGET_EFAULT;
12741                 }
12742             }
12743             n = lock_user_string(arg2);
12744             if (n) {
12745                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12746             } else {
12747                 ret = -TARGET_EFAULT;
12748             }
12749             unlock_user(n, arg2, 0);
12750             unlock_user(v, arg3, arg4);
12751         }
12752         return ret;
12753     case TARGET_NR_removexattr:
12754     case TARGET_NR_lremovexattr:
12755         {
12756             void *n;
12757             p = lock_user_string(arg1);
12758             n = lock_user_string(arg2);
12759             if (p && n) {
12760                 if (num == TARGET_NR_removexattr) {
12761                     ret = get_errno(removexattr(p, n));
12762                 } else {
12763                     ret = get_errno(lremovexattr(p, n));
12764                 }
12765             } else {
12766                 ret = -TARGET_EFAULT;
12767             }
12768             unlock_user(p, arg1, 0);
12769             unlock_user(n, arg2, 0);
12770         }
12771         return ret;
12772     case TARGET_NR_fremovexattr:
12773         {
12774             void *n;
12775             n = lock_user_string(arg2);
12776             if (n) {
12777                 ret = get_errno(fremovexattr(arg1, n));
12778             } else {
12779                 ret = -TARGET_EFAULT;
12780             }
12781             unlock_user(n, arg2, 0);
12782         }
12783         return ret;
12784 #endif
12785 #endif /* CONFIG_ATTR */
12786 #ifdef TARGET_NR_set_thread_area
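          /*
           * The TLS pointer is stored per architecture: MIPS keeps it in the
           * UserLocal cop0 register, 32-bit x86 installs a TLS descriptor via
           * do_set_thread_area(), and m68k records it in the TaskState.
           */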
12787     case TARGET_NR_set_thread_area:
12788 #if defined(TARGET_MIPS)
12789         cpu_env->active_tc.CP0_UserLocal = arg1;
12790         return 0;
12791 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12792         return do_set_thread_area(cpu_env, arg1);
12793 #elif defined(TARGET_M68K)
12794         {
12795             TaskState *ts = get_task_state(cpu);
12796             ts->tp_value = arg1;
12797             return 0;
12798         }
12799 #else
12800         return -TARGET_ENOSYS;
12801 #endif
12802 #endif
12803 #ifdef TARGET_NR_get_thread_area
12804     case TARGET_NR_get_thread_area:
12805 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12806         return do_get_thread_area(cpu_env, arg1);
12807 #elif defined(TARGET_M68K)
12808         {
12809             TaskState *ts = get_task_state(cpu);
12810             return ts->tp_value;
12811         }
12812 #else
12813         return -TARGET_ENOSYS;
12814 #endif
12815 #endif
12816 #ifdef TARGET_NR_getdomainname
12817     case TARGET_NR_getdomainname:
12818         return -TARGET_ENOSYS;
12819 #endif
12820 
12821 #ifdef TARGET_NR_clock_settime
12822     case TARGET_NR_clock_settime:
12823     {
12824         struct timespec ts;
12825 
12826         ret = target_to_host_timespec(&ts, arg2);
12827         if (!is_error(ret)) {
12828             ret = get_errno(clock_settime(arg1, &ts));
12829         }
12830         return ret;
12831     }
12832 #endif
12833 #ifdef TARGET_NR_clock_settime64
12834     case TARGET_NR_clock_settime64:
12835     {
12836         struct timespec ts;
12837 
12838         ret = target_to_host_timespec64(&ts, arg2);
12839         if (!is_error(ret)) {
12840             ret = get_errno(clock_settime(arg1, &ts));
12841         }
12842         return ret;
12843     }
12844 #endif
12845 #ifdef TARGET_NR_clock_gettime
12846     case TARGET_NR_clock_gettime:
12847     {
12848         struct timespec ts;
12849         ret = get_errno(clock_gettime(arg1, &ts));
12850         if (!is_error(ret)) {
12851             ret = host_to_target_timespec(arg2, &ts);
12852         }
12853         return ret;
12854     }
12855 #endif
12856 #ifdef TARGET_NR_clock_gettime64
12857     case TARGET_NR_clock_gettime64:
12858     {
12859         struct timespec ts;
12860         ret = get_errno(clock_gettime(arg1, &ts));
12861         if (!is_error(ret)) {
12862             ret = host_to_target_timespec64(arg2, &ts);
12863         }
12864         return ret;
12865     }
12866 #endif
12867 #ifdef TARGET_NR_clock_getres
12868     case TARGET_NR_clock_getres:
12869     {
12870         struct timespec ts;
12871         ret = get_errno(clock_getres(arg1, &ts));
12872         if (!is_error(ret)) {
12873             host_to_target_timespec(arg2, &ts);
12874         }
12875         return ret;
12876     }
12877 #endif
12878 #ifdef TARGET_NR_clock_getres_time64
12879     case TARGET_NR_clock_getres_time64:
12880     {
12881         struct timespec ts;
12882         ret = get_errno(clock_getres(arg1, &ts));
12883         if (!is_error(ret)) {
12884             host_to_target_timespec64(arg2, &ts);
12885         }
12886         return ret;
12887     }
12888 #endif
12889 #ifdef TARGET_NR_clock_nanosleep
12890     case TARGET_NR_clock_nanosleep:
12891     {
12892         struct timespec ts;
12893         if (target_to_host_timespec(&ts, arg3)) {
12894             return -TARGET_EFAULT;
12895         }
12896         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12897                                              &ts, arg4 ? &ts : NULL));
12898         /*
12899          * if the call is interrupted by a signal handler, it fails
12900          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12901          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12902          */
12903         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12904             host_to_target_timespec(arg4, &ts)) {
12905               return -TARGET_EFAULT;
12906         }
12907 
12908         return ret;
12909     }
12910 #endif
12911 #ifdef TARGET_NR_clock_nanosleep_time64
12912     case TARGET_NR_clock_nanosleep_time64:
12913     {
12914         struct timespec ts;
12915 
12916         if (target_to_host_timespec64(&ts, arg3)) {
12917             return -TARGET_EFAULT;
12918         }
12919 
12920         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12921                                              &ts, arg4 ? &ts : NULL));
12922 
12923         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12924             host_to_target_timespec64(arg4, &ts)) {
12925             return -TARGET_EFAULT;
12926         }
12927         return ret;
12928     }
12929 #endif
12930 
12931 #if defined(TARGET_NR_set_tid_address)
12932     case TARGET_NR_set_tid_address:
12933     {
12934         TaskState *ts = get_task_state(cpu);
12935         ts->child_tidptr = arg1;
12936         /* do not call host set_tid_address() syscall, instead return tid() */
12937         return get_errno(sys_gettid());
12938     }
12939 #endif
12940 
12941     case TARGET_NR_tkill:
12942         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12943 
12944     case TARGET_NR_tgkill:
12945         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12946                          target_to_host_signal(arg3)));
12947 
12948 #ifdef TARGET_NR_set_robust_list
12949     case TARGET_NR_set_robust_list:
12950     case TARGET_NR_get_robust_list:
12951         /* The ABI for supporting robust futexes has userspace pass
12952          * the kernel a pointer to a linked list which is updated by
12953          * userspace after the syscall; the list is walked by the kernel
12954          * when the thread exits. Since the linked list in QEMU guest
12955          * memory isn't a valid linked list for the host and we have
12956          * no way to reliably intercept the thread-death event, we can't
12957          * support these. Silently return ENOSYS so that guest userspace
12958          * falls back to a non-robust futex implementation (which should
12959          * be OK except in the corner case of the guest crashing while
12960          * holding a mutex that is shared with another process via
12961          * shared memory).
12962          */
12963         return -TARGET_ENOSYS;
12964 #endif
12965 
12966 #if defined(TARGET_NR_utimensat)
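          /*
           * utimensat: arg3 may be NULL, meaning "set both times to now";
           * a NULL pathname applies the timestamps to the fd in arg1 itself.
           */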
12967     case TARGET_NR_utimensat:
12968         {
12969             struct timespec *tsp, ts[2];
12970             if (!arg3) {
12971                 tsp = NULL;
12972             } else {
12973                 if (target_to_host_timespec(ts, arg3)) {
12974                     return -TARGET_EFAULT;
12975                 }
12976                 if (target_to_host_timespec(ts + 1, arg3 +
12977                                             sizeof(struct target_timespec))) {
12978                     return -TARGET_EFAULT;
12979                 }
12980                 tsp = ts;
12981             }
12982             if (!arg2) {
12983                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12984             } else {
12985                 if (!(p = lock_user_string(arg2))) {
12986                     return -TARGET_EFAULT;
12987                 }
12988                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12989                 unlock_user(p, arg2, 0);
12990             }
12991         }
12992         return ret;
12993 #endif
12994 #ifdef TARGET_NR_utimensat_time64
12995     case TARGET_NR_utimensat_time64:
12996         {
12997             struct timespec *tsp, ts[2];
12998             if (!arg3) {
12999                 tsp = NULL;
13000             } else {
13001                 if (target_to_host_timespec64(ts, arg3)) {
13002                     return -TARGET_EFAULT;
13003                 }
13004                 if (target_to_host_timespec64(ts + 1, arg3 +
13005                                      sizeof(struct target__kernel_timespec))) {
13006                     return -TARGET_EFAULT;
13007                 }
13008                 tsp = ts;
13009             }
13010             if (!arg2) {
13011                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13012             } else {
13013                 p = lock_user_string(arg2);
13014                 if (!p) {
13015                     return -TARGET_EFAULT;
13016                 }
13017                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13018                 unlock_user(p, arg2, 0);
13019             }
13020         }
13021         return ret;
13022 #endif
13023 #ifdef TARGET_NR_futex
13024     case TARGET_NR_futex:
13025         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13026 #endif
13027 #ifdef TARGET_NR_futex_time64
13028     case TARGET_NR_futex_time64:
13029         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13030 #endif
13031 #ifdef CONFIG_INOTIFY
13032 #if defined(TARGET_NR_inotify_init)
13033     case TARGET_NR_inotify_init:
13034         ret = get_errno(inotify_init());
13035         if (ret >= 0) {
13036             fd_trans_register(ret, &target_inotify_trans);
13037         }
13038         return ret;
13039 #endif
13040 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13041     case TARGET_NR_inotify_init1:
13042         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13043                                           fcntl_flags_tbl)));
13044         if (ret >= 0) {
13045             fd_trans_register(ret, &target_inotify_trans);
13046         }
13047         return ret;
13048 #endif
13049 #if defined(TARGET_NR_inotify_add_watch)
13050     case TARGET_NR_inotify_add_watch:
13051         p = lock_user_string(arg2);
13052         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13053         unlock_user(p, arg2, 0);
13054         return ret;
13055 #endif
13056 #if defined(TARGET_NR_inotify_rm_watch)
13057     case TARGET_NR_inotify_rm_watch:
13058         return get_errno(inotify_rm_watch(arg1, arg2));
13059 #endif
13060 #endif
13061 
13062 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13063     case TARGET_NR_mq_open:
13064         {
13065             struct mq_attr posix_mq_attr;
13066             struct mq_attr *pposix_mq_attr;
13067             int host_flags;
13068 
13069             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13070             pposix_mq_attr = NULL;
13071             if (arg4) {
13072                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13073                     return -TARGET_EFAULT;
13074                 }
13075                 pposix_mq_attr = &posix_mq_attr;
13076             }
13077             p = lock_user_string(arg1 - 1);
13078             if (!p) {
13079                 return -TARGET_EFAULT;
13080             }
13081             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13082             unlock_user (p, arg1, 0);
13083         }
13084         return ret;
13085 
13086     case TARGET_NR_mq_unlink:
13087         p = lock_user_string(arg1 - 1);
13088         if (!p) {
13089             return -TARGET_EFAULT;
13090         }
13091         ret = get_errno(mq_unlink(p));
13092         unlock_user (p, arg1, 0);
13093         return ret;
13094 
13095 #ifdef TARGET_NR_mq_timedsend
13096     case TARGET_NR_mq_timedsend:
13097         {
13098             struct timespec ts;
13099 
13100             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13101             if (arg5 != 0) {
13102                 if (target_to_host_timespec(&ts, arg5)) {
13103                     return -TARGET_EFAULT;
13104                 }
13105                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13106                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13107                     return -TARGET_EFAULT;
13108                 }
13109             } else {
13110                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13111             }
13112             unlock_user (p, arg2, arg3);
13113         }
13114         return ret;
13115 #endif
13116 #ifdef TARGET_NR_mq_timedsend_time64
13117     case TARGET_NR_mq_timedsend_time64:
13118         {
13119             struct timespec ts;
13120 
13121             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13122             if (arg5 != 0) {
13123                 if (target_to_host_timespec64(&ts, arg5)) {
13124                     return -TARGET_EFAULT;
13125                 }
13126                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13127                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13128                     return -TARGET_EFAULT;
13129                 }
13130             } else {
13131                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13132             }
13133             unlock_user(p, arg2, arg3);
13134         }
13135         return ret;
13136 #endif
13137 
13138 #ifdef TARGET_NR_mq_timedreceive
13139     case TARGET_NR_mq_timedreceive:
13140         {
13141             struct timespec ts;
13142             unsigned int prio;
13143 
13144             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13145             if (arg5 != 0) {
13146                 if (target_to_host_timespec(&ts, arg5)) {
13147                     return -TARGET_EFAULT;
13148                 }
13149                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13150                                                      &prio, &ts));
13151                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13152                     return -TARGET_EFAULT;
13153                 }
13154             } else {
13155                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13156                                                      &prio, NULL));
13157             }
13158             unlock_user (p, arg2, arg3);
13159             if (arg4 != 0)
13160                 put_user_u32(prio, arg4);
13161         }
13162         return ret;
13163 #endif
13164 #ifdef TARGET_NR_mq_timedreceive_time64
13165     case TARGET_NR_mq_timedreceive_time64:
13166         {
13167             struct timespec ts;
13168             unsigned int prio;
13169 
13170             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13171             if (arg5 != 0) {
13172                 if (target_to_host_timespec64(&ts, arg5)) {
13173                     return -TARGET_EFAULT;
13174                 }
13175                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13176                                                      &prio, &ts));
13177                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13178                     return -TARGET_EFAULT;
13179                 }
13180             } else {
13181                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13182                                                      &prio, NULL));
13183             }
13184             unlock_user(p, arg2, arg3);
13185             if (arg4 != 0) {
13186                 put_user_u32(prio, arg4);
13187             }
13188         }
13189         return ret;
13190 #endif
13191 
13192     /* Not implemented for now... */
13193 /*     case TARGET_NR_mq_notify: */
13194 /*         break; */
13195 
13196     case TARGET_NR_mq_getsetattr:
13197         {
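                  /*
                   * When new attributes are supplied, mq_setattr() also
                   * returns the previous ones; otherwise just query them.
                   * The old attributes are copied out only if arg3 is set.
                   */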
13198             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13199             ret = 0;
13200             if (arg2 != 0) {
13201                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13202                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13203                                            &posix_mq_attr_out));
13204             } else if (arg3 != 0) {
13205                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13206             }
13207             if (ret == 0 && arg3 != 0) {
13208                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13209             }
13210         }
13211         return ret;
13212 #endif
13213 
13214 #ifdef CONFIG_SPLICE
13215 #ifdef TARGET_NR_tee
13216     case TARGET_NR_tee:
13217         {
13218             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13219         }
13220         return ret;
13221 #endif
13222 #ifdef TARGET_NR_splice
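          /*
           * splice: the in/out offsets are optional 64-bit values in guest
           * memory; copy them in, call the host splice() and write the
           * updated offsets back.
           */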
13223     case TARGET_NR_splice:
13224         {
13225             loff_t loff_in, loff_out;
13226             loff_t *ploff_in = NULL, *ploff_out = NULL;
13227             if (arg2) {
13228                 if (get_user_u64(loff_in, arg2)) {
13229                     return -TARGET_EFAULT;
13230                 }
13231                 ploff_in = &loff_in;
13232             }
13233             if (arg4) {
13234                 if (get_user_u64(loff_out, arg4)) {
13235                     return -TARGET_EFAULT;
13236                 }
13237                 ploff_out = &loff_out;
13238             }
13239             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13240             if (arg2) {
13241                 if (put_user_u64(loff_in, arg2)) {
13242                     return -TARGET_EFAULT;
13243                 }
13244             }
13245             if (arg4) {
13246                 if (put_user_u64(loff_out, arg4)) {
13247                     return -TARGET_EFAULT;
13248                 }
13249             }
13250         }
13251         return ret;
13252 #endif
13253 #ifdef TARGET_NR_vmsplice
13254     case TARGET_NR_vmsplice:
13255         {
13256             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13257             if (vec != NULL) {
13258                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13259                 unlock_iovec(vec, arg2, arg3, 0);
13260             } else {
13261                 ret = -host_to_target_errno(errno);
13262             }
13263         }
13264         return ret;
13265 #endif
13266 #endif /* CONFIG_SPLICE */
13267 #ifdef CONFIG_EVENTFD
13268 #if defined(TARGET_NR_eventfd)
13269     case TARGET_NR_eventfd:
13270         ret = get_errno(eventfd(arg1, 0));
13271         if (ret >= 0) {
13272             fd_trans_register(ret, &target_eventfd_trans);
13273         }
13274         return ret;
13275 #endif
13276 #if defined(TARGET_NR_eventfd2)
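          /*
           * eventfd2: translate the guest O_NONBLOCK/O_CLOEXEC bits to host
           * flags and register an fd translator so the 64-bit counter is
           * byteswapped for the guest.
           */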
13277     case TARGET_NR_eventfd2:
13278     {
13279         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13280         if (arg2 & TARGET_O_NONBLOCK) {
13281             host_flags |= O_NONBLOCK;
13282         }
13283         if (arg2 & TARGET_O_CLOEXEC) {
13284             host_flags |= O_CLOEXEC;
13285         }
13286         ret = get_errno(eventfd(arg1, host_flags));
13287         if (ret >= 0) {
13288             fd_trans_register(ret, &target_eventfd_trans);
13289         }
13290         return ret;
13291     }
13292 #endif
13293 #endif /* CONFIG_EVENTFD  */
13294 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13295     case TARGET_NR_fallocate:
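        /*
         * Note (added for clarity): on 32-bit ABIs (other than MIPS N32) the
         * 64-bit offset and length each arrive split across a pair of
         * register arguments and are reassembled with target_offset64().
         */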
13296 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13297         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13298                                   target_offset64(arg5, arg6)));
13299 #else
13300         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13301 #endif
13302         return ret;
13303 #endif
13304 #if defined(CONFIG_SYNC_FILE_RANGE)
13305 #if defined(TARGET_NR_sync_file_range)
13306     case TARGET_NR_sync_file_range:
13307 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13308 #if defined(TARGET_MIPS)
13309         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13310                                         target_offset64(arg5, arg6), arg7));
13311 #else
13312         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13313                                         target_offset64(arg4, arg5), arg6));
13314 #endif /* !TARGET_MIPS */
13315 #else
13316         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13317 #endif
13318         return ret;
13319 #endif
13320 #if defined(TARGET_NR_sync_file_range2) || \
13321     defined(TARGET_NR_arm_sync_file_range)
13322 #if defined(TARGET_NR_sync_file_range2)
13323     case TARGET_NR_sync_file_range2:
13324 #endif
13325 #if defined(TARGET_NR_arm_sync_file_range)
13326     case TARGET_NR_arm_sync_file_range:
13327 #endif
13328         /* This is like sync_file_range but the arguments are reordered */
13329 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13330         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13331                                         target_offset64(arg5, arg6), arg2));
13332 #else
13333         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13334 #endif
13335         return ret;
13336 #endif
13337 #endif
13338 #if defined(TARGET_NR_signalfd4)
13339     case TARGET_NR_signalfd4:
13340         return do_signalfd4(arg1, arg2, arg4);
13341 #endif
13342 #if defined(TARGET_NR_signalfd)
13343     case TARGET_NR_signalfd:
13344         return do_signalfd4(arg1, arg2, 0);
13345 #endif
13346 #if defined(CONFIG_EPOLL)
13347 #if defined(TARGET_NR_epoll_create)
13348     case TARGET_NR_epoll_create:
13349         return get_errno(epoll_create(arg1));
13350 #endif
13351 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13352     case TARGET_NR_epoll_create1:
13353         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13354 #endif
13355 #if defined(TARGET_NR_epoll_ctl)
13356     case TARGET_NR_epoll_ctl:
13357     {
13358         struct epoll_event ep;
13359         struct epoll_event *epp = 0;
13360         if (arg4) {
13361             if (arg2 != EPOLL_CTL_DEL) {
13362                 struct target_epoll_event *target_ep;
13363                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13364                     return -TARGET_EFAULT;
13365                 }
13366                 ep.events = tswap32(target_ep->events);
13367                 /*
13368                  * The epoll_data_t union is just opaque data to the kernel,
13369                  * so we transfer all 64 bits across and need not worry what
13370                  * actual data type it is.
13371                  */
13372                 ep.data.u64 = tswap64(target_ep->data.u64);
13373                 unlock_user_struct(target_ep, arg4, 0);
13374             }
13375             /*
13376              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13377              * non-NULL pointer, even though this argument is ignored.
13378              *
13379              */
13380             epp = &ep;
13381         }
13382         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13383     }
13384 #endif
13385 
13386 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13387 #if defined(TARGET_NR_epoll_wait)
13388     case TARGET_NR_epoll_wait:
13389 #endif
13390 #if defined(TARGET_NR_epoll_pwait)
13391     case TARGET_NR_epoll_pwait:
13392 #endif
13393     {
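        /*
         * Note (added for clarity): epoll_wait and epoll_pwait share this
         * implementation.  Both are funnelled through the host epoll_pwait(),
         * with a NULL signal mask standing in for plain epoll_wait; events
         * are collected in a host-side buffer and byte-swapped back into the
         * guest array on success.
         */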
13394         struct target_epoll_event *target_ep;
13395         struct epoll_event *ep;
13396         int epfd = arg1;
13397         int maxevents = arg3;
13398         int timeout = arg4;
13399 
13400         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13401             return -TARGET_EINVAL;
13402         }
13403 
13404         target_ep = lock_user(VERIFY_WRITE, arg2,
13405                               maxevents * sizeof(struct target_epoll_event), 1);
13406         if (!target_ep) {
13407             return -TARGET_EFAULT;
13408         }
13409 
13410         ep = g_try_new(struct epoll_event, maxevents);
13411         if (!ep) {
13412             unlock_user(target_ep, arg2, 0);
13413             return -TARGET_ENOMEM;
13414         }
13415 
13416         switch (num) {
13417 #if defined(TARGET_NR_epoll_pwait)
13418         case TARGET_NR_epoll_pwait:
13419         {
13420             sigset_t *set = NULL;
13421 
13422             if (arg5) {
13423                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13424                 if (ret != 0) {
13425                     break;
13426                 }
13427             }
13428 
13429             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13430                                              set, SIGSET_T_SIZE));
13431 
13432             if (set) {
13433                 finish_sigsuspend_mask(ret);
13434             }
13435             break;
13436         }
13437 #endif
13438 #if defined(TARGET_NR_epoll_wait)
13439         case TARGET_NR_epoll_wait:
13440             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13441                                              NULL, 0));
13442             break;
13443 #endif
13444         default:
13445             ret = -TARGET_ENOSYS;
13446         }
13447         if (!is_error(ret)) {
13448             int i;
13449             for (i = 0; i < ret; i++) {
13450                 target_ep[i].events = tswap32(ep[i].events);
13451                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13452             }
13453             unlock_user(target_ep, arg2,
13454                         ret * sizeof(struct target_epoll_event));
13455         } else {
13456             unlock_user(target_ep, arg2, 0);
13457         }
13458         g_free(ep);
13459         return ret;
13460     }
13461 #endif
13462 #endif
13463 #ifdef TARGET_NR_prlimit64
13464     case TARGET_NR_prlimit64:
13465     {
13466         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13467         struct target_rlimit64 *target_rnew, *target_rold;
13468         struct host_rlimit64 rnew, rold, *rnewp = 0;
13469         int resource = target_to_host_resource(arg2);
13470 
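        /*
         * Note (added for clarity): as with the setrlimit handling elsewhere
         * in this file, new limits for RLIMIT_AS, RLIMIT_DATA and
         * RLIMIT_STACK are not passed through to the host, since they would
         * also constrain QEMU's own allocations; the old limits are still
         * reported if requested.
         */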
13471         if (arg3 && (resource != RLIMIT_AS &&
13472                      resource != RLIMIT_DATA &&
13473                      resource != RLIMIT_STACK)) {
13474             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13475                 return -TARGET_EFAULT;
13476             }
13477             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13478             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13479             unlock_user_struct(target_rnew, arg3, 0);
13480             rnewp = &rnew;
13481         }
13482 
13483         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13484         if (!is_error(ret) && arg4) {
13485             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13486                 return -TARGET_EFAULT;
13487             }
13488             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13489             __put_user(rold.rlim_max, &target_rold->rlim_max);
13490             unlock_user_struct(target_rold, arg4, 1);
13491         }
13492         return ret;
13493     }
13494 #endif
13495 #ifdef TARGET_NR_gethostname
13496     case TARGET_NR_gethostname:
13497     {
13498         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13499         if (name) {
13500             ret = get_errno(gethostname(name, arg2));
13501             unlock_user(name, arg1, arg2);
13502         } else {
13503             ret = -TARGET_EFAULT;
13504         }
13505         return ret;
13506     }
13507 #endif
13508 #ifdef TARGET_NR_atomic_cmpxchg_32
13509     case TARGET_NR_atomic_cmpxchg_32:
13510     {
13511         /* should use start_exclusive from main.c */
13512         abi_ulong mem_value;
13513         if (get_user_u32(mem_value, arg6)) {
13514             target_siginfo_t info;
13515             info.si_signo = SIGSEGV;
13516             info.si_errno = 0;
13517             info.si_code = TARGET_SEGV_MAPERR;
13518             info._sifields._sigfault._addr = arg6;
13519             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13520             ret = 0xdeadbeef;
13521 
13522         }
13523         if (mem_value == arg2)
13524             put_user_u32(arg1, arg6);
13525         return mem_value;
13526     }
13527 #endif
13528 #ifdef TARGET_NR_atomic_barrier
13529     case TARGET_NR_atomic_barrier:
13530         /* Like the kernel implementation and the
13531            QEMU Arm barrier, treat this as a no-op? */
13532         return 0;
13533 #endif
13534 
13535 #ifdef TARGET_NR_timer_create
13536     case TARGET_NR_timer_create:
13537     {
13538         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13539 
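        /*
         * Note (added for clarity): host timers live in the fixed
         * g_posix_timers[] table; the id returned to the guest is the table
         * index ORed with TIMER_MAGIC, which get_timer_id() later validates
         * and strips to recover the host timer.
         */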
13540         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13541 
13542         int clkid = arg1;
13543         int timer_index = next_free_host_timer();
13544 
13545         if (timer_index < 0) {
13546             ret = -TARGET_EAGAIN;
13547         } else {
13548             timer_t *phtimer = g_posix_timers + timer_index;
13549 
13550             if (arg2) {
13551                 phost_sevp = &host_sevp;
13552                 ret = target_to_host_sigevent(phost_sevp, arg2);
13553                 if (ret != 0) {
13554                     free_host_timer_slot(timer_index);
13555                     return ret;
13556                 }
13557             }
13558 
13559             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13560             if (ret) {
13561                 free_host_timer_slot(timer_index);
13562             } else {
13563                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13564                     timer_delete(*phtimer);
13565                     free_host_timer_slot(timer_index);
13566                     return -TARGET_EFAULT;
13567                 }
13568             }
13569         }
13570         return ret;
13571     }
13572 #endif
13573 
13574 #ifdef TARGET_NR_timer_settime
13575     case TARGET_NR_timer_settime:
13576     {
13577         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13578          * struct itimerspec * old_value */
13579         target_timer_t timerid = get_timer_id(arg1);
13580 
13581         if (timerid < 0) {
13582             ret = timerid;
13583         } else if (arg3 == 0) {
13584             ret = -TARGET_EINVAL;
13585         } else {
13586             timer_t htimer = g_posix_timers[timerid];
13587             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13588 
13589             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13590                 return -TARGET_EFAULT;
13591             }
13592             ret = get_errno(
13593                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13594             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13595                 return -TARGET_EFAULT;
13596             }
13597         }
13598         return ret;
13599     }
13600 #endif
13601 
13602 #ifdef TARGET_NR_timer_settime64
13603     case TARGET_NR_timer_settime64:
13604     {
13605         target_timer_t timerid = get_timer_id(arg1);
13606 
13607         if (timerid < 0) {
13608             ret = timerid;
13609         } else if (arg3 == 0) {
13610             ret = -TARGET_EINVAL;
13611         } else {
13612             timer_t htimer = g_posix_timers[timerid];
13613             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13614 
13615             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13616                 return -TARGET_EFAULT;
13617             }
13618             ret = get_errno(
13619                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13620             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13621                 return -TARGET_EFAULT;
13622             }
13623         }
13624         return ret;
13625     }
13626 #endif
13627 
13628 #ifdef TARGET_NR_timer_gettime
13629     case TARGET_NR_timer_gettime:
13630     {
13631         /* args: timer_t timerid, struct itimerspec *curr_value */
13632         target_timer_t timerid = get_timer_id(arg1);
13633 
13634         if (timerid < 0) {
13635             ret = timerid;
13636         } else if (!arg2) {
13637             ret = -TARGET_EFAULT;
13638         } else {
13639             timer_t htimer = g_posix_timers[timerid];
13640             struct itimerspec hspec;
13641             ret = get_errno(timer_gettime(htimer, &hspec));
13642 
13643             if (host_to_target_itimerspec(arg2, &hspec)) {
13644                 ret = -TARGET_EFAULT;
13645             }
13646         }
13647         return ret;
13648     }
13649 #endif
13650 
13651 #ifdef TARGET_NR_timer_gettime64
13652     case TARGET_NR_timer_gettime64:
13653     {
13654         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13655         target_timer_t timerid = get_timer_id(arg1);
13656 
13657         if (timerid < 0) {
13658             ret = timerid;
13659         } else if (!arg2) {
13660             ret = -TARGET_EFAULT;
13661         } else {
13662             timer_t htimer = g_posix_timers[timerid];
13663             struct itimerspec hspec;
13664             ret = get_errno(timer_gettime(htimer, &hspec));
13665 
13666             if (host_to_target_itimerspec64(arg2, &hspec)) {
13667                 ret = -TARGET_EFAULT;
13668             }
13669         }
13670         return ret;
13671     }
13672 #endif
13673 
13674 #ifdef TARGET_NR_timer_getoverrun
13675     case TARGET_NR_timer_getoverrun:
13676     {
13677         /* args: timer_t timerid */
13678         target_timer_t timerid = get_timer_id(arg1);
13679 
13680         if (timerid < 0) {
13681             ret = timerid;
13682         } else {
13683             timer_t htimer = g_posix_timers[timerid];
13684             ret = get_errno(timer_getoverrun(htimer));
13685         }
13686         return ret;
13687     }
13688 #endif
13689 
13690 #ifdef TARGET_NR_timer_delete
13691     case TARGET_NR_timer_delete:
13692     {
13693         /* args: timer_t timerid */
13694         target_timer_t timerid = get_timer_id(arg1);
13695 
13696         if (timerid < 0) {
13697             ret = timerid;
13698         } else {
13699             timer_t htimer = g_posix_timers[timerid];
13700             ret = get_errno(timer_delete(htimer));
13701             free_host_timer_slot(timerid);
13702         }
13703         return ret;
13704     }
13705 #endif
13706 
13707 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13708     case TARGET_NR_timerfd_create:
13709         ret = get_errno(timerfd_create(arg1,
13710                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13711         if (ret >= 0) {
13712             fd_trans_register(ret, &target_timerfd_trans);
13713         }
13714         return ret;
13715 #endif
13716 
13717 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13718     case TARGET_NR_timerfd_gettime:
13719         {
13720             struct itimerspec its_curr;
13721 
13722             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13723 
13724             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13725                 return -TARGET_EFAULT;
13726             }
13727         }
13728         return ret;
13729 #endif
13730 
13731 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13732     case TARGET_NR_timerfd_gettime64:
13733         {
13734             struct itimerspec its_curr;
13735 
13736             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13737 
13738             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13739                 return -TARGET_EFAULT;
13740             }
13741         }
13742         return ret;
13743 #endif
13744 
13745 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13746     case TARGET_NR_timerfd_settime:
13747         {
13748             struct itimerspec its_new, its_old, *p_new;
13749 
13750             if (arg3) {
13751                 if (target_to_host_itimerspec(&its_new, arg3)) {
13752                     return -TARGET_EFAULT;
13753                 }
13754                 p_new = &its_new;
13755             } else {
13756                 p_new = NULL;
13757             }
13758 
13759             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13760 
13761             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13762                 return -TARGET_EFAULT;
13763             }
13764         }
13765         return ret;
13766 #endif
13767 
13768 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13769     case TARGET_NR_timerfd_settime64:
13770         {
13771             struct itimerspec its_new, its_old, *p_new;
13772 
13773             if (arg3) {
13774                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13775                     return -TARGET_EFAULT;
13776                 }
13777                 p_new = &its_new;
13778             } else {
13779                 p_new = NULL;
13780             }
13781 
13782             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13783 
13784             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13785                 return -TARGET_EFAULT;
13786             }
13787         }
13788         return ret;
13789 #endif
13790 
13791 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13792     case TARGET_NR_ioprio_get:
13793         return get_errno(ioprio_get(arg1, arg2));
13794 #endif
13795 
13796 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13797     case TARGET_NR_ioprio_set:
13798         return get_errno(ioprio_set(arg1, arg2, arg3));
13799 #endif
13800 
13801 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13802     case TARGET_NR_setns:
13803         return get_errno(setns(arg1, arg2));
13804 #endif
13805 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13806     case TARGET_NR_unshare:
13807         return get_errno(unshare(arg1));
13808 #endif
13809 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13810     case TARGET_NR_kcmp:
13811         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13812 #endif
13813 #ifdef TARGET_NR_swapcontext
13814     case TARGET_NR_swapcontext:
13815         /* PowerPC specific.  */
13816         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13817 #endif
13818 #ifdef TARGET_NR_memfd_create
13819     case TARGET_NR_memfd_create:
13820         p = lock_user_string(arg1);
13821         if (!p) {
13822             return -TARGET_EFAULT;
13823         }
13824         ret = get_errno(memfd_create(p, arg2));
13825         fd_trans_unregister(ret);
13826         unlock_user(p, arg1, 0);
13827         return ret;
13828 #endif
13829 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13830     case TARGET_NR_membarrier:
13831         return get_errno(membarrier(arg1, arg2));
13832 #endif
13833 
13834 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13835     case TARGET_NR_copy_file_range:
13836         {
13837             loff_t inoff, outoff;
13838             loff_t *pinoff = NULL, *poutoff = NULL;
13839 
13840             if (arg2) {
13841                 if (get_user_u64(inoff, arg2)) {
13842                     return -TARGET_EFAULT;
13843                 }
13844                 pinoff = &inoff;
13845             }
13846             if (arg4) {
13847                 if (get_user_u64(outoff, arg4)) {
13848                     return -TARGET_EFAULT;
13849                 }
13850                 poutoff = &outoff;
13851             }
13852             /* Do not sign-extend the count parameter. */
13853             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13854                                                  (abi_ulong)arg5, arg6));
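            /*
             * Note (added for clarity): the updated offsets are copied back
             * to the guest only when the call succeeded and actually
             * transferred data.
             */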
13855             if (!is_error(ret) && ret > 0) {
13856                 if (arg2) {
13857                     if (put_user_u64(inoff, arg2)) {
13858                         return -TARGET_EFAULT;
13859                     }
13860                 }
13861                 if (arg4) {
13862                     if (put_user_u64(outoff, arg4)) {
13863                         return -TARGET_EFAULT;
13864                     }
13865                 }
13866             }
13867         }
13868         return ret;
13869 #endif
13870 
13871 #if defined(TARGET_NR_pivot_root)
13872     case TARGET_NR_pivot_root:
13873         {
13874             void *p2;
13875             p = lock_user_string(arg1); /* new_root */
13876             p2 = lock_user_string(arg2); /* put_old */
13877             if (!p || !p2) {
13878                 ret = -TARGET_EFAULT;
13879             } else {
13880                 ret = get_errno(pivot_root(p, p2));
13881             }
13882             unlock_user(p2, arg2, 0);
13883             unlock_user(p, arg1, 0);
13884         }
13885         return ret;
13886 #endif
13887 
13888 #if defined(TARGET_NR_riscv_hwprobe)
13889     case TARGET_NR_riscv_hwprobe:
13890         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13891 #endif
13892 
13893     default:
13894         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13895         return -TARGET_ENOSYS;
13896     }
13897     return ret;
13898 }
13899 
13900 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13901                     abi_long arg2, abi_long arg3, abi_long arg4,
13902                     abi_long arg5, abi_long arg6, abi_long arg7,
13903                     abi_long arg8)
13904 {
13905     CPUState *cpu = env_cpu(cpu_env);
13906     abi_long ret;
13907 
13908 #ifdef DEBUG_ERESTARTSYS
13909     /* Debug-only code for exercising the syscall-restart code paths
13910      * in the per-architecture cpu main loops: restart every syscall
13911      * the guest makes once before letting it through.
13912      */
13913     {
13914         static bool flag;
13915         flag = !flag;
13916         if (flag) {
13917             return -QEMU_ERESTARTSYS;
13918         }
13919     }
13920 #endif
13921 
13922     record_syscall_start(cpu, num, arg1,
13923                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13924 
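    /*
     * Note (added for clarity): with -strace (LOG_STRACE) enabled, the
     * decoded syscall is logged before dispatch and its return value
     * afterwards.
     */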
13925     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13926         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13927     }
13928 
13929     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13930                       arg5, arg6, arg7, arg8);
13931 
13932     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13933         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13934                           arg3, arg4, arg5, arg6);
13935     }
13936 
13937     record_syscall_return(cpu, num, ret);
13938     return ret;
13939 }
13940