1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/mmap-lock.h"
30 #include "exec/tb-flush.h"
31 #include "exec/translation-block.h"
32 #include <elf.h>
33 #include <endian.h>
34 #include <grp.h>
35 #include <sys/ipc.h>
36 #include <sys/msg.h>
37 #include <sys/wait.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/swap.h>
45 #include <linux/capability.h>
46 #include <sched.h>
47 #include <sys/timex.h>
48 #include <sys/socket.h>
49 #include <linux/sockios.h>
50 #include <sys/un.h>
51 #include <sys/uio.h>
52 #include <poll.h>
53 #include <sys/times.h>
54 #include <sys/shm.h>
55 #include <sys/sem.h>
56 #include <sys/statfs.h>
57 #include <utime.h>
58 #include <sys/sysinfo.h>
59 #include <sys/signalfd.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 #include <linux/wireless.h>
65 #include <linux/icmp.h>
66 #include <linux/icmpv6.h>
67 #include <linux/if_tun.h>
68 #include <linux/in6.h>
69 #include <linux/errqueue.h>
70 #include <linux/random.h>
71 #ifdef CONFIG_TIMERFD
72 #include <sys/timerfd.h>
73 #endif
74 #ifdef CONFIG_EVENTFD
75 #include <sys/eventfd.h>
76 #endif
77 #ifdef CONFIG_EPOLL
78 #include <sys/epoll.h>
79 #endif
80 #ifdef CONFIG_ATTR
81 #include "qemu/xattr.h"
82 #endif
83 #ifdef CONFIG_SENDFILE
84 #include <sys/sendfile.h>
85 #endif
86 #ifdef HAVE_SYS_KCOV_H
87 #include <sys/kcov.h>
88 #endif
89 
90 #define termios host_termios
91 #define winsize host_winsize
92 #define termio host_termio
93 #define sgttyb host_sgttyb /* same as target */
94 #define tchars host_tchars /* same as target */
95 #define ltchars host_ltchars /* same as target */
96 
97 #include <linux/termios.h>
98 #include <linux/unistd.h>
99 #include <linux/cdrom.h>
100 #include <linux/hdreg.h>
101 #include <linux/soundcard.h>
102 #include <linux/kd.h>
103 #include <linux/mtio.h>
104 #include <linux/fs.h>
105 #include <linux/fd.h>
106 #if defined(CONFIG_FIEMAP)
107 #include <linux/fiemap.h>
108 #endif
109 #include <linux/fb.h>
110 #if defined(CONFIG_USBFS)
111 #include <linux/usbdevice_fs.h>
112 #include <linux/usb/ch9.h>
113 #endif
114 #include <linux/vt.h>
115 #include <linux/dm-ioctl.h>
116 #include <linux/reboot.h>
117 #include <linux/route.h>
118 #include <linux/filter.h>
119 #include <linux/blkpg.h>
120 #include <netpacket/packet.h>
121 #include <linux/netlink.h>
122 #include <linux/if_alg.h>
123 #include <linux/rtc.h>
124 #include <sound/asound.h>
125 #ifdef HAVE_BTRFS_H
126 #include <linux/btrfs.h>
127 #endif
128 #ifdef HAVE_DRM_H
129 #include <libdrm/drm.h>
130 #include <libdrm/i915_drm.h>
131 #endif
132 #include "linux_loop.h"
133 #include "uname.h"
134 
135 #include "qemu.h"
136 #include "user-internals.h"
137 #include "strace.h"
138 #include "signal-common.h"
139 #include "loader.h"
140 #include "user-mmap.h"
141 #include "user/page-protection.h"
142 #include "user/safe-syscall.h"
143 #include "user/signal.h"
144 #include "qemu/guest-random.h"
145 #include "qemu/selfmap.h"
146 #include "user/syscall-trace.h"
147 #include "special-errno.h"
148 #include "qapi/error.h"
149 #include "fd-trans.h"
150 #include "user/cpu_loop.h"
151 
152 #ifndef CLONE_IO
153 #define CLONE_IO                0x80000000      /* Clone io context */
154 #endif
155 
156 /* We can't directly call the host clone syscall, because this will
157  * badly confuse libc (breaking mutexes, for example). So we must
158  * divide clone flags into:
159  *  * flag combinations that look like pthread_create()
160  *  * flag combinations that look like fork()
161  *  * flags we can implement within QEMU itself
162  *  * flags we can't support and will return an error for
163  */
164 /* For thread creation, all these flags must be present; for
165  * fork, none must be present.
166  */
167 #define CLONE_THREAD_FLAGS                              \
168     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
169      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
170 
171 /* These flags are ignored:
172  * CLONE_DETACHED is now ignored by the kernel;
173  * CLONE_IO is just an optimisation hint to the I/O scheduler
174  */
175 #define CLONE_IGNORED_FLAGS                     \
176     (CLONE_DETACHED | CLONE_IO)
177 
178 #ifndef CLONE_PIDFD
179 # define CLONE_PIDFD 0x00001000
180 #endif
181 
182 /* Flags for fork which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_FORK_FLAGS               \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
186 
187 /* Flags for thread creation which we can implement within QEMU itself */
188 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
189     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
190      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
191 
192 #define CLONE_INVALID_FORK_FLAGS                                        \
193     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
194 
195 #define CLONE_INVALID_THREAD_FLAGS                                      \
196     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
197        CLONE_IGNORED_FLAGS))
198 
199 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
200  * have almost all been allocated. We cannot support any of
201  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
202  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
203  * The checks against the invalid thread masks above will catch these.
204  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
205  */
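
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): how the masks defined above could be used to classify a guest
 * clone() request.  The real dispatch lives in do_fork(); the function
 * name and return convention here are hypothetical.
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): all the thread flags are present. */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -EINVAL : 1;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* Looks like fork(): none of the thread flags are present. */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -EINVAL : 0;
    }
    /* Partial combinations of the thread flags are not supported. */
    return -EINVAL;
}
#endif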
206 
207 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
208  * once. This exercises the codepaths for restart.
209  */
210 //#define DEBUG_ERESTARTSYS
211 
212 //#include <linux/msdos_fs.h>
213 #define VFAT_IOCTL_READDIR_BOTH \
214     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
215 #define VFAT_IOCTL_READDIR_SHORT \
216     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
217 
218 #undef _syscall0
219 #undef _syscall1
220 #undef _syscall2
221 #undef _syscall3
222 #undef _syscall4
223 #undef _syscall5
224 #undef _syscall6
225 
226 #define _syscall0(type,name)		\
227 static type name (void)			\
228 {					\
229 	return syscall(__NR_##name);	\
230 }
231 
232 #define _syscall1(type,name,type1,arg1)		\
233 static type name (type1 arg1)			\
234 {						\
235 	return syscall(__NR_##name, arg1);	\
236 }
237 
238 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
239 static type name (type1 arg1,type2 arg2)		\
240 {							\
241 	return syscall(__NR_##name, arg1, arg2);	\
242 }
243 
244 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
245 static type name (type1 arg1,type2 arg2,type3 arg3)		\
246 {								\
247 	return syscall(__NR_##name, arg1, arg2, arg3);		\
248 }
249 
250 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
252 {										\
253 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
254 }
255 
256 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
257 		  type5,arg5)							\
258 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
259 {										\
260 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
261 }
262 
263 
264 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
265 		  type5,arg5,type6,arg6)					\
266 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
267                   type6 arg6)							\
268 {										\
269 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
270 }
271 
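/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): what one of the _syscallN macros above expands to.  For
 * example, _syscall1(int, exit_group, int, error_code) generates a thin
 * static wrapper around the raw host syscall:
 */
#if 0
static int exit_group(int error_code)
{
    return syscall(__NR_exit_group, error_code);
}
#endif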
272 
273 #define __NR_sys_uname __NR_uname
274 #define __NR_sys_getcwd1 __NR_getcwd
275 #define __NR_sys_getdents __NR_getdents
276 #define __NR_sys_getdents64 __NR_getdents64
277 #define __NR_sys_getpriority __NR_getpriority
278 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
279 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
280 #define __NR_sys_syslog __NR_syslog
281 #if defined(__NR_futex)
282 # define __NR_sys_futex __NR_futex
283 #endif
284 #if defined(__NR_futex_time64)
285 # define __NR_sys_futex_time64 __NR_futex_time64
286 #endif
287 #define __NR_sys_statx __NR_statx
288 
289 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
290 #define __NR__llseek __NR_lseek
291 #endif
292 
293 /* Newer kernel ports have llseek() instead of _llseek() */
294 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
295 #define TARGET_NR__llseek TARGET_NR_llseek
296 #endif
297 
298 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
299 #ifndef TARGET_O_NONBLOCK_MASK
300 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
301 #endif
302 
303 #define __NR_sys_gettid __NR_gettid
304 _syscall0(int, sys_gettid)
305 
306 /* For the 64-bit guest on 32-bit host case we must emulate
307  * getdents using getdents64, because otherwise the host
308  * might hand us back more dirent records than we can fit
309  * into the guest buffer after structure format conversion.
310  * Otherwise we emulate getdents with getdents if the host has it.
311  */
312 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
313 #define EMULATE_GETDENTS_WITH_GETDENTS
314 #endif
315 
316 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
317 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
318 #endif
319 #if (defined(TARGET_NR_getdents) && \
320       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
321     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
322 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
323 #endif
324 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
325 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
326           loff_t *, res, unsigned int, wh);
327 #endif
328 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
329 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
330           siginfo_t *, uinfo)
331 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
332 #ifdef __NR_exit_group
333 _syscall1(int,exit_group,int,error_code)
334 #endif
335 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
336 #define __NR_sys_close_range __NR_close_range
337 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
338 #ifndef CLOSE_RANGE_CLOEXEC
339 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
340 #endif
341 #endif
342 #if defined(__NR_futex)
343 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_futex_time64)
347 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
348           const struct timespec *,timeout,int *,uaddr2,int,val3)
349 #endif
350 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
351 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
354 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
355                              unsigned int, flags);
356 #endif
357 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
358 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
359 #endif
360 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
361 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
362           unsigned long *, user_mask_ptr);
363 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
364 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
365           unsigned long *, user_mask_ptr);
366 /* sched_attr is not defined in glibc < 2.41 */
367 #ifndef SCHED_ATTR_SIZE_VER0
368 struct sched_attr {
369     uint32_t size;
370     uint32_t sched_policy;
371     uint64_t sched_flags;
372     int32_t sched_nice;
373     uint32_t sched_priority;
374     uint64_t sched_runtime;
375     uint64_t sched_deadline;
376     uint64_t sched_period;
377     uint32_t sched_util_min;
378     uint32_t sched_util_max;
379 };
380 #endif
381 #define __NR_sys_sched_getattr __NR_sched_getattr
382 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
383           unsigned int, size, unsigned int, flags);
384 #define __NR_sys_sched_setattr __NR_sched_setattr
385 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
386           unsigned int, flags);
387 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
388 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
389 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
390 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
391           const struct sched_param *, param);
392 #define __NR_sys_sched_getparam __NR_sched_getparam
393 _syscall2(int, sys_sched_getparam, pid_t, pid,
394           struct sched_param *, param);
395 #define __NR_sys_sched_setparam __NR_sched_setparam
396 _syscall2(int, sys_sched_setparam, pid_t, pid,
397           const struct sched_param *, param);
398 #define __NR_sys_getcpu __NR_getcpu
399 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
400 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
401           void *, arg);
402 _syscall2(int, capget, struct __user_cap_header_struct *, header,
403           struct __user_cap_data_struct *, data);
404 _syscall2(int, capset, struct __user_cap_header_struct *, header,
405           struct __user_cap_data_struct *, data);
406 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
407 _syscall2(int, ioprio_get, int, which, int, who)
408 #endif
409 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
410 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
411 #endif
412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
413 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
414 #endif
415 
416 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
417 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
418           unsigned long, idx1, unsigned long, idx2)
419 #endif
420 
421 /*
422  * It is assumed that struct statx is architecture independent.
423  */
424 #if defined(TARGET_NR_statx) && defined(__NR_statx)
425 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
426           unsigned int, mask, struct target_statx *, statxbuf)
427 #endif
428 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
429 _syscall2(int, membarrier, int, cmd, int, flags)
430 #endif
431 
432 static const bitmask_transtbl fcntl_flags_tbl[] = {
433   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
434   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
435   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
436   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
437   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
438   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
439   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
440   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
441   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
442   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
443   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
444   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
445   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
446 #if defined(O_DIRECT)
447   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
448 #endif
449 #if defined(O_NOATIME)
450   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
451 #endif
452 #if defined(O_CLOEXEC)
453   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
454 #endif
455 #if defined(O_PATH)
456   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
457 #endif
458 #if defined(O_TMPFILE)
459   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
460 #endif
461   /* Don't terminate the list prematurely on 64-bit host+guest.  */
462 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
463   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
464 #endif
465 };
466 
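/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): how a bitmask_transtbl such as fcntl_flags_tbl above is meant
 * to be applied, in the spirit of QEMU's target_to_host_bitmask()
 * helper.  The field names used here (target_mask, target_bits,
 * host_bits) are assumptions for the sketch.
 */
#if 0
static int example_target_to_host_flags(int target_flags,
                                        const bitmask_transtbl *tbl,
                                        size_t len)
{
    int host_flags = 0;

    for (size_t i = 0; i < len; i++) {
        /* If the guest bits match this entry, set the host equivalent. */
        if ((target_flags & tbl[i].target_mask) == tbl[i].target_bits) {
            host_flags |= tbl[i].host_bits;
        }
    }
    return host_flags;
}
#endif
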
467 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
468 
469 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
470 #if defined(__NR_utimensat)
471 #define __NR_sys_utimensat __NR_utimensat
472 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
473           const struct timespec *,tsp,int,flags)
474 #else
475 static int sys_utimensat(int dirfd, const char *pathname,
476                          const struct timespec times[2], int flags)
477 {
478     errno = ENOSYS;
479     return -1;
480 }
481 #endif
482 #endif /* TARGET_NR_utimensat */
483 
484 #ifdef TARGET_NR_renameat2
485 #if defined(__NR_renameat2)
486 #define __NR_sys_renameat2 __NR_renameat2
487 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
488           const char *, new, unsigned int, flags)
489 #else
490 static int sys_renameat2(int oldfd, const char *old,
491                          int newfd, const char *new, int flags)
492 {
493     if (flags == 0) {
494         return renameat(oldfd, old, newfd, new);
495     }
496     errno = ENOSYS;
497     return -1;
498 }
499 #endif
500 #endif /* TARGET_NR_renameat2 */
501 
502 #ifdef CONFIG_INOTIFY
503 #include <sys/inotify.h>
504 #else
505 /* Userspace can usually survive runtime without inotify */
506 #undef TARGET_NR_inotify_init
507 #undef TARGET_NR_inotify_init1
508 #undef TARGET_NR_inotify_add_watch
509 #undef TARGET_NR_inotify_rm_watch
510 #endif /* CONFIG_INOTIFY  */
511 
512 #if defined(TARGET_NR_prlimit64)
513 #ifndef __NR_prlimit64
514 # define __NR_prlimit64 -1
515 #endif
516 #define __NR_sys_prlimit64 __NR_prlimit64
517 /* The glibc rlimit structure may not be that used by the underlying syscall */
518 struct host_rlimit64 {
519     uint64_t rlim_cur;
520     uint64_t rlim_max;
521 };
522 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
523           const struct host_rlimit64 *, new_limit,
524           struct host_rlimit64 *, old_limit)
525 #endif
526 
527 
528 #if defined(TARGET_NR_timer_create)
529 /* Maximum of 32 active POSIX timers allowed at any one time. */
530 #define GUEST_TIMER_MAX 32
531 static timer_t g_posix_timers[GUEST_TIMER_MAX];
532 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
533 
534 static inline int next_free_host_timer(void)
535 {
536     int k;
537     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
538         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
539             return k;
540         }
541     }
542     return -1;
543 }
544 
545 static inline void free_host_timer_slot(int id)
546 {
547     qatomic_store_release(g_posix_timer_allocated + id, 0);
548 }
549 #endif
550 
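/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the intended pairing of next_free_host_timer() and
 * free_host_timer_slot() above.  A slot is claimed with an atomic
 * exchange so two guest threads cannot grab the same index, and is
 * released with a store-release once the host timer is gone.
 * make_host_timer() is a hypothetical stand-in for timer_create().
 */
#if 0
static int example_alloc_guest_timer(void)
{
    int slot = next_free_host_timer();

    if (slot < 0) {
        return -TARGET_EAGAIN;      /* all GUEST_TIMER_MAX slots in use */
    }
    if (make_host_timer(&g_posix_timers[slot]) < 0) {
        free_host_timer_slot(slot); /* give the slot back on failure */
        return -TARGET_EAGAIN;
    }
    return slot;                    /* becomes the guest-visible timer id */
}
#endif
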
551 static inline int host_to_target_errno(int host_errno)
552 {
553     switch (host_errno) {
554 #define E(X)  case X: return TARGET_##X;
555 #include "errnos.c.inc"
556 #undef E
557     default:
558         return host_errno;
559     }
560 }
561 
562 static inline int target_to_host_errno(int target_errno)
563 {
564     switch (target_errno) {
565 #define E(X)  case TARGET_##X: return X;
566 #include "errnos.c.inc"
567 #undef E
568     default:
569         return target_errno;
570     }
571 }
572 
573 abi_long get_errno(abi_long ret)
574 {
575     if (ret == -1)
576         return -host_to_target_errno(errno);
577     else
578         return ret;
579 }
580 
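/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the usual calling pattern for get_errno().  A host call that
 * fails with -1/errno is folded into a single negative target errno,
 * which is what the guest sees in its syscall return register.  The
 * wrapper name is hypothetical.
 */
#if 0
static abi_long example_do_dup(int fd)
{
    return get_errno(dup(fd));      /* e.g. -1/EBADF => -TARGET_EBADF */
}
#endif
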
581 const char *target_strerror(int err)
582 {
583     if (err == QEMU_ERESTARTSYS) {
584         return "To be restarted";
585     }
586     if (err == QEMU_ESIGRETURN) {
587         return "Successful exit from sigreturn";
588     }
589 
590     return strerror(target_to_host_errno(err));
591 }
592 
593 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
594 {
595     int i;
596     uint8_t b;
597     if (usize <= ksize) {
598         return 1;
599     }
600     for (i = ksize; i < usize; i++) {
601         if (get_user_u8(b, addr + i)) {
602             return -TARGET_EFAULT;
603         }
604         if (b != 0) {
605             return 0;
606         }
607     }
608     return 1;
609 }
610 
611 /*
612  * Copies a target struct to a host struct, in a way that guarantees
613  * backwards-compatibility for struct syscall arguments.
614  *
615  * Similar to the kernel's uaccess.h:copy_struct_from_user()
616  */
617 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
618 {
619     size_t size = MIN(ksize, usize);
620     size_t rest = MAX(ksize, usize) - size;
621 
622     /* Deal with trailing bytes. */
623     if (usize < ksize) {
624         memset(dst + size, 0, rest);
625     } else if (usize > ksize) {
626         int ret = check_zeroed_user(src, ksize, usize);
627         if (ret <= 0) {
628             return ret ?: -TARGET_E2BIG;
629         }
630     }
631     /* Copy the interoperable parts of the struct. */
632     if (copy_from_user(dst, src, size)) {
633         return -TARGET_EFAULT;
634     }
635     return 0;
636 }
637 
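/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): how a syscall handler can use copy_struct_from_user() for an
 * extensible struct, mirroring the kernel helper it is modelled on.
 * attr_addr and usize stand in for hypothetical guest arguments.
 */
#if 0
static abi_long example_copy_sched_attr(abi_ptr attr_addr, size_t usize)
{
    struct sched_attr attr;
    int ret = copy_struct_from_user(&attr, sizeof(attr), attr_addr, usize);

    if (ret) {
        /* -TARGET_EFAULT, or -TARGET_E2BIG if unknown tail bytes are set */
        return ret;
    }
    /* attr is now zero-padded or truncated to the size QEMU knows about */
    return 0;
}
#endif
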
638 #define safe_syscall0(type, name) \
639 static type safe_##name(void) \
640 { \
641     return safe_syscall(__NR_##name); \
642 }
643 
644 #define safe_syscall1(type, name, type1, arg1) \
645 static type safe_##name(type1 arg1) \
646 { \
647     return safe_syscall(__NR_##name, arg1); \
648 }
649 
650 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
651 static type safe_##name(type1 arg1, type2 arg2) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2); \
654 }
655 
656 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
657 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
658 { \
659     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
660 }
661 
662 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
663     type4, arg4) \
664 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
665 { \
666     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
667 }
668 
669 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
670     type4, arg4, type5, arg5) \
671 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
672     type5 arg5) \
673 { \
674     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
675 }
676 
677 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
678     type4, arg4, type5, arg5, type6, arg6) \
679 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680     type5 arg5, type6 arg6) \
681 { \
682     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
683 }
684 
685 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
686 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
687 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
688               int, flags, mode_t, mode)
689 
690 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
691               const struct open_how_ver0 *, how, size_t, size)
692 
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
701               char **, argv, char **, envp, int, flags)
702 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
703     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
704 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
705               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
706 #endif
707 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
708 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
709               struct timespec *, tsp, const sigset_t *, sigmask,
710               size_t, sigsetsize)
711 #endif
712 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
713               int, maxevents, int, timeout, const sigset_t *, sigmask,
714               size_t, sigsetsize)
715 #if defined(__NR_futex)
716 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
717               const struct timespec *,timeout,int *,uaddr2,int,val3)
718 #endif
719 #if defined(__NR_futex_time64)
720 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
721               const struct timespec *,timeout,int *,uaddr2,int,val3)
722 #endif
723 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
724 safe_syscall2(int, kill, pid_t, pid, int, sig)
725 safe_syscall2(int, tkill, int, tid, int, sig)
726 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
727 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
729 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
732               unsigned long, pos_l, unsigned long, pos_h)
733 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
734               socklen_t, addrlen)
735 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
736               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
737 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
738               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
739 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
740 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
741 safe_syscall2(int, flock, int, fd, int, operation)
742 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
743 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
744               const struct timespec *, uts, size_t, sigsetsize)
745 #endif
746 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
747               int, flags)
748 #if defined(TARGET_NR_nanosleep)
749 safe_syscall2(int, nanosleep, const struct timespec *, req,
750               struct timespec *, rem)
751 #endif
752 #if defined(TARGET_NR_clock_nanosleep) || \
753     defined(TARGET_NR_clock_nanosleep_time64)
754 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
755               const struct timespec *, req, struct timespec *, rem)
756 #endif
757 #ifdef __NR_ipc
758 #ifdef __s390x__
759 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
760               void *, ptr)
761 #else
762 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
763               void *, ptr, long, fifth)
764 #endif
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 #endif
770 #ifdef __NR_msgrcv
771 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
772               long, msgtype, int, flags)
773 #endif
774 #ifdef __NR_semtimedop
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedsend) || \
779     defined(TARGET_NR_mq_timedsend_time64)
780 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
781               size_t, len, unsigned, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_mq_timedreceive) || \
784     defined(TARGET_NR_mq_timedreceive_time64)
785 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
786               size_t, len, unsigned *, prio, const struct timespec *, timeout)
787 #endif
788 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
789 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
790               int, outfd, loff_t *, poutoff, size_t, length,
791               unsigned int, flags)
792 #endif
793 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
794 safe_syscall4(int, fchmodat2, int, dfd, const char *, filename,
795               unsigned short, mode, unsigned int, flags)
796 #endif
797 
798 /* We do ioctl like this rather than via safe_syscall3 to preserve the
799  * "third argument might be integer or pointer or not present" behaviour of
800  * the libc function.
801  */
802 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
803 /* Similarly for fcntl. Since we always build with LFS enabled,
804  * we should be using the 64-bit structures automatically.
805  */
806 #ifdef __NR_fcntl64
807 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
808 #else
809 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
810 #endif
811 
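/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): how the safe_* wrappers above are typically used.  Routing a
 * blocking host call through safe_syscall() lets guest signal delivery
 * interrupt it in a race-free way so the main loop can restart the
 * syscall; a plain libc call would not give that guarantee.  The
 * wrapper name here is hypothetical.
 */
#if 0
static abi_long example_do_read(int fd, void *host_buf, size_t count)
{
    ssize_t ret = safe_read(fd, host_buf, count);   /* restartable */

    return get_errno(ret);
}
#endif
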
812 static inline int host_to_target_sock_type(int host_type)
813 {
814     int target_type;
815 
816     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
817     case SOCK_DGRAM:
818         target_type = TARGET_SOCK_DGRAM;
819         break;
820     case SOCK_STREAM:
821         target_type = TARGET_SOCK_STREAM;
822         break;
823     default:
824         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
825         break;
826     }
827 
828 #if defined(SOCK_CLOEXEC)
829     if (host_type & SOCK_CLOEXEC) {
830         target_type |= TARGET_SOCK_CLOEXEC;
831     }
832 #endif
833 
834 #if defined(SOCK_NONBLOCK)
835     if (host_type & SOCK_NONBLOCK) {
836         target_type |= TARGET_SOCK_NONBLOCK;
837     }
838 #endif
839 
840     return target_type;
841 }
842 
843 static abi_ulong target_brk, initial_target_brk;
844 
845 void target_set_brk(abi_ulong new_brk)
846 {
847     target_brk = TARGET_PAGE_ALIGN(new_brk);
848     initial_target_brk = target_brk;
849 }
850 
851 /* do_brk() must return target values and target errnos. */
852 abi_long do_brk(abi_ulong brk_val)
853 {
854     abi_long mapped_addr;
855     abi_ulong new_brk;
856     abi_ulong old_brk;
857 
858     /* brk pointers are always untagged */
859 
860     /* do not allow shrinking below the initial brk value */
861     if (brk_val < initial_target_brk) {
862         return target_brk;
863     }
864 
865     new_brk = TARGET_PAGE_ALIGN(brk_val);
866     old_brk = TARGET_PAGE_ALIGN(target_brk);
867 
868     /* new and old target_brk might be on the same page */
869     if (new_brk == old_brk) {
870         target_brk = brk_val;
871         return target_brk;
872     }
873 
874     /* Release heap if necessary */
875     if (new_brk < old_brk) {
876         target_munmap(new_brk, old_brk - new_brk);
877 
878         target_brk = brk_val;
879         return target_brk;
880     }
881 
882     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
883                               PROT_READ | PROT_WRITE,
884                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
885                               -1, 0);
886 
887     if (mapped_addr == old_brk) {
888         target_brk = brk_val;
889         return target_brk;
890     }
891 
892 #if defined(TARGET_ALPHA)
893     /* We (partially) emulate OSF/1 on Alpha, which requires we
894        return a proper errno, not an unchanged brk value.  */
895     return -TARGET_ENOMEM;
896 #endif
897     /* For everything else, return the previous break. */
898     return target_brk;
899 }
900 
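/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the guest-visible contract of do_brk() above.  Queries below
 * the initial break and grows that cannot be satisfied both return the
 * current break rather than an error (Alpha excepted), matching Linux
 * brk() behaviour.
 */
#if 0
static void example_brk_usage(void)
{
    abi_ulong cur = do_brk(0);                 /* below initial brk: query */
    abi_ulong grown = do_brk(cur + 0x10000);   /* try to grow by 64 KiB */

    if (grown == cur) {
        /* the mapping could not be extended; the heap is unchanged */
    }
}
#endif
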
901 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
902     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
903 static inline abi_long copy_from_user_fdset(fd_set *fds,
904                                             abi_ulong target_fds_addr,
905                                             int n)
906 {
907     int i, nw, j, k;
908     abi_ulong b, *target_fds;
909 
910     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
911     if (!(target_fds = lock_user(VERIFY_READ,
912                                  target_fds_addr,
913                                  sizeof(abi_ulong) * nw,
914                                  1)))
915         return -TARGET_EFAULT;
916 
917     FD_ZERO(fds);
918     k = 0;
919     for (i = 0; i < nw; i++) {
920         /* grab the abi_ulong */
921         __get_user(b, &target_fds[i]);
922         for (j = 0; j < TARGET_ABI_BITS; j++) {
923             /* check the bit inside the abi_ulong */
924             if ((b >> j) & 1)
925                 FD_SET(k, fds);
926             k++;
927         }
928     }
929 
930     unlock_user(target_fds, target_fds_addr, 0);
931 
932     return 0;
933 }
934 
935 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
936                                                  abi_ulong target_fds_addr,
937                                                  int n)
938 {
939     if (target_fds_addr) {
940         if (copy_from_user_fdset(fds, target_fds_addr, n))
941             return -TARGET_EFAULT;
942         *fds_ptr = fds;
943     } else {
944         *fds_ptr = NULL;
945     }
946     return 0;
947 }
948 
949 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
950                                           const fd_set *fds,
951                                           int n)
952 {
953     int i, nw, j, k;
954     abi_long v;
955     abi_ulong *target_fds;
956 
957     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
958     if (!(target_fds = lock_user(VERIFY_WRITE,
959                                  target_fds_addr,
960                                  sizeof(abi_ulong) * nw,
961                                  0)))
962         return -TARGET_EFAULT;
963 
964     k = 0;
965     for (i = 0; i < nw; i++) {
966         v = 0;
967         for (j = 0; j < TARGET_ABI_BITS; j++) {
968             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
969             k++;
970         }
971         __put_user(v, &target_fds[i]);
972     }
973 
974     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
975 
976     return 0;
977 }
978 #endif
979 
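/*
 * Added note (not part of the original file): the bit layout the two
 * fdset helpers above translate.  Guest fd k lives in abi_ulong word
 * k / TARGET_ABI_BITS at bit k % TARGET_ABI_BITS, so on a 32-bit target
 * fd 65 is word 2, bit 1.  Walking word by word and bit by bit lets
 * __get_user()/__put_user() absorb endianness and word-size differences.
 */
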
980 #if defined(__alpha__)
981 #define HOST_HZ 1024
982 #else
983 #define HOST_HZ 100
984 #endif
985 
986 static inline abi_long host_to_target_clock_t(long ticks)
987 {
988 #if HOST_HZ == TARGET_HZ
989     return ticks;
990 #else
991     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
992 #endif
993 }
994 
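/*
 * Added note (not part of the original file): a worked example of the
 * tick scaling above.  With HOST_HZ == 1024 (Alpha host) and a 100 Hz
 * target, 2048 host ticks become (2048 * 100) / 1024 == 200 target
 * ticks; when both rates match, the value passes through unchanged.
 */
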
995 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
996                                              const struct rusage *rusage)
997 {
998     struct target_rusage *target_rusage;
999 
1000     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1001         return -TARGET_EFAULT;
1002     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1003     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1004     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1005     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1006     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1007     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1008     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1009     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1010     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1011     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1012     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1013     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1014     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1015     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1016     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1017     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1018     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1019     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1020     unlock_user_struct(target_rusage, target_addr, 1);
1021 
1022     return 0;
1023 }
1024 
1025 #ifdef TARGET_NR_setrlimit
1026 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1027 {
1028     abi_ulong target_rlim_swap;
1029     rlim_t result;
1030 
1031     target_rlim_swap = tswapal(target_rlim);
1032     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1033         return RLIM_INFINITY;
1034 
1035     result = target_rlim_swap;
1036     if (target_rlim_swap != (rlim_t)result)
1037         return RLIM_INFINITY;
1038 
1039     return result;
1040 }
1041 #endif
1042 
1043 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1044 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1045 {
1046     abi_ulong target_rlim_swap;
1047     abi_ulong result;
1048 
1049     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1050         target_rlim_swap = TARGET_RLIM_INFINITY;
1051     else
1052         target_rlim_swap = rlim;
1053     result = tswapal(target_rlim_swap);
1054 
1055     return result;
1056 }
1057 #endif
1058 
1059 static inline int target_to_host_resource(int code)
1060 {
1061     switch (code) {
1062     case TARGET_RLIMIT_AS:
1063         return RLIMIT_AS;
1064     case TARGET_RLIMIT_CORE:
1065         return RLIMIT_CORE;
1066     case TARGET_RLIMIT_CPU:
1067         return RLIMIT_CPU;
1068     case TARGET_RLIMIT_DATA:
1069         return RLIMIT_DATA;
1070     case TARGET_RLIMIT_FSIZE:
1071         return RLIMIT_FSIZE;
1072     case TARGET_RLIMIT_LOCKS:
1073         return RLIMIT_LOCKS;
1074     case TARGET_RLIMIT_MEMLOCK:
1075         return RLIMIT_MEMLOCK;
1076     case TARGET_RLIMIT_MSGQUEUE:
1077         return RLIMIT_MSGQUEUE;
1078     case TARGET_RLIMIT_NICE:
1079         return RLIMIT_NICE;
1080     case TARGET_RLIMIT_NOFILE:
1081         return RLIMIT_NOFILE;
1082     case TARGET_RLIMIT_NPROC:
1083         return RLIMIT_NPROC;
1084     case TARGET_RLIMIT_RSS:
1085         return RLIMIT_RSS;
1086     case TARGET_RLIMIT_RTPRIO:
1087         return RLIMIT_RTPRIO;
1088 #ifdef RLIMIT_RTTIME
1089     case TARGET_RLIMIT_RTTIME:
1090         return RLIMIT_RTTIME;
1091 #endif
1092     case TARGET_RLIMIT_SIGPENDING:
1093         return RLIMIT_SIGPENDING;
1094     case TARGET_RLIMIT_STACK:
1095         return RLIMIT_STACK;
1096     default:
1097         return code;
1098     }
1099 }
1100 
1101 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1102                                               abi_ulong target_tv_addr)
1103 {
1104     struct target_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __get_user(tv->tv_sec, &target_tv->tv_sec);
1111     __get_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 0);
1114 
1115     return 0;
1116 }
1117 
1118 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1119                                             const struct timeval *tv)
1120 {
1121     struct target_timeval *target_tv;
1122 
1123     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1124         return -TARGET_EFAULT;
1125     }
1126 
1127     __put_user(tv->tv_sec, &target_tv->tv_sec);
1128     __put_user(tv->tv_usec, &target_tv->tv_usec);
1129 
1130     unlock_user_struct(target_tv, target_tv_addr, 1);
1131 
1132     return 0;
1133 }
1134 
1135 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1136 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1137                                                 abi_ulong target_tv_addr)
1138 {
1139     struct target__kernel_sock_timeval *target_tv;
1140 
1141     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1142         return -TARGET_EFAULT;
1143     }
1144 
1145     __get_user(tv->tv_sec, &target_tv->tv_sec);
1146     __get_user(tv->tv_usec, &target_tv->tv_usec);
1147 
1148     unlock_user_struct(target_tv, target_tv_addr, 0);
1149 
1150     return 0;
1151 }
1152 #endif
1153 
1154 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1155                                               const struct timeval *tv)
1156 {
1157     struct target__kernel_sock_timeval *target_tv;
1158 
1159     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1160         return -TARGET_EFAULT;
1161     }
1162 
1163     __put_user(tv->tv_sec, &target_tv->tv_sec);
1164     __put_user(tv->tv_usec, &target_tv->tv_usec);
1165 
1166     unlock_user_struct(target_tv, target_tv_addr, 1);
1167 
1168     return 0;
1169 }
1170 
1171 #if defined(TARGET_NR_futex) || \
1172     defined(TARGET_NR_rt_sigtimedwait) || \
1173     defined(TARGET_NR_pselect6) || \
1174     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1175     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1176     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1177     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1178     defined(TARGET_NR_timer_settime) || \
1179     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1180 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1181                                                abi_ulong target_addr)
1182 {
1183     struct target_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 0);
1191     return 0;
1192 }
1193 #endif
1194 
1195 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1196     defined(TARGET_NR_timer_settime64) || \
1197     defined(TARGET_NR_mq_timedsend_time64) || \
1198     defined(TARGET_NR_mq_timedreceive_time64) || \
1199     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1200     defined(TARGET_NR_clock_nanosleep_time64) || \
1201     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1202     defined(TARGET_NR_utimensat) || \
1203     defined(TARGET_NR_utimensat_time64) || \
1204     defined(TARGET_NR_semtimedop_time64) || \
1205     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1206 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1207                                                  abi_ulong target_addr)
1208 {
1209     struct target__kernel_timespec *target_ts;
1210 
1211     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1212         return -TARGET_EFAULT;
1213     }
1214     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1215     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216     /* in 32bit mode, this drops the padding */
1217     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1218     unlock_user_struct(target_ts, target_addr, 0);
1219     return 0;
1220 }
1221 #endif
1222 
1223 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1224                                                struct timespec *host_ts)
1225 {
1226     struct target_timespec *target_ts;
1227 
1228     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1229         return -TARGET_EFAULT;
1230     }
1231     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1232     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1233     unlock_user_struct(target_ts, target_addr, 1);
1234     return 0;
1235 }
1236 
1237 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1238                                                  struct timespec *host_ts)
1239 {
1240     struct target__kernel_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 1);
1248     return 0;
1249 }
1250 
1251 #if defined(TARGET_NR_gettimeofday)
1252 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1253                                              struct timezone *tz)
1254 {
1255     struct target_timezone *target_tz;
1256 
1257     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1258         return -TARGET_EFAULT;
1259     }
1260 
1261     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1262     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1263 
1264     unlock_user_struct(target_tz, target_tz_addr, 1);
1265 
1266     return 0;
1267 }
1268 #endif
1269 
1270 #if defined(TARGET_NR_settimeofday)
1271 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1272                                                abi_ulong target_tz_addr)
1273 {
1274     struct target_timezone *target_tz;
1275 
1276     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1277         return -TARGET_EFAULT;
1278     }
1279 
1280     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1281     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1282 
1283     unlock_user_struct(target_tz, target_tz_addr, 0);
1284 
1285     return 0;
1286 }
1287 #endif
1288 
1289 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1290 #include <mqueue.h>
1291 
1292 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1293                                               abi_ulong target_mq_attr_addr)
1294 {
1295     struct target_mq_attr *target_mq_attr;
1296 
1297     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1298                           target_mq_attr_addr, 1))
1299         return -TARGET_EFAULT;
1300 
1301     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1302     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1303     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1304     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1305 
1306     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1307 
1308     return 0;
1309 }
1310 
1311 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1312                                             const struct mq_attr *attr)
1313 {
1314     struct target_mq_attr *target_mq_attr;
1315 
1316     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1317                           target_mq_attr_addr, 0))
1318         return -TARGET_EFAULT;
1319 
1320     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1321     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1322     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1323     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1324 
1325     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1326 
1327     return 0;
1328 }
1329 #endif
1330 
1331 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1332 /* do_select() must return target values and target errnos. */
1333 static abi_long do_select(int n,
1334                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1335                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1336 {
1337     fd_set rfds, wfds, efds;
1338     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1339     struct timeval tv;
1340     struct timespec ts, *ts_ptr;
1341     abi_long ret;
1342 
1343     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1344     if (ret) {
1345         return ret;
1346     }
1347     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1348     if (ret) {
1349         return ret;
1350     }
1351     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1352     if (ret) {
1353         return ret;
1354     }
1355 
1356     if (target_tv_addr) {
1357         if (copy_from_user_timeval(&tv, target_tv_addr))
1358             return -TARGET_EFAULT;
1359         ts.tv_sec = tv.tv_sec;
1360         ts.tv_nsec = tv.tv_usec * 1000;
1361         ts_ptr = &ts;
1362     } else {
1363         ts_ptr = NULL;
1364     }
1365 
1366     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1367                                   ts_ptr, NULL));
1368 
1369     if (!is_error(ret)) {
1370         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1371             return -TARGET_EFAULT;
1372         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1373             return -TARGET_EFAULT;
1374         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1375             return -TARGET_EFAULT;
1376 
1377         if (target_tv_addr) {
1378             tv.tv_sec = ts.tv_sec;
1379             tv.tv_usec = ts.tv_nsec / 1000;
1380             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1381                 return -TARGET_EFAULT;
1382             }
1383         }
1384     }
1385 
1386     return ret;
1387 }
1388 
1389 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1390 static abi_long do_old_select(abi_ulong arg1)
1391 {
1392     struct target_sel_arg_struct *sel;
1393     abi_ulong inp, outp, exp, tvp;
1394     long nsel;
1395 
1396     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1397         return -TARGET_EFAULT;
1398     }
1399 
1400     nsel = tswapal(sel->n);
1401     inp = tswapal(sel->inp);
1402     outp = tswapal(sel->outp);
1403     exp = tswapal(sel->exp);
1404     tvp = tswapal(sel->tvp);
1405 
1406     unlock_user_struct(sel, arg1, 0);
1407 
1408     return do_select(nsel, inp, outp, exp, tvp);
1409 }
1410 #endif
1411 #endif
1412 
1413 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1414 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1415                             abi_long arg4, abi_long arg5, abi_long arg6,
1416                             bool time64)
1417 {
1418     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1419     fd_set rfds, wfds, efds;
1420     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1421     struct timespec ts, *ts_ptr;
1422     abi_long ret;
1423 
1424     /*
1425      * The 6th arg is actually two args smashed together,
1426      * so we cannot use the C library.
1427      */
1428     struct {
1429         sigset_t *set;
1430         size_t size;
1431     } sig, *sig_ptr;
1432 
1433     abi_ulong arg_sigset, arg_sigsize, *arg7;
1434 
1435     n = arg1;
1436     rfd_addr = arg2;
1437     wfd_addr = arg3;
1438     efd_addr = arg4;
1439     ts_addr = arg5;
1440 
1441     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1450     if (ret) {
1451         return ret;
1452     }
1453 
1454     /*
1455      * This takes a timespec, and not a timeval, so we cannot
1456      * use the do_select() helper ...
1457      */
1458     if (ts_addr) {
1459         if (time64) {
1460             if (target_to_host_timespec64(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         } else {
1464             if (target_to_host_timespec(&ts, ts_addr)) {
1465                 return -TARGET_EFAULT;
1466             }
1467         }
1468         ts_ptr = &ts;
1469     } else {
1470         ts_ptr = NULL;
1471     }
1472 
1473     /* Extract the two packed args for the sigset */
1474     sig_ptr = NULL;
1475     if (arg6) {
1476         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1477         if (!arg7) {
1478             return -TARGET_EFAULT;
1479         }
1480         arg_sigset = tswapal(arg7[0]);
1481         arg_sigsize = tswapal(arg7[1]);
1482         unlock_user(arg7, arg6, 0);
1483 
1484         if (arg_sigset) {
1485             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1486             if (ret != 0) {
1487                 return ret;
1488             }
1489             sig_ptr = &sig;
1490             sig.size = SIGSET_T_SIZE;
1491         }
1492     }
1493 
1494     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1495                                   ts_ptr, sig_ptr));
1496 
1497     if (sig_ptr) {
1498         finish_sigsuspend_mask(ret);
1499     }
1500 
1501     if (!is_error(ret)) {
1502         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1503             return -TARGET_EFAULT;
1504         }
1505         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1506             return -TARGET_EFAULT;
1507         }
1508         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1509             return -TARGET_EFAULT;
1510         }
1511         if (time64) {
1512             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1513                 return -TARGET_EFAULT;
1514             }
1515         } else {
1516             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1517                 return -TARGET_EFAULT;
1518             }
1519         }
1520     }
1521     return ret;
1522 }
1523 #endif
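
/*
 * Illustrative sketch, not part of the original file: how a guest-side
 * wrapper typically packs the sixth pselect6() argument that
 * do_pselect6() above unpacks with tswapal().  The wrapper name and the
 * local layout below are assumptions made only for illustration.
 */
static inline long example_guest_pselect6(int nfds, fd_set *rfds,
                                          fd_set *wfds, fd_set *efds,
                                          struct timespec *ts,
                                          const sigset_t *mask)
{
    struct {
        const sigset_t *ss;   /* signal mask applied atomically */
        size_t ss_len;        /* kernel sigset size, i.e. _NSIG / 8 */
    } packed = { mask, _NSIG / 8 };

    return syscall(__NR_pselect6, nfds, rfds, wfds, efds, ts, &packed);
}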
1524 
1525 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1526     defined(TARGET_NR_ppoll_time64)
1527 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1528                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1529 {
1530     struct target_pollfd *target_pfd;
1531     unsigned int nfds = arg2;
1532     struct pollfd *pfd;
1533     unsigned int i;
1534     abi_long ret;
1535 
1536     pfd = NULL;
1537     target_pfd = NULL;
1538     if (nfds) {
1539         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1540             return -TARGET_EINVAL;
1541         }
1542         target_pfd = lock_user(VERIFY_WRITE, arg1,
1543                                sizeof(struct target_pollfd) * nfds, 1);
1544         if (!target_pfd) {
1545             return -TARGET_EFAULT;
1546         }
1547 
1548         pfd = alloca(sizeof(struct pollfd) * nfds);
1549         for (i = 0; i < nfds; i++) {
1550             pfd[i].fd = tswap32(target_pfd[i].fd);
1551             pfd[i].events = tswap16(target_pfd[i].events);
1552         }
1553     }
1554     if (ppoll) {
1555         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1556         sigset_t *set = NULL;
1557 
1558         if (arg3) {
1559             if (time64) {
1560                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1561                     unlock_user(target_pfd, arg1, 0);
1562                     return -TARGET_EFAULT;
1563                 }
1564             } else {
1565                 if (target_to_host_timespec(timeout_ts, arg3)) {
1566                     unlock_user(target_pfd, arg1, 0);
1567                     return -TARGET_EFAULT;
1568                 }
1569             }
1570         } else {
1571             timeout_ts = NULL;
1572         }
1573 
1574         if (arg4) {
1575             ret = process_sigsuspend_mask(&set, arg4, arg5);
1576             if (ret != 0) {
1577                 unlock_user(target_pfd, arg1, 0);
1578                 return ret;
1579             }
1580         }
1581 
1582         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1583                                    set, SIGSET_T_SIZE));
1584 
1585         if (set) {
1586             finish_sigsuspend_mask(ret);
1587         }
1588         if (!is_error(ret) && arg3) {
1589             if (time64) {
1590                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1591                     return -TARGET_EFAULT;
1592                 }
1593             } else {
1594                 if (host_to_target_timespec(arg3, timeout_ts)) {
1595                     return -TARGET_EFAULT;
1596                 }
1597             }
1598         }
1599     } else {
1600         struct timespec ts, *pts;
1601 
1602         if (arg3 >= 0) {
1603             /* Convert the millisecond timeout to seconds and nanoseconds. */
1604             ts.tv_sec = arg3 / 1000;
1605             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1606             pts = &ts;
1607         } else {
1608             /* A negative poll() timeout means "infinite". */
1609             pts = NULL;
1610         }
1611         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1612     }
1613 
1614     if (!is_error(ret)) {
1615         for (i = 0; i < nfds; i++) {
1616             target_pfd[i].revents = tswap16(pfd[i].revents);
1617         }
1618     }
1619     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1620     return ret;
1621 }
1622 #endif
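
/*
 * Illustrative sketch, not part of the original file: the plain poll()
 * branch above converts the guest's millisecond timeout into a struct
 * timespec, e.g. 2500 ms becomes { tv_sec = 2, tv_nsec = 500000000 },
 * while a negative timeout is passed to safe_ppoll() as a NULL pointer
 * and means "block forever".
 */
static inline void example_ms_to_timespec(abi_long msec, struct timespec *ts)
{
    ts->tv_sec = msec / 1000;                 /* whole seconds */
    ts->tv_nsec = (msec % 1000) * 1000000LL;  /* remainder as nanoseconds */
}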
1623 
1624 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1625                         int flags, int is_pipe2)
1626 {
1627     int host_pipe[2];
1628     abi_long ret;
1629     ret = pipe2(host_pipe, flags);
1630 
1631     if (is_error(ret))
1632         return get_errno(ret);
1633 
1634     /* Several targets have special calling conventions for the original
1635        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1636     if (!is_pipe2) {
1637 #if defined(TARGET_ALPHA)
1638         cpu_env->ir[IR_A4] = host_pipe[1];
1639         return host_pipe[0];
1640 #elif defined(TARGET_MIPS)
1641         cpu_env->active_tc.gpr[3] = host_pipe[1];
1642         return host_pipe[0];
1643 #elif defined(TARGET_SH4)
1644         cpu_env->gregs[1] = host_pipe[1];
1645         return host_pipe[0];
1646 #elif defined(TARGET_SPARC)
1647         cpu_env->regwptr[1] = host_pipe[1];
1648         return host_pipe[0];
1649 #endif
1650     }
1651 
1652     if (put_user_s32(host_pipe[0], pipedes)
1653         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1654         return -TARGET_EFAULT;
1655     return get_errno(ret);
1656 }
1657 
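/*
 * Illustrative sketch, not part of the original file: on the targets
 * special-cased in do_pipe() above, the original pipe() syscall returns
 * both descriptors in registers instead of writing them through the
 * pointer argument.  A MIPS guest libc wrapper therefore does roughly
 * (register names are the guest's, shown only for illustration):
 *
 *     fds[0] = v0;   // primary syscall return value
 *     fds[1] = v1;   // second result register (active_tc.gpr[3] above)
 *
 * pipe2() never adopted this convention, which is why the register path
 * is skipped when is_pipe2 is set.
 */
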
1658 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1659                                                abi_ulong target_addr,
1660                                                socklen_t len)
1661 {
1662     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1663     sa_family_t sa_family;
1664     struct target_sockaddr *target_saddr;
1665 
1666     if (fd_trans_target_to_host_addr(fd)) {
1667         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1668     }
1669 
1670     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1671     if (!target_saddr)
1672         return -TARGET_EFAULT;
1673 
1674     sa_family = tswap16(target_saddr->sa_family);
1675 
1676     /* Oops. The caller might send an incomplete sun_path; sun_path
1677      * must be terminated by \0 (see the manual page), but
1678      * unfortunately it is quite common to specify sockaddr_un
1679      * length as "strlen(x->sun_path)" while it should be
1680      * "strlen(...) + 1". We'll fix that here if needed.
1681      * The Linux kernel applies the same fixup.
1682      */
1683 
1684     if (sa_family == AF_UNIX) {
1685         if (len < unix_maxlen && len > 0) {
1686             char *cp = (char *)target_saddr;
1687 
1688             if (cp[len - 1] && !cp[len])
1689                 len++;
1690         }
1691         if (len > unix_maxlen)
1692             len = unix_maxlen;
1693     }
1694 
1695     memcpy(addr, target_saddr, len);
1696     addr->sa_family = sa_family;
1697     if (sa_family == AF_NETLINK) {
1698         struct sockaddr_nl *nladdr;
1699 
1700         nladdr = (struct sockaddr_nl *)addr;
1701         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1702         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1703     } else if (sa_family == AF_PACKET) {
1704         struct target_sockaddr_ll *lladdr;
1705 
1706         lladdr = (struct target_sockaddr_ll *)addr;
1707         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1708         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1709     } else if (sa_family == AF_INET6) {
1710         struct sockaddr_in6 *in6addr;
1711 
1712         in6addr = (struct sockaddr_in6 *)addr;
1713         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1714     }
1715     unlock_user(target_saddr, target_addr, 0);
1716 
1717     return 0;
1718 }
1719 
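/*
 * Illustrative sketch, not part of the original file: the AF_UNIX length
 * fixup above in action.  A guest that binds "/tmp/sock" and passes
 *
 *     len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 *
 * omits the trailing '\0'.  target_to_host_sockaddr() notices that
 * cp[len - 1] is non-zero while cp[len] is zero and bumps len by one, so
 * the host kernel receives a properly terminated path.
 */
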
1720 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1721                                                struct sockaddr *addr,
1722                                                socklen_t len)
1723 {
1724     struct target_sockaddr *target_saddr;
1725 
1726     if (len == 0) {
1727         return 0;
1728     }
1729     assert(addr);
1730 
1731     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1732     if (!target_saddr)
1733         return -TARGET_EFAULT;
1734     memcpy(target_saddr, addr, len);
1735     if (len >= offsetof(struct target_sockaddr, sa_family) +
1736         sizeof(target_saddr->sa_family)) {
1737         target_saddr->sa_family = tswap16(addr->sa_family);
1738     }
1739     if (addr->sa_family == AF_NETLINK &&
1740         len >= sizeof(struct target_sockaddr_nl)) {
1741         struct target_sockaddr_nl *target_nl =
1742                (struct target_sockaddr_nl *)target_saddr;
1743         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1744         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1745     } else if (addr->sa_family == AF_PACKET) {
1746         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1747         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1748         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1749     } else if (addr->sa_family == AF_INET6 &&
1750                len >= sizeof(struct target_sockaddr_in6)) {
1751         struct target_sockaddr_in6 *target_in6 =
1752                (struct target_sockaddr_in6 *)target_saddr;
1753         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1754     }
1755     unlock_user(target_saddr, target_addr, len);
1756 
1757     return 0;
1758 }
1759 
1760 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1761                                            struct target_msghdr *target_msgh)
1762 {
1763     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1764     abi_long msg_controllen;
1765     abi_ulong target_cmsg_addr;
1766     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1767     socklen_t space = 0;
1768 
1769     msg_controllen = tswapal(target_msgh->msg_controllen);
1770     if (msg_controllen < sizeof (struct target_cmsghdr))
1771         goto the_end;
1772     target_cmsg_addr = tswapal(target_msgh->msg_control);
1773     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1774     target_cmsg_start = target_cmsg;
1775     if (!target_cmsg)
1776         return -TARGET_EFAULT;
1777 
1778     while (cmsg && target_cmsg) {
1779         void *data = CMSG_DATA(cmsg);
1780         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1781 
1782         int len = tswapal(target_cmsg->cmsg_len)
1783             - sizeof(struct target_cmsghdr);
1784 
1785         space += CMSG_SPACE(len);
1786         if (space > msgh->msg_controllen) {
1787             space -= CMSG_SPACE(len);
1788             /* This is a QEMU bug, since we allocated the payload
1789              * area ourselves (unlike overflow in host-to-target
1790              * conversion, which is just the guest giving us a buffer
1791              * that's too small). It can't happen for the payload types
1792              * we currently support; if it becomes an issue in future
1793              * we would need to improve our allocation strategy to
1794              * something more intelligent than "twice the size of the
1795              * target buffer we're reading from".
1796              */
1797             qemu_log_mask(LOG_UNIMP,
1798                           ("Unsupported ancillary data %d/%d: "
1799                            "unhandled msg size\n"),
1800                           tswap32(target_cmsg->cmsg_level),
1801                           tswap32(target_cmsg->cmsg_type));
1802             break;
1803         }
1804 
1805         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1806             cmsg->cmsg_level = SOL_SOCKET;
1807         } else {
1808             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1809         }
1810         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1811         cmsg->cmsg_len = CMSG_LEN(len);
1812 
1813         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1814             int *fd = (int *)data;
1815             int *target_fd = (int *)target_data;
1816             int i, numfds = len / sizeof(int);
1817 
1818             for (i = 0; i < numfds; i++) {
1819                 __get_user(fd[i], target_fd + i);
1820             }
1821         } else if (cmsg->cmsg_level == SOL_SOCKET
1822                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1823             struct ucred *cred = (struct ucred *)data;
1824             struct target_ucred *target_cred =
1825                 (struct target_ucred *)target_data;
1826 
1827             __get_user(cred->pid, &target_cred->pid);
1828             __get_user(cred->uid, &target_cred->uid);
1829             __get_user(cred->gid, &target_cred->gid);
1830         } else if (cmsg->cmsg_level == SOL_ALG) {
1831             uint32_t *dst = (uint32_t *)data;
1832 
1833             memcpy(dst, target_data, len);
1834             /* fix endianness of first 32-bit word */
1835             if (len >= sizeof(uint32_t)) {
1836                 *dst = tswap32(*dst);
1837             }
1838         } else {
1839             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1840                           cmsg->cmsg_level, cmsg->cmsg_type);
1841             memcpy(data, target_data, len);
1842         }
1843 
1844         cmsg = CMSG_NXTHDR(msgh, cmsg);
1845         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1846                                          target_cmsg_start);
1847     }
1848     unlock_user(target_cmsg, target_cmsg_addr, 0);
1849  the_end:
1850     msgh->msg_controllen = space;
1851     return 0;
1852 }
1853 
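/*
 * Illustrative sketch, not part of the original file: the kind of guest
 * control message the SCM_RIGHTS branch above handles.  To pass two file
 * descriptors over a Unix socket, a guest builds something like this
 * (the buf and fds[] names are illustrative assumptions):
 *
 *     char buf[CMSG_SPACE(2 * sizeof(int))];
 *     struct cmsghdr *c = (struct cmsghdr *)buf;
 *     c->cmsg_level = SOL_SOCKET;
 *     c->cmsg_type  = SCM_RIGHTS;
 *     c->cmsg_len   = CMSG_LEN(2 * sizeof(int));
 *     memcpy(CMSG_DATA(c), fds, 2 * sizeof(int));
 *
 * target_to_host_cmsg() byte-swaps the level/type/len fields and copies
 * each descriptor with __get_user() so the host sendmsg() sees
 * native-endian values.
 */
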
1854 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1855                                            struct msghdr *msgh)
1856 {
1857     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1858     abi_long msg_controllen;
1859     abi_ulong target_cmsg_addr;
1860     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1861     socklen_t space = 0;
1862 
1863     msg_controllen = tswapal(target_msgh->msg_controllen);
1864     if (msg_controllen < sizeof (struct target_cmsghdr))
1865         goto the_end;
1866     target_cmsg_addr = tswapal(target_msgh->msg_control);
1867     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1868     target_cmsg_start = target_cmsg;
1869     if (!target_cmsg)
1870         return -TARGET_EFAULT;
1871 
1872     while (cmsg && target_cmsg) {
1873         void *data = CMSG_DATA(cmsg);
1874         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1875 
1876         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1877         int tgt_len, tgt_space;
1878 
1879         /* We never copy a half-header but may copy half-data;
1880          * this is Linux's behaviour in put_cmsg(). Note that
1881          * truncation here is a guest problem (which we report
1882          * to the guest via the CTRUNC bit), unlike truncation
1883          * in target_to_host_cmsg, which is a QEMU bug.
1884          */
1885         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1886             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1887             break;
1888         }
1889 
1890         if (cmsg->cmsg_level == SOL_SOCKET) {
1891             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1892         } else {
1893             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1894         }
1895         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1896 
1897         /* Payload types which need a different size of payload on
1898          * the target must adjust tgt_len here.
1899          */
1900         tgt_len = len;
1901         switch (cmsg->cmsg_level) {
1902         case SOL_SOCKET:
1903             switch (cmsg->cmsg_type) {
1904             case SO_TIMESTAMP:
1905                 tgt_len = sizeof(struct target_timeval);
1906                 break;
1907             default:
1908                 break;
1909             }
1910             break;
1911         default:
1912             break;
1913         }
1914 
1915         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1916             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1917             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1918         }
1919 
1920         /* We must now copy-and-convert len bytes of payload
1921          * into tgt_len bytes of destination space. Bear in mind
1922          * that in both source and destination we may be dealing
1923          * with a truncated value!
1924          */
1925         switch (cmsg->cmsg_level) {
1926         case SOL_SOCKET:
1927             switch (cmsg->cmsg_type) {
1928             case SCM_RIGHTS:
1929             {
1930                 int *fd = (int *)data;
1931                 int *target_fd = (int *)target_data;
1932                 int i, numfds = tgt_len / sizeof(int);
1933 
1934                 for (i = 0; i < numfds; i++) {
1935                     __put_user(fd[i], target_fd + i);
1936                 }
1937                 break;
1938             }
1939             case SO_TIMESTAMP:
1940             {
1941                 struct timeval *tv = (struct timeval *)data;
1942                 struct target_timeval *target_tv =
1943                     (struct target_timeval *)target_data;
1944 
1945                 if (len != sizeof(struct timeval) ||
1946                     tgt_len != sizeof(struct target_timeval)) {
1947                     goto unimplemented;
1948                 }
1949 
1950                 /* copy struct timeval to target */
1951                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1952                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1953                 break;
1954             }
1955             case SCM_CREDENTIALS:
1956             {
1957                 struct ucred *cred = (struct ucred *)data;
1958                 struct target_ucred *target_cred =
1959                     (struct target_ucred *)target_data;
1960 
1961                 __put_user(cred->pid, &target_cred->pid);
1962                 __put_user(cred->uid, &target_cred->uid);
1963                 __put_user(cred->gid, &target_cred->gid);
1964                 break;
1965             }
1966             default:
1967                 goto unimplemented;
1968             }
1969             break;
1970 
1971         case SOL_IP:
1972             switch (cmsg->cmsg_type) {
1973             case IP_TTL:
1974             {
1975                 uint32_t *v = (uint32_t *)data;
1976                 uint32_t *t_int = (uint32_t *)target_data;
1977 
1978                 if (len != sizeof(uint32_t) ||
1979                     tgt_len != sizeof(uint32_t)) {
1980                     goto unimplemented;
1981                 }
1982                 __put_user(*v, t_int);
1983                 break;
1984             }
1985             case IP_RECVERR:
1986             {
1987                 struct errhdr_t {
1988                    struct sock_extended_err ee;
1989                    struct sockaddr_in offender;
1990                 };
1991                 struct errhdr_t *errh = (struct errhdr_t *)data;
1992                 struct errhdr_t *target_errh =
1993                     (struct errhdr_t *)target_data;
1994 
1995                 if (len != sizeof(struct errhdr_t) ||
1996                     tgt_len != sizeof(struct errhdr_t)) {
1997                     goto unimplemented;
1998                 }
1999                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2000                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2001                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2002                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2003                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2004                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2005                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2006                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2007                     (void *) &errh->offender, sizeof(errh->offender));
2008                 break;
2009             }
2010             case IP_PKTINFO:
2011             {
2012                 struct in_pktinfo *pkti = data;
2013                 struct target_in_pktinfo *target_pi = target_data;
2014 
2015                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2016                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2017                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2018                 break;
2019             }
2020             default:
2021                 goto unimplemented;
2022             }
2023             break;
2024 
2025         case SOL_IPV6:
2026             switch (cmsg->cmsg_type) {
2027             case IPV6_HOPLIMIT:
2028             {
2029                 uint32_t *v = (uint32_t *)data;
2030                 uint32_t *t_int = (uint32_t *)target_data;
2031 
2032                 if (len != sizeof(uint32_t) ||
2033                     tgt_len != sizeof(uint32_t)) {
2034                     goto unimplemented;
2035                 }
2036                 __put_user(*v, t_int);
2037                 break;
2038             }
2039             case IPV6_RECVERR:
2040             {
2041                 struct errhdr6_t {
2042                    struct sock_extended_err ee;
2043                    struct sockaddr_in6 offender;
2044                 };
2045                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2046                 struct errhdr6_t *target_errh =
2047                     (struct errhdr6_t *)target_data;
2048 
2049                 if (len != sizeof(struct errhdr6_t) ||
2050                     tgt_len != sizeof(struct errhdr6_t)) {
2051                     goto unimplemented;
2052                 }
2053                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2054                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2055                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2056                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2057                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2058                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2059                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2060                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2061                     (void *) &errh->offender, sizeof(errh->offender));
2062                 break;
2063             }
2064             default:
2065                 goto unimplemented;
2066             }
2067             break;
2068 
2069         default:
2070         unimplemented:
2071             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2072                           cmsg->cmsg_level, cmsg->cmsg_type);
2073             memcpy(target_data, data, MIN(len, tgt_len));
2074             if (tgt_len > len) {
2075                 memset(target_data + len, 0, tgt_len - len);
2076             }
2077         }
2078 
2079         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2080         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2081         if (msg_controllen < tgt_space) {
2082             tgt_space = msg_controllen;
2083         }
2084         msg_controllen -= tgt_space;
2085         space += tgt_space;
2086         cmsg = CMSG_NXTHDR(msgh, cmsg);
2087         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2088                                          target_cmsg_start);
2089     }
2090     unlock_user(target_cmsg, target_cmsg_addr, space);
2091  the_end:
2092     target_msgh->msg_controllen = tswapal(space);
2093     return 0;
2094 }
2095 
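/*
 * Illustrative sketch, not part of the original file: the truncation
 * policy of host_to_target_cmsg() above.  If the host delivers an
 * SCM_RIGHTS message carrying four descriptors (a 16-byte payload) but
 * the guest supplied only TARGET_CMSG_SPACE(8) bytes of control space,
 * the converter copies the whole header plus the first two descriptors,
 * sets MSG_CTRUNC in the guest's msg_flags, and reports the space
 * actually used in msg_controllen - the same policy as the kernel's
 * put_cmsg().
 */
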
2096 /* do_setsockopt() must return target values and target errnos. */
2097 static abi_long do_setsockopt(int sockfd, int level, int optname,
2098                               abi_ulong optval_addr, socklen_t optlen)
2099 {
2100     abi_long ret;
2101     int val;
2102 
2103     switch(level) {
2104     case SOL_TCP:
2105     case SOL_UDP:
2106         /* TCP and UDP options all take an 'int' value.  */
2107         if (optlen < sizeof(uint32_t))
2108             return -TARGET_EINVAL;
2109 
2110         if (get_user_u32(val, optval_addr))
2111             return -TARGET_EFAULT;
2112         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2113         break;
2114     case SOL_IP:
2115         switch(optname) {
2116         case IP_TOS:
2117         case IP_TTL:
2118         case IP_HDRINCL:
2119         case IP_ROUTER_ALERT:
2120         case IP_RECVOPTS:
2121         case IP_RETOPTS:
2122         case IP_PKTINFO:
2123         case IP_MTU_DISCOVER:
2124         case IP_RECVERR:
2125         case IP_RECVTTL:
2126         case IP_RECVTOS:
2127 #ifdef IP_FREEBIND
2128         case IP_FREEBIND:
2129 #endif
2130         case IP_MULTICAST_TTL:
2131         case IP_MULTICAST_LOOP:
2132             val = 0;
2133             if (optlen >= sizeof(uint32_t)) {
2134                 if (get_user_u32(val, optval_addr))
2135                     return -TARGET_EFAULT;
2136             } else if (optlen >= 1) {
2137                 if (get_user_u8(val, optval_addr))
2138                     return -TARGET_EFAULT;
2139             }
2140             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2141             break;
2142         case IP_MULTICAST_IF:
2143         case IP_ADD_MEMBERSHIP:
2144         case IP_DROP_MEMBERSHIP:
2145         {
2146             struct ip_mreqn ip_mreq;
2147             struct target_ip_mreqn *target_smreqn;
2148             int min_size;
2149 
2150             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2151                               sizeof(struct target_ip_mreq));
2152 
2153             if (optname == IP_MULTICAST_IF) {
2154                 min_size = sizeof(struct in_addr);
2155             } else {
2156                 min_size = sizeof(struct target_ip_mreq);
2157             }
2158             if (optlen < min_size ||
2159                 optlen > sizeof (struct target_ip_mreqn)) {
2160                 return -TARGET_EINVAL;
2161             }
2162 
2163             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2164             if (!target_smreqn) {
2165                 return -TARGET_EFAULT;
2166             }
2167             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2168             if (optlen >= sizeof(struct target_ip_mreq)) {
2169                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2170                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2171                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2172                     optlen = sizeof(struct ip_mreqn);
2173                 }
2174             }
2175             unlock_user(target_smreqn, optval_addr, 0);
2176             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2177             break;
2178         }
2179         case IP_BLOCK_SOURCE:
2180         case IP_UNBLOCK_SOURCE:
2181         case IP_ADD_SOURCE_MEMBERSHIP:
2182         case IP_DROP_SOURCE_MEMBERSHIP:
2183         {
2184             struct ip_mreq_source *ip_mreq_source;
2185 
2186             if (optlen != sizeof (struct target_ip_mreq_source))
2187                 return -TARGET_EINVAL;
2188 
2189             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2190             if (!ip_mreq_source) {
2191                 return -TARGET_EFAULT;
2192             }
2193             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2194             unlock_user(ip_mreq_source, optval_addr, 0);
2195             break;
2196         }
2197         default:
2198             goto unimplemented;
2199         }
2200         break;
2201     case SOL_IPV6:
2202         switch (optname) {
2203         case IPV6_MTU_DISCOVER:
2204         case IPV6_MTU:
2205         case IPV6_V6ONLY:
2206         case IPV6_RECVPKTINFO:
2207         case IPV6_UNICAST_HOPS:
2208         case IPV6_MULTICAST_HOPS:
2209         case IPV6_MULTICAST_LOOP:
2210         case IPV6_RECVERR:
2211         case IPV6_RECVHOPLIMIT:
2212         case IPV6_2292HOPLIMIT:
2213         case IPV6_CHECKSUM:
2214         case IPV6_ADDRFORM:
2215         case IPV6_2292PKTINFO:
2216         case IPV6_RECVTCLASS:
2217         case IPV6_RECVRTHDR:
2218         case IPV6_2292RTHDR:
2219         case IPV6_RECVHOPOPTS:
2220         case IPV6_2292HOPOPTS:
2221         case IPV6_RECVDSTOPTS:
2222         case IPV6_2292DSTOPTS:
2223         case IPV6_TCLASS:
2224         case IPV6_ADDR_PREFERENCES:
2225 #ifdef IPV6_RECVPATHMTU
2226         case IPV6_RECVPATHMTU:
2227 #endif
2228 #ifdef IPV6_TRANSPARENT
2229         case IPV6_TRANSPARENT:
2230 #endif
2231 #ifdef IPV6_FREEBIND
2232         case IPV6_FREEBIND:
2233 #endif
2234 #ifdef IPV6_RECVORIGDSTADDR
2235         case IPV6_RECVORIGDSTADDR:
2236 #endif
2237             val = 0;
2238             if (optlen < sizeof(uint32_t)) {
2239                 return -TARGET_EINVAL;
2240             }
2241             if (get_user_u32(val, optval_addr)) {
2242                 return -TARGET_EFAULT;
2243             }
2244             ret = get_errno(setsockopt(sockfd, level, optname,
2245                                        &val, sizeof(val)));
2246             break;
2247         case IPV6_PKTINFO:
2248         {
2249             struct in6_pktinfo pki;
2250 
2251             if (optlen < sizeof(pki)) {
2252                 return -TARGET_EINVAL;
2253             }
2254 
2255             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2256                 return -TARGET_EFAULT;
2257             }
2258 
2259             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2260 
2261             ret = get_errno(setsockopt(sockfd, level, optname,
2262                                        &pki, sizeof(pki)));
2263             break;
2264         }
2265         case IPV6_ADD_MEMBERSHIP:
2266         case IPV6_DROP_MEMBERSHIP:
2267         {
2268             struct ipv6_mreq ipv6mreq;
2269 
2270             if (optlen < sizeof(ipv6mreq)) {
2271                 return -TARGET_EINVAL;
2272             }
2273 
2274             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2275                 return -TARGET_EFAULT;
2276             }
2277 
2278             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2279 
2280             ret = get_errno(setsockopt(sockfd, level, optname,
2281                                        &ipv6mreq, sizeof(ipv6mreq)));
2282             break;
2283         }
2284         default:
2285             goto unimplemented;
2286         }
2287         break;
2288     case SOL_ICMPV6:
2289         switch (optname) {
2290         case ICMPV6_FILTER:
2291         {
2292             struct icmp6_filter icmp6f;
2293 
2294             if (optlen > sizeof(icmp6f)) {
2295                 optlen = sizeof(icmp6f);
2296             }
2297 
2298             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2299                 return -TARGET_EFAULT;
2300             }
2301 
2302             for (val = 0; val < 8; val++) {
2303                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2304             }
2305 
2306             ret = get_errno(setsockopt(sockfd, level, optname,
2307                                        &icmp6f, optlen));
2308             break;
2309         }
2310         default:
2311             goto unimplemented;
2312         }
2313         break;
2314     case SOL_RAW:
2315         switch (optname) {
2316         case ICMP_FILTER:
2317         case IPV6_CHECKSUM:
2318             /* These take a u32 value. */
2319             if (optlen < sizeof(uint32_t)) {
2320                 return -TARGET_EINVAL;
2321             }
2322 
2323             if (get_user_u32(val, optval_addr)) {
2324                 return -TARGET_EFAULT;
2325             }
2326             ret = get_errno(setsockopt(sockfd, level, optname,
2327                                        &val, sizeof(val)));
2328             break;
2329 
2330         default:
2331             goto unimplemented;
2332         }
2333         break;
2334 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2335     case SOL_ALG:
2336         switch (optname) {
2337         case ALG_SET_KEY:
2338         {
2339             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2340             if (!alg_key) {
2341                 return -TARGET_EFAULT;
2342             }
2343             ret = get_errno(setsockopt(sockfd, level, optname,
2344                                        alg_key, optlen));
2345             unlock_user(alg_key, optval_addr, optlen);
2346             break;
2347         }
2348         case ALG_SET_AEAD_AUTHSIZE:
2349         {
2350             ret = get_errno(setsockopt(sockfd, level, optname,
2351                                        NULL, optlen));
2352             break;
2353         }
2354         default:
2355             goto unimplemented;
2356         }
2357         break;
2358 #endif
2359     case TARGET_SOL_SOCKET:
2360         switch (optname) {
2361         case TARGET_SO_RCVTIMEO:
2362         case TARGET_SO_SNDTIMEO:
2363         {
2364                 struct timeval tv;
2365 
2366                 if (optlen != sizeof(struct target_timeval)) {
2367                     return -TARGET_EINVAL;
2368                 }
2369 
2370                 if (copy_from_user_timeval(&tv, optval_addr)) {
2371                     return -TARGET_EFAULT;
2372                 }
2373 
2374                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2375                                 optname == TARGET_SO_RCVTIMEO ?
2376                                     SO_RCVTIMEO : SO_SNDTIMEO,
2377                                 &tv, sizeof(tv)));
2378                 return ret;
2379         }
2380         case TARGET_SO_ATTACH_FILTER:
2381         {
2382                 struct target_sock_fprog *tfprog;
2383                 struct target_sock_filter *tfilter;
2384                 struct sock_fprog fprog;
2385                 struct sock_filter *filter;
2386                 int i;
2387 
2388                 if (optlen != sizeof(*tfprog)) {
2389                     return -TARGET_EINVAL;
2390                 }
2391                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2392                     return -TARGET_EFAULT;
2393                 }
2394                 if (!lock_user_struct(VERIFY_READ, tfilter,
2395                                       tswapal(tfprog->filter), 0)) {
2396                     unlock_user_struct(tfprog, optval_addr, 1);
2397                     return -TARGET_EFAULT;
2398                 }
2399 
2400                 fprog.len = tswap16(tfprog->len);
2401                 filter = g_try_new(struct sock_filter, fprog.len);
2402                 if (filter == NULL) {
2403                     unlock_user_struct(tfilter, tfprog->filter, 1);
2404                     unlock_user_struct(tfprog, optval_addr, 1);
2405                     return -TARGET_ENOMEM;
2406                 }
2407                 for (i = 0; i < fprog.len; i++) {
2408                     filter[i].code = tswap16(tfilter[i].code);
2409                     filter[i].jt = tfilter[i].jt;
2410                     filter[i].jf = tfilter[i].jf;
2411                     filter[i].k = tswap32(tfilter[i].k);
2412                 }
2413                 fprog.filter = filter;
2414 
2415                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2416                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2417                 g_free(filter);
2418 
2419                 unlock_user_struct(tfilter, tfprog->filter, 1);
2420                 unlock_user_struct(tfprog, optval_addr, 1);
2421                 return ret;
2422         }
2423         case TARGET_SO_BINDTODEVICE:
2424         {
2425                 char *dev_ifname, *addr_ifname;
2426 
2427                 if (optlen > IFNAMSIZ - 1) {
2428                     optlen = IFNAMSIZ - 1;
2429                 }
2430                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2431                 if (!dev_ifname) {
2432                     return -TARGET_EFAULT;
2433                 }
2434                 optname = SO_BINDTODEVICE;
2435                 addr_ifname = alloca(IFNAMSIZ);
2436                 memcpy(addr_ifname, dev_ifname, optlen);
2437                 addr_ifname[optlen] = 0;
2438                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2439                                            addr_ifname, optlen));
2440                 unlock_user(dev_ifname, optval_addr, 0);
2441                 return ret;
2442         }
2443         case TARGET_SO_LINGER:
2444         {
2445                 struct linger lg;
2446                 struct target_linger *tlg;
2447 
2448                 if (optlen != sizeof(struct target_linger)) {
2449                     return -TARGET_EINVAL;
2450                 }
2451                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2452                     return -TARGET_EFAULT;
2453                 }
2454                 __get_user(lg.l_onoff, &tlg->l_onoff);
2455                 __get_user(lg.l_linger, &tlg->l_linger);
2456                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2457                                 &lg, sizeof(lg)));
2458                 unlock_user_struct(tlg, optval_addr, 0);
2459                 return ret;
2460         }
2461             /* Options with 'int' argument.  */
2462         case TARGET_SO_DEBUG:
2463 		optname = SO_DEBUG;
2464 		break;
2465         case TARGET_SO_REUSEADDR:
2466 		optname = SO_REUSEADDR;
2467 		break;
2468 #ifdef SO_REUSEPORT
2469         case TARGET_SO_REUSEPORT:
2470                 optname = SO_REUSEPORT;
2471                 break;
2472 #endif
2473         case TARGET_SO_TYPE:
2474 		optname = SO_TYPE;
2475 		break;
2476         case TARGET_SO_ERROR:
2477 		optname = SO_ERROR;
2478 		break;
2479         case TARGET_SO_DONTROUTE:
2480 		optname = SO_DONTROUTE;
2481 		break;
2482         case TARGET_SO_BROADCAST:
2483 		optname = SO_BROADCAST;
2484 		break;
2485         case TARGET_SO_SNDBUF:
2486 		optname = SO_SNDBUF;
2487 		break;
2488         case TARGET_SO_SNDBUFFORCE:
2489                 optname = SO_SNDBUFFORCE;
2490                 break;
2491         case TARGET_SO_RCVBUF:
2492 		optname = SO_RCVBUF;
2493 		break;
2494         case TARGET_SO_RCVBUFFORCE:
2495                 optname = SO_RCVBUFFORCE;
2496                 break;
2497         case TARGET_SO_KEEPALIVE:
2498 		optname = SO_KEEPALIVE;
2499 		break;
2500         case TARGET_SO_OOBINLINE:
2501 		optname = SO_OOBINLINE;
2502 		break;
2503         case TARGET_SO_NO_CHECK:
2504 		optname = SO_NO_CHECK;
2505 		break;
2506         case TARGET_SO_PRIORITY:
2507 		optname = SO_PRIORITY;
2508 		break;
2509 #ifdef SO_BSDCOMPAT
2510         case TARGET_SO_BSDCOMPAT:
2511 		optname = SO_BSDCOMPAT;
2512 		break;
2513 #endif
2514         case TARGET_SO_PASSCRED:
2515 		optname = SO_PASSCRED;
2516 		break;
2517         case TARGET_SO_PASSSEC:
2518                 optname = SO_PASSSEC;
2519                 break;
2520         case TARGET_SO_TIMESTAMP:
2521 		optname = SO_TIMESTAMP;
2522 		break;
2523         case TARGET_SO_RCVLOWAT:
2524 		optname = SO_RCVLOWAT;
2525 		break;
2526         default:
2527             goto unimplemented;
2528         }
2529 	if (optlen < sizeof(uint32_t))
2530             return -TARGET_EINVAL;
2531 
2532 	if (get_user_u32(val, optval_addr))
2533             return -TARGET_EFAULT;
2534 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2535         break;
2536 #ifdef SOL_NETLINK
2537     case SOL_NETLINK:
2538         switch (optname) {
2539         case NETLINK_PKTINFO:
2540         case NETLINK_ADD_MEMBERSHIP:
2541         case NETLINK_DROP_MEMBERSHIP:
2542         case NETLINK_BROADCAST_ERROR:
2543         case NETLINK_NO_ENOBUFS:
2544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2545         case NETLINK_LISTEN_ALL_NSID:
2546         case NETLINK_CAP_ACK:
2547 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2548 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2549         case NETLINK_EXT_ACK:
2550 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2552         case NETLINK_GET_STRICT_CHK:
2553 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2554             break;
2555         default:
2556             goto unimplemented;
2557         }
2558         val = 0;
2559         if (optlen < sizeof(uint32_t)) {
2560             return -TARGET_EINVAL;
2561         }
2562         if (get_user_u32(val, optval_addr)) {
2563             return -TARGET_EFAULT;
2564         }
2565         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2566                                    sizeof(val)));
2567         break;
2568 #endif /* SOL_NETLINK */
2569     default:
2570     unimplemented:
2571         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2572                       level, optname);
2573         ret = -TARGET_ENOPROTOOPT;
2574     }
2575     return ret;
2576 }
2577 
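/*
 * Illustrative sketch, not part of the original file: the path a simple
 * integer-valued option takes through do_setsockopt() above.  A guest
 * call such as
 *
 *     int one = 1;
 *     setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *
 * arrives with level == TARGET_SOL_SOCKET and
 * optname == TARGET_SO_REUSEADDR; the switch maps the option name to the
 * host SO_REUSEADDR constant, get_user_u32() fetches (and byte-swaps,
 * where needed) the 32-bit value, and the host setsockopt() is issued
 * with a native int.
 */
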
2578 /* do_getsockopt() must return target values and target errnos. */
2579 static abi_long do_getsockopt(int sockfd, int level, int optname,
2580                               abi_ulong optval_addr, abi_ulong optlen)
2581 {
2582     abi_long ret;
2583     int len, val;
2584     socklen_t lv;
2585 
2586     switch(level) {
2587     case TARGET_SOL_SOCKET:
2588         level = SOL_SOCKET;
2589         switch (optname) {
2590         /* These don't just return a single integer */
2591         case TARGET_SO_PEERNAME:
2592             goto unimplemented;
2593         case TARGET_SO_RCVTIMEO: {
2594             struct timeval tv;
2595             socklen_t tvlen;
2596 
2597             optname = SO_RCVTIMEO;
2598 
2599 get_timeout:
2600             if (get_user_u32(len, optlen)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             if (len < 0) {
2604                 return -TARGET_EINVAL;
2605             }
2606 
2607             tvlen = sizeof(tv);
2608             ret = get_errno(getsockopt(sockfd, level, optname,
2609                                        &tv, &tvlen));
2610             if (ret < 0) {
2611                 return ret;
2612             }
2613             if (len > sizeof(struct target_timeval)) {
2614                 len = sizeof(struct target_timeval);
2615             }
2616             if (copy_to_user_timeval(optval_addr, &tv)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (put_user_u32(len, optlen)) {
2620                 return -TARGET_EFAULT;
2621             }
2622             break;
2623         }
2624         case TARGET_SO_SNDTIMEO:
2625             optname = SO_SNDTIMEO;
2626             goto get_timeout;
2627         case TARGET_SO_PEERCRED: {
2628             struct ucred cr;
2629             socklen_t crlen;
2630             struct target_ucred *tcr;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             crlen = sizeof(cr);
2640             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2641                                        &cr, &crlen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > crlen) {
2646                 len = crlen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(cr.pid, &tcr->pid);
2652             __put_user(cr.uid, &tcr->uid);
2653             __put_user(cr.gid, &tcr->gid);
2654             unlock_user_struct(tcr, optval_addr, 1);
2655             if (put_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             break;
2659         }
2660         case TARGET_SO_PEERSEC: {
2661             char *name;
2662 
2663             if (get_user_u32(len, optlen)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             if (len < 0) {
2667                 return -TARGET_EINVAL;
2668             }
2669             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2670             if (!name) {
2671                 return -TARGET_EFAULT;
2672             }
2673             lv = len;
2674             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2675                                        name, &lv));
2676             if (put_user_u32(lv, optlen)) {
2677                 ret = -TARGET_EFAULT;
2678             }
2679             unlock_user(name, optval_addr, lv);
2680             break;
2681         }
2682         case TARGET_SO_LINGER:
2683         {
2684             struct linger lg;
2685             socklen_t lglen;
2686             struct target_linger *tlg;
2687 
2688             if (get_user_u32(len, optlen)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             if (len < 0) {
2692                 return -TARGET_EINVAL;
2693             }
2694 
2695             lglen = sizeof(lg);
2696             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2697                                        &lg, &lglen));
2698             if (ret < 0) {
2699                 return ret;
2700             }
2701             if (len > lglen) {
2702                 len = lglen;
2703             }
2704             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             __put_user(lg.l_onoff, &tlg->l_onoff);
2708             __put_user(lg.l_linger, &tlg->l_linger);
2709             unlock_user_struct(tlg, optval_addr, 1);
2710             if (put_user_u32(len, optlen)) {
2711                 return -TARGET_EFAULT;
2712             }
2713             break;
2714         }
2715         /* Options with 'int' argument.  */
2716         case TARGET_SO_DEBUG:
2717             optname = SO_DEBUG;
2718             goto int_case;
2719         case TARGET_SO_REUSEADDR:
2720             optname = SO_REUSEADDR;
2721             goto int_case;
2722 #ifdef SO_REUSEPORT
2723         case TARGET_SO_REUSEPORT:
2724             optname = SO_REUSEPORT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_TYPE:
2728             optname = SO_TYPE;
2729             goto int_case;
2730         case TARGET_SO_ERROR:
2731             optname = SO_ERROR;
2732             goto int_case;
2733         case TARGET_SO_DONTROUTE:
2734             optname = SO_DONTROUTE;
2735             goto int_case;
2736         case TARGET_SO_BROADCAST:
2737             optname = SO_BROADCAST;
2738             goto int_case;
2739         case TARGET_SO_SNDBUF:
2740             optname = SO_SNDBUF;
2741             goto int_case;
2742         case TARGET_SO_RCVBUF:
2743             optname = SO_RCVBUF;
2744             goto int_case;
2745         case TARGET_SO_KEEPALIVE:
2746             optname = SO_KEEPALIVE;
2747             goto int_case;
2748         case TARGET_SO_OOBINLINE:
2749             optname = SO_OOBINLINE;
2750             goto int_case;
2751         case TARGET_SO_NO_CHECK:
2752             optname = SO_NO_CHECK;
2753             goto int_case;
2754         case TARGET_SO_PRIORITY:
2755             optname = SO_PRIORITY;
2756             goto int_case;
2757 #ifdef SO_BSDCOMPAT
2758         case TARGET_SO_BSDCOMPAT:
2759             optname = SO_BSDCOMPAT;
2760             goto int_case;
2761 #endif
2762         case TARGET_SO_PASSCRED:
2763             optname = SO_PASSCRED;
2764             goto int_case;
2765         case TARGET_SO_TIMESTAMP:
2766             optname = SO_TIMESTAMP;
2767             goto int_case;
2768         case TARGET_SO_RCVLOWAT:
2769             optname = SO_RCVLOWAT;
2770             goto int_case;
2771         case TARGET_SO_ACCEPTCONN:
2772             optname = SO_ACCEPTCONN;
2773             goto int_case;
2774         case TARGET_SO_PROTOCOL:
2775             optname = SO_PROTOCOL;
2776             goto int_case;
2777         case TARGET_SO_DOMAIN:
2778             optname = SO_DOMAIN;
2779             goto int_case;
2780         default:
2781             goto int_case;
2782         }
2783         break;
2784     case SOL_TCP:
2785     case SOL_UDP:
2786         /* TCP and UDP options all take an 'int' value.  */
2787     int_case:
2788         if (get_user_u32(len, optlen))
2789             return -TARGET_EFAULT;
2790         if (len < 0)
2791             return -TARGET_EINVAL;
2792         lv = sizeof(lv);
2793         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2794         if (ret < 0)
2795             return ret;
2796         switch (optname) {
2797         case SO_TYPE:
2798             val = host_to_target_sock_type(val);
2799             break;
2800         case SO_ERROR:
2801             val = host_to_target_errno(val);
2802             break;
2803         }
2804         if (len > lv)
2805             len = lv;
2806         if (len == 4) {
2807             if (put_user_u32(val, optval_addr))
2808                 return -TARGET_EFAULT;
2809         } else {
2810             if (put_user_u8(val, optval_addr))
2811                 return -TARGET_EFAULT;
2812         }
2813         if (put_user_u32(len, optlen))
2814             return -TARGET_EFAULT;
2815         break;
2816     case SOL_IP:
2817         switch(optname) {
2818         case IP_TOS:
2819         case IP_TTL:
2820         case IP_HDRINCL:
2821         case IP_ROUTER_ALERT:
2822         case IP_RECVOPTS:
2823         case IP_RETOPTS:
2824         case IP_PKTINFO:
2825         case IP_MTU_DISCOVER:
2826         case IP_RECVERR:
2827         case IP_RECVTOS:
2828 #ifdef IP_FREEBIND
2829         case IP_FREEBIND:
2830 #endif
2831         case IP_MULTICAST_TTL:
2832         case IP_MULTICAST_LOOP:
2833             if (get_user_u32(len, optlen))
2834                 return -TARGET_EFAULT;
2835             if (len < 0)
2836                 return -TARGET_EINVAL;
2837             lv = sizeof(lv);
2838             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2839             if (ret < 0)
2840                 return ret;
2841             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2842                 len = 1;
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u8(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             } else {
2847                 if (len > sizeof(int))
2848                     len = sizeof(int);
2849                 if (put_user_u32(len, optlen)
2850                     || put_user_u32(val, optval_addr))
2851                     return -TARGET_EFAULT;
2852             }
2853             break;
2854         default:
2855             ret = -TARGET_ENOPROTOOPT;
2856             break;
2857         }
2858         break;
2859     case SOL_IPV6:
2860         switch (optname) {
2861         case IPV6_MTU_DISCOVER:
2862         case IPV6_MTU:
2863         case IPV6_V6ONLY:
2864         case IPV6_RECVPKTINFO:
2865         case IPV6_UNICAST_HOPS:
2866         case IPV6_MULTICAST_HOPS:
2867         case IPV6_MULTICAST_LOOP:
2868         case IPV6_RECVERR:
2869         case IPV6_RECVHOPLIMIT:
2870         case IPV6_2292HOPLIMIT:
2871         case IPV6_CHECKSUM:
2872         case IPV6_ADDRFORM:
2873         case IPV6_2292PKTINFO:
2874         case IPV6_RECVTCLASS:
2875         case IPV6_RECVRTHDR:
2876         case IPV6_2292RTHDR:
2877         case IPV6_RECVHOPOPTS:
2878         case IPV6_2292HOPOPTS:
2879         case IPV6_RECVDSTOPTS:
2880         case IPV6_2292DSTOPTS:
2881         case IPV6_TCLASS:
2882         case IPV6_ADDR_PREFERENCES:
2883 #ifdef IPV6_RECVPATHMTU
2884         case IPV6_RECVPATHMTU:
2885 #endif
2886 #ifdef IPV6_TRANSPARENT
2887         case IPV6_TRANSPARENT:
2888 #endif
2889 #ifdef IPV6_FREEBIND
2890         case IPV6_FREEBIND:
2891 #endif
2892 #ifdef IPV6_RECVORIGDSTADDR
2893         case IPV6_RECVORIGDSTADDR:
2894 #endif
2895             if (get_user_u32(len, optlen))
2896                 return -TARGET_EFAULT;
2897             if (len < 0)
2898                 return -TARGET_EINVAL;
2899             lv = sizeof(lv);
2900             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2901             if (ret < 0)
2902                 return ret;
2903             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2904                 len = 1;
2905                 if (put_user_u32(len, optlen)
2906                     || put_user_u8(val, optval_addr))
2907                     return -TARGET_EFAULT;
2908             } else {
2909                 if (len > sizeof(int))
2910                     len = sizeof(int);
2911                 if (put_user_u32(len, optlen)
2912                     || put_user_u32(val, optval_addr))
2913                     return -TARGET_EFAULT;
2914             }
2915             break;
2916         default:
2917             ret = -TARGET_ENOPROTOOPT;
2918             break;
2919         }
2920         break;
2921 #ifdef SOL_NETLINK
2922     case SOL_NETLINK:
2923         switch (optname) {
2924         case NETLINK_PKTINFO:
2925         case NETLINK_BROADCAST_ERROR:
2926         case NETLINK_NO_ENOBUFS:
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2928         case NETLINK_LISTEN_ALL_NSID:
2929         case NETLINK_CAP_ACK:
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2932         case NETLINK_EXT_ACK:
2933 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2934 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2935         case NETLINK_GET_STRICT_CHK:
2936 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2937             if (get_user_u32(len, optlen)) {
2938                 return -TARGET_EFAULT;
2939             }
2940             if (len != sizeof(val)) {
2941                 return -TARGET_EINVAL;
2942             }
2943             lv = len;
2944             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2945             if (ret < 0) {
2946                 return ret;
2947             }
2948             if (put_user_u32(lv, optlen)
2949                 || put_user_u32(val, optval_addr)) {
2950                 return -TARGET_EFAULT;
2951             }
2952             break;
2953 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2954         case NETLINK_LIST_MEMBERSHIPS:
2955         {
2956             uint32_t *results;
2957             int i;
2958             if (get_user_u32(len, optlen)) {
2959                 return -TARGET_EFAULT;
2960             }
2961             if (len < 0) {
2962                 return -TARGET_EINVAL;
2963             }
2964             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2965             if (!results && len > 0) {
2966                 return -TARGET_EFAULT;
2967             }
2968             lv = len;
2969             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2970             if (ret < 0) {
2971                 unlock_user(results, optval_addr, 0);
2972                 return ret;
2973             }
2974             /* swap host endianness to target endianness. */
2975             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2976                 results[i] = tswap32(results[i]);
2977             }
2978             if (put_user_u32(lv, optlen)) {
2979                 return -TARGET_EFAULT;
2980             }
2981             unlock_user(results, optval_addr, 0);
2982             break;
2983         }
2984 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2985         default:
2986             goto unimplemented;
2987         }
2988         break;
2989 #endif /* SOL_NETLINK */
2990     default:
2991     unimplemented:
2992         qemu_log_mask(LOG_UNIMP,
2993                       "getsockopt level=%d optname=%d not yet supported\n",
2994                       level, optname);
2995         ret = -TARGET_EOPNOTSUPP;
2996         break;
2997     }
2998     return ret;
2999 }
3000 
3001 /* Convert target low/high pair representing file offset into the host
3002  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3003  * as the kernel doesn't handle them either.
3004  */
3005 static void target_to_host_low_high(abi_ulong tlow,
3006                                     abi_ulong thigh,
3007                                     unsigned long *hlow,
3008                                     unsigned long *hhigh)
3009 {
3010     uint64_t off = tlow |
3011         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3012         TARGET_LONG_BITS / 2;
3013 
3014     *hlow = off;
3015     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3016 }
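
/*
 * A minimal worked example of the low/high split above, assuming a 32-bit
 * target (TARGET_LONG_BITS == 32); the helper name is illustrative only and
 * the block is not part of the build.  The double shift avoids shifting by
 * a full word width, which would be undefined behaviour in C.
 */
#if 0   /* illustrative sketch */
static void low_high_example(void)
{
    abi_ulong tlow = 0x89abcdef, thigh = 0x01234567;
    unsigned long hlow, hhigh;

    /* Internally, off becomes 0x0123456789abcdef. */
    target_to_host_low_high(tlow, thigh, &hlow, &hhigh);

    /* On a 64-bit host the whole offset fits in hlow and hhigh is 0;
     * on a 32-bit host, hlow == 0x89abcdef and hhigh == 0x01234567. */
}
#endif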
3017 
3018 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3019                                 abi_ulong count, int copy)
3020 {
3021     struct target_iovec *target_vec;
3022     struct iovec *vec;
3023     abi_ulong total_len, max_len;
3024     int i;
3025     int err = 0;
3026     bool bad_address = false;
3027 
3028     if (count == 0) {
3029         errno = 0;
3030         return NULL;
3031     }
3032     if (count > IOV_MAX) {
3033         errno = EINVAL;
3034         return NULL;
3035     }
3036 
3037     vec = g_try_new0(struct iovec, count);
3038     if (vec == NULL) {
3039         errno = ENOMEM;
3040         return NULL;
3041     }
3042 
3043     target_vec = lock_user(VERIFY_READ, target_addr,
3044                            count * sizeof(struct target_iovec), 1);
3045     if (target_vec == NULL) {
3046         err = EFAULT;
3047         goto fail2;
3048     }
3049 
3050     /* ??? If host page size > target page size, this will result in a
3051        value larger than what we can actually support.  */
3052     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3053     total_len = 0;
3054 
3055     for (i = 0; i < count; i++) {
3056         abi_ulong base = tswapal(target_vec[i].iov_base);
3057         abi_long len = tswapal(target_vec[i].iov_len);
3058 
3059         if (len < 0) {
3060             err = EINVAL;
3061             goto fail;
3062         } else if (len == 0) {
3063             /* Zero length pointer is ignored.  */
3064             vec[i].iov_base = 0;
3065         } else {
3066             vec[i].iov_base = lock_user(type, base, len, copy);
3067             /* If the first buffer pointer is bad, this is a fault.  But
3068              * subsequent bad buffers will result in a partial write; this
3069              * is realized by filling the vector with null pointers and
3070              * zero lengths. */
3071             if (!vec[i].iov_base) {
3072                 if (i == 0) {
3073                     err = EFAULT;
3074                     goto fail;
3075                 } else {
3076                     bad_address = true;
3077                 }
3078             }
3079             if (bad_address) {
3080                 len = 0;
3081             }
3082             if (len > max_len - total_len) {
3083                 len = max_len - total_len;
3084             }
3085         }
3086         vec[i].iov_len = len;
3087         total_len += len;
3088     }
3089 
3090     unlock_user(target_vec, target_addr, 0);
3091     return vec;
3092 
3093  fail:
3094     while (--i >= 0) {
3095         if (tswapal(target_vec[i].iov_len) > 0) {
3096             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3097         }
3098     }
3099     unlock_user(target_vec, target_addr, 0);
3100  fail2:
3101     g_free(vec);
3102     errno = err;
3103     return NULL;
3104 }
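
/*
 * A hedged sketch of how lock_iovec()/unlock_iovec() pair up in a typical
 * caller; the helper name and the plain writev() call are illustrative only,
 * not code from this file.  On failure lock_iovec() returns NULL with errno
 * set, which the caller converts to a target errno.
 */
#if 0   /* illustrative sketch */
static abi_long writev_sketch(int fd, abi_ulong target_vec, abi_ulong count)
{
    struct iovec *vec = lock_iovec(VERIFY_READ, target_vec, count, 1);
    abi_long ret;

    if (vec == NULL) {
        return -host_to_target_errno(errno);
    }
    ret = get_errno(writev(fd, vec, count));
    /* copy == 0: nothing needs to be copied back to guest memory. */
    unlock_iovec(vec, target_vec, count, 0);
    return ret;
}
#endif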
3105 
3106 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3107                          abi_ulong count, int copy)
3108 {
3109     struct target_iovec *target_vec;
3110     int i;
3111 
3112     target_vec = lock_user(VERIFY_READ, target_addr,
3113                            count * sizeof(struct target_iovec), 1);
3114     if (target_vec) {
3115         for (i = 0; i < count; i++) {
3116             abi_ulong base = tswapal(target_vec[i].iov_base);
3117             abi_long len = tswapal(target_vec[i].iov_len);
3118             if (len < 0) {
3119                 break;
3120             }
3121             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3122         }
3123         unlock_user(target_vec, target_addr, 0);
3124     }
3125 
3126     g_free(vec);
3127 }
3128 
3129 static inline int target_to_host_sock_type(int *type)
3130 {
3131     int host_type = 0;
3132     int target_type = *type;
3133 
3134     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3135     case TARGET_SOCK_DGRAM:
3136         host_type = SOCK_DGRAM;
3137         break;
3138     case TARGET_SOCK_STREAM:
3139         host_type = SOCK_STREAM;
3140         break;
3141     default:
3142         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3143         break;
3144     }
3145     if (target_type & TARGET_SOCK_CLOEXEC) {
3146 #if defined(SOCK_CLOEXEC)
3147         host_type |= SOCK_CLOEXEC;
3148 #else
3149         return -TARGET_EINVAL;
3150 #endif
3151     }
3152     if (target_type & TARGET_SOCK_NONBLOCK) {
3153 #if defined(SOCK_NONBLOCK)
3154         host_type |= SOCK_NONBLOCK;
3155 #elif !defined(O_NONBLOCK)
3156         return -TARGET_EINVAL;
3157 #endif
3158     }
3159     *type = host_type;
3160     return 0;
3161 }
3162 
3163 /* Try to emulate socket type flags after socket creation.  */
3164 static int sock_flags_fixup(int fd, int target_type)
3165 {
3166 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3167     if (target_type & TARGET_SOCK_NONBLOCK) {
3168         int flags = fcntl(fd, F_GETFL);
3169         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3170             close(fd);
3171             return -TARGET_EINVAL;
3172         }
3173     }
3174 #endif
3175     return fd;
3176 }
3177 
3178 /* do_socket() Must return target values and target errnos. */
3179 static abi_long do_socket(int domain, int type, int protocol)
3180 {
3181     int target_type = type;
3182     int ret;
3183 
3184     ret = target_to_host_sock_type(&type);
3185     if (ret) {
3186         return ret;
3187     }
3188 
3189     if (domain == PF_NETLINK && !(
3190 #ifdef CONFIG_RTNETLINK
3191          protocol == NETLINK_ROUTE ||
3192 #endif
3193          protocol == NETLINK_KOBJECT_UEVENT ||
3194          protocol == NETLINK_AUDIT)) {
3195         return -TARGET_EPROTONOSUPPORT;
3196     }
3197 
3198     if (domain == AF_PACKET ||
3199         (domain == AF_INET && type == SOCK_PACKET)) {
3200         protocol = tswap16(protocol);
3201     }
3202 
3203     ret = get_errno(socket(domain, type, protocol));
3204     if (ret >= 0) {
3205         ret = sock_flags_fixup(ret, target_type);
3206         if (type == SOCK_PACKET) {
3207             /* Handle an obsolete case:
3208              * if the socket type is SOCK_PACKET, bind by name.
3209              */
3210             fd_trans_register(ret, &target_packet_trans);
3211         } else if (domain == PF_NETLINK) {
3212             switch (protocol) {
3213 #ifdef CONFIG_RTNETLINK
3214             case NETLINK_ROUTE:
3215                 fd_trans_register(ret, &target_netlink_route_trans);
3216                 break;
3217 #endif
3218             case NETLINK_KOBJECT_UEVENT:
3219                 /* nothing to do: messages are strings */
3220                 break;
3221             case NETLINK_AUDIT:
3222                 fd_trans_register(ret, &target_netlink_audit_trans);
3223                 break;
3224             default:
3225                 g_assert_not_reached();
3226             }
3227         }
3228     }
3229     return ret;
3230 }
3231 
3232 /* do_bind() Must return target values and target errnos. */
3233 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3234                         socklen_t addrlen)
3235 {
3236     void *addr;
3237     abi_long ret;
3238 
3239     if ((int)addrlen < 0) {
3240         return -TARGET_EINVAL;
3241     }
3242 
3243     addr = alloca(addrlen+1);
3244 
3245     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3246     if (ret)
3247         return ret;
3248 
3249     return get_errno(bind(sockfd, addr, addrlen));
3250 }
3251 
3252 /* do_connect() Must return target values and target errnos. */
3253 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3254                            socklen_t addrlen)
3255 {
3256     void *addr;
3257     abi_long ret;
3258 
3259     if ((int)addrlen < 0) {
3260         return -TARGET_EINVAL;
3261     }
3262 
3263     addr = alloca(addrlen+1);
3264 
3265     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3266     if (ret)
3267         return ret;
3268 
3269     return get_errno(safe_connect(sockfd, addr, addrlen));
3270 }
3271 
3272 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3273 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3274                                       int flags, int send)
3275 {
3276     abi_long ret, len;
3277     struct msghdr msg;
3278     abi_ulong count;
3279     struct iovec *vec;
3280     abi_ulong target_vec;
3281 
3282     if (msgp->msg_name) {
3283         msg.msg_namelen = tswap32(msgp->msg_namelen);
3284         msg.msg_name = alloca(msg.msg_namelen+1);
3285         ret = target_to_host_sockaddr(fd, msg.msg_name,
3286                                       tswapal(msgp->msg_name),
3287                                       msg.msg_namelen);
3288         if (ret == -TARGET_EFAULT) {
3289             /* For connected sockets msg_name and msg_namelen must
3290              * be ignored, so returning EFAULT immediately is wrong.
3291              * Instead, pass a bad msg_name to the host kernel, and
3292              * let it decide whether to return EFAULT or not.
3293              */
3294             msg.msg_name = (void *)-1;
3295         } else if (ret) {
3296             goto out2;
3297         }
3298     } else {
3299         msg.msg_name = NULL;
3300         msg.msg_namelen = 0;
3301     }
3302     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3303     msg.msg_control = alloca(msg.msg_controllen);
3304     memset(msg.msg_control, 0, msg.msg_controllen);
3305 
3306     msg.msg_flags = tswap32(msgp->msg_flags);
3307 
3308     count = tswapal(msgp->msg_iovlen);
3309     target_vec = tswapal(msgp->msg_iov);
3310 
3311     if (count > IOV_MAX) {
3312         /* sendmsg/recvmsg return a different errno for this condition than
3313          * readv/writev, so we must catch it here before lock_iovec() does.
3314          */
3315         ret = -TARGET_EMSGSIZE;
3316         goto out2;
3317     }
3318 
3319     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3320                      target_vec, count, send);
3321     if (vec == NULL) {
3322         ret = -host_to_target_errno(errno);
3323         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3324         if (!send || ret) {
3325             goto out2;
3326         }
3327     }
3328     msg.msg_iovlen = count;
3329     msg.msg_iov = vec;
3330 
3331     if (send) {
3332         if (fd_trans_target_to_host_data(fd)) {
3333             void *host_msg;
3334 
3335             host_msg = g_malloc(msg.msg_iov->iov_len);
3336             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3337             ret = fd_trans_target_to_host_data(fd)(host_msg,
3338                                                    msg.msg_iov->iov_len);
3339             if (ret >= 0) {
3340                 msg.msg_iov->iov_base = host_msg;
3341                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3342             }
3343             g_free(host_msg);
3344         } else {
3345             ret = target_to_host_cmsg(&msg, msgp);
3346             if (ret == 0) {
3347                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3348             }
3349         }
3350     } else {
3351         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3352         if (!is_error(ret)) {
3353             len = ret;
3354             if (fd_trans_host_to_target_data(fd)) {
3355                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3356                                                MIN(msg.msg_iov->iov_len, len));
3357             }
3358             if (!is_error(ret)) {
3359                 ret = host_to_target_cmsg(msgp, &msg);
3360             }
3361             if (!is_error(ret)) {
3362                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3363                 msgp->msg_flags = tswap32(msg.msg_flags);
3364                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3365                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3366                                     msg.msg_name, msg.msg_namelen);
3367                     if (ret) {
3368                         goto out;
3369                     }
3370                 }
3371 
3372                 ret = len;
3373             }
3374         }
3375     }
3376 
3377 out:
3378     if (vec) {
3379         unlock_iovec(vec, target_vec, count, !send);
3380     }
3381 out2:
3382     return ret;
3383 }
3384 
3385 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3386                                int flags, int send)
3387 {
3388     abi_long ret;
3389     struct target_msghdr *msgp;
3390 
3391     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3392                           msgp,
3393                           target_msg,
3394                           send ? 1 : 0)) {
3395         return -TARGET_EFAULT;
3396     }
3397     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3398     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3399     return ret;
3400 }
3401 
3402 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3403  * so it might not have this *mmsg-specific flag either.
3404  */
3405 #ifndef MSG_WAITFORONE
3406 #define MSG_WAITFORONE 0x10000
3407 #endif
3408 
3409 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3410                                 unsigned int vlen, unsigned int flags,
3411                                 int send)
3412 {
3413     struct target_mmsghdr *mmsgp;
3414     abi_long ret = 0;
3415     int i;
3416 
3417     if (vlen > UIO_MAXIOV) {
3418         vlen = UIO_MAXIOV;
3419     }
3420 
3421     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3422     if (!mmsgp) {
3423         return -TARGET_EFAULT;
3424     }
3425 
3426     for (i = 0; i < vlen; i++) {
3427         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3428         if (is_error(ret)) {
3429             break;
3430         }
3431         mmsgp[i].msg_len = tswap32(ret);
3432         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3433         if (flags & MSG_WAITFORONE) {
3434             flags |= MSG_DONTWAIT;
3435         }
3436     }
3437 
3438     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3439 
3440     /* Return number of datagrams sent if we sent any at all;
3441      * otherwise return the error.
3442      */
3443     if (i) {
3444         return i;
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_accept4() Must return target values and target errnos. */
3450 static abi_long do_accept4(int fd, abi_ulong target_addr,
3451                            abi_ulong target_addrlen_addr, int flags)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456     int host_flags;
3457 
3458     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3459         return -TARGET_EINVAL;
3460     }
3461 
3462     host_flags = 0;
3463     if (flags & TARGET_SOCK_NONBLOCK) {
3464         host_flags |= SOCK_NONBLOCK;
3465     }
3466     if (flags & TARGET_SOCK_CLOEXEC) {
3467         host_flags |= SOCK_CLOEXEC;
3468     }
3469 
3470     if (target_addr == 0) {
3471         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3472     }
3473 
3474     /* Linux returns EFAULT if the addrlen pointer is invalid */
3475     if (get_user_u32(addrlen, target_addrlen_addr))
3476         return -TARGET_EFAULT;
3477 
3478     if ((int)addrlen < 0) {
3479         return -TARGET_EINVAL;
3480     }
3481 
3482     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3483         return -TARGET_EFAULT;
3484     }
3485 
3486     addr = alloca(addrlen);
3487 
3488     ret_addrlen = addrlen;
3489     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3490     if (!is_error(ret)) {
3491         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3492         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3493             ret = -TARGET_EFAULT;
3494         }
3495     }
3496     return ret;
3497 }
3498 
3499 /* do_getpeername() Must return target values and target errnos. */
3500 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3501                                abi_ulong target_addrlen_addr)
3502 {
3503     socklen_t addrlen, ret_addrlen;
3504     void *addr;
3505     abi_long ret;
3506 
3507     if (get_user_u32(addrlen, target_addrlen_addr))
3508         return -TARGET_EFAULT;
3509 
3510     if ((int)addrlen < 0) {
3511         return -TARGET_EINVAL;
3512     }
3513 
3514     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3515         return -TARGET_EFAULT;
3516     }
3517 
3518     addr = alloca(addrlen);
3519 
3520     ret_addrlen = addrlen;
3521     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3522     if (!is_error(ret)) {
3523         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3524         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3525             ret = -TARGET_EFAULT;
3526         }
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_getsockname() Must return target values and target errnos. */
3532 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3533                                abi_ulong target_addrlen_addr)
3534 {
3535     socklen_t addrlen, ret_addrlen;
3536     void *addr;
3537     abi_long ret;
3538 
3539     if (get_user_u32(addrlen, target_addrlen_addr))
3540         return -TARGET_EFAULT;
3541 
3542     if ((int)addrlen < 0) {
3543         return -TARGET_EINVAL;
3544     }
3545 
3546     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3547         return -TARGET_EFAULT;
3548     }
3549 
3550     addr = alloca(addrlen);
3551 
3552     ret_addrlen = addrlen;
3553     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3554     if (!is_error(ret)) {
3555         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3556         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3557             ret = -TARGET_EFAULT;
3558         }
3559     }
3560     return ret;
3561 }
3562 
3563 /* do_socketpair() Must return target values and target errnos. */
3564 static abi_long do_socketpair(int domain, int type, int protocol,
3565                               abi_ulong target_tab_addr)
3566 {
3567     int tab[2];
3568     abi_long ret;
3569 
3570     target_to_host_sock_type(&type);
3571 
3572     ret = get_errno(socketpair(domain, type, protocol, tab));
3573     if (!is_error(ret)) {
3574         if (put_user_s32(tab[0], target_tab_addr)
3575             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3576             ret = -TARGET_EFAULT;
3577     }
3578     return ret;
3579 }
3580 
3581 /* do_sendto() Must return target values and target errnos. */
3582 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3583                           abi_ulong target_addr, socklen_t addrlen)
3584 {
3585     void *addr;
3586     void *host_msg = NULL;
3587     void *copy_msg = NULL;
3588     abi_long ret;
3589 
3590     if ((int)addrlen < 0) {
3591         return -TARGET_EINVAL;
3592     }
3593 
3594     if (len != 0) {
3595         host_msg = lock_user(VERIFY_READ, msg, len, 1);
3596         if (!host_msg) {
3597             return -TARGET_EFAULT;
3598         }
3599         if (fd_trans_target_to_host_data(fd)) {
3600             copy_msg = host_msg;
3601             host_msg = g_malloc(len);
3602             memcpy(host_msg, copy_msg, len);
3603             ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3604             if (ret < 0) {
3605                 goto fail;
3606             }
3607         }
3608     }
3609     if (target_addr) {
3610         addr = alloca(addrlen+1);
3611         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3612         if (ret) {
3613             goto fail;
3614         }
3615         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3616     } else {
3617         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3618     }
3619 fail:
3620     if (copy_msg) {
3621         g_free(host_msg);
3622         host_msg = copy_msg;
3623     }
3624     unlock_user(host_msg, msg, 0);
3625     return ret;
3626 }
3627 
3628 /* do_recvfrom() Must return target values and target errnos. */
3629 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3630                             abi_ulong target_addr,
3631                             abi_ulong target_addrlen)
3632 {
3633     socklen_t addrlen, ret_addrlen;
3634     void *addr;
3635     void *host_msg;
3636     abi_long ret;
3637 
3638     if (!msg) {
3639         host_msg = NULL;
3640     } else {
3641         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3642         if (!host_msg) {
3643             return -TARGET_EFAULT;
3644         }
3645     }
3646     if (target_addr) {
3647         if (get_user_u32(addrlen, target_addrlen)) {
3648             ret = -TARGET_EFAULT;
3649             goto fail;
3650         }
3651         if ((int)addrlen < 0) {
3652             ret = -TARGET_EINVAL;
3653             goto fail;
3654         }
3655         addr = alloca(addrlen);
3656         ret_addrlen = addrlen;
3657         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3658                                       addr, &ret_addrlen));
3659     } else {
3660         addr = NULL; /* To keep compiler quiet.  */
3661         addrlen = 0; /* To keep compiler quiet.  */
3662         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3663     }
3664     if (!is_error(ret)) {
3665         if (fd_trans_host_to_target_data(fd)) {
3666             abi_long trans;
3667             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3668             if (is_error(trans)) {
3669                 ret = trans;
3670                 goto fail;
3671             }
3672         }
3673         if (target_addr) {
3674             host_to_target_sockaddr(target_addr, addr,
3675                                     MIN(addrlen, ret_addrlen));
3676             if (put_user_u32(ret_addrlen, target_addrlen)) {
3677                 ret = -TARGET_EFAULT;
3678                 goto fail;
3679             }
3680         }
3681         unlock_user(host_msg, msg, len);
3682     } else {
3683 fail:
3684         unlock_user(host_msg, msg, 0);
3685     }
3686     return ret;
3687 }
3688 
3689 #ifdef TARGET_NR_socketcall
3690 /* do_socketcall() must return target values and target errnos. */
3691 static abi_long do_socketcall(int num, abi_ulong vptr)
3692 {
3693     static const unsigned nargs[] = { /* number of arguments per operation */
3694         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3695         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3696         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3697         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3698         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3699         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3700         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3701         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3702         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3703         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3704         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3705         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3706         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3707         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3708         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3709         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3710         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3711         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3712         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3713         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3714     };
3715     abi_long a[6]; /* max 6 args */
3716     unsigned i;
3717 
3718     /* check the range of the first argument num */
3719     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3720     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3721         return -TARGET_EINVAL;
3722     }
3723     /* ensure we have space for args */
3724     if (nargs[num] > ARRAY_SIZE(a)) {
3725         return -TARGET_EINVAL;
3726     }
3727     /* collect the arguments in a[] according to nargs[] */
3728     for (i = 0; i < nargs[num]; ++i) {
3729         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3730             return -TARGET_EFAULT;
3731         }
3732     }
3733     /* now when we have the args, invoke the appropriate underlying function */
3734     switch (num) {
3735     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3736         return do_socket(a[0], a[1], a[2]);
3737     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3738         return do_bind(a[0], a[1], a[2]);
3739     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3740         return do_connect(a[0], a[1], a[2]);
3741     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3742         return get_errno(listen(a[0], a[1]));
3743     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3744         return do_accept4(a[0], a[1], a[2], 0);
3745     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3746         return do_getsockname(a[0], a[1], a[2]);
3747     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3748         return do_getpeername(a[0], a[1], a[2]);
3749     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3750         return do_socketpair(a[0], a[1], a[2], a[3]);
3751     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3752         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3753     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3754         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3755     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3756         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3757     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3758         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3759     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3760         return get_errno(shutdown(a[0], a[1]));
3761     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3762         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3763     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3764         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3765     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3766         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3767     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3768         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3769     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3770         return do_accept4(a[0], a[1], a[2], a[3]);
3771     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3772         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3773     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3774         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3775     default:
3776         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3777         return -TARGET_EINVAL;
3778     }
3779 }
3780 #endif
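
/*
 * An illustrative view of the argument unpacking above: a guest
 * connect(fd, addr, addrlen) issued through socketcall arrives with
 * num == TARGET_SYS_CONNECT and vptr pointing at an array of three
 * abi_longs in guest memory.  The helper below is a sketch of what the
 * generic loop reduces to for that one case, not code from this file.
 */
#if 0   /* illustrative sketch */
static abi_long socketcall_connect_sketch(abi_ulong vptr)
{
    abi_long a[3]; /* fd, addr, addrlen, laid out contiguously in guest memory */
    unsigned i;

    for (i = 0; i < 3; i++) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    return do_connect(a[0], a[1], a[2]);
}
#endif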
3781 
3782 #ifndef TARGET_SEMID64_DS
3783 /* asm-generic version of this struct */
3784 struct target_semid64_ds
3785 {
3786   struct target_ipc_perm sem_perm;
3787   abi_ulong sem_otime;
3788 #if TARGET_ABI_BITS == 32
3789   abi_ulong __unused1;
3790 #endif
3791   abi_ulong sem_ctime;
3792 #if TARGET_ABI_BITS == 32
3793   abi_ulong __unused2;
3794 #endif
3795   abi_ulong sem_nsems;
3796   abi_ulong __unused3;
3797   abi_ulong __unused4;
3798 };
3799 #endif
3800 
3801 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3802                                                abi_ulong target_addr)
3803 {
3804     struct target_ipc_perm *target_ip;
3805     struct target_semid64_ds *target_sd;
3806 
3807     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3808         return -TARGET_EFAULT;
3809     target_ip = &(target_sd->sem_perm);
3810     host_ip->__key = tswap32(target_ip->__key);
3811     host_ip->uid = tswap32(target_ip->uid);
3812     host_ip->gid = tswap32(target_ip->gid);
3813     host_ip->cuid = tswap32(target_ip->cuid);
3814     host_ip->cgid = tswap32(target_ip->cgid);
3815 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3816     host_ip->mode = tswap32(target_ip->mode);
3817 #else
3818     host_ip->mode = tswap16(target_ip->mode);
3819 #endif
3820 #if defined(TARGET_PPC)
3821     host_ip->__seq = tswap32(target_ip->__seq);
3822 #else
3823     host_ip->__seq = tswap16(target_ip->__seq);
3824 #endif
3825     unlock_user_struct(target_sd, target_addr, 0);
3826     return 0;
3827 }
3828 
3829 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3830                                                struct ipc_perm *host_ip)
3831 {
3832     struct target_ipc_perm *target_ip;
3833     struct target_semid64_ds *target_sd;
3834 
3835     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3836         return -TARGET_EFAULT;
3837     target_ip = &(target_sd->sem_perm);
3838     target_ip->__key = tswap32(host_ip->__key);
3839     target_ip->uid = tswap32(host_ip->uid);
3840     target_ip->gid = tswap32(host_ip->gid);
3841     target_ip->cuid = tswap32(host_ip->cuid);
3842     target_ip->cgid = tswap32(host_ip->cgid);
3843 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3844     target_ip->mode = tswap32(host_ip->mode);
3845 #else
3846     target_ip->mode = tswap16(host_ip->mode);
3847 #endif
3848 #if defined(TARGET_PPC)
3849     target_ip->__seq = tswap32(host_ip->__seq);
3850 #else
3851     target_ip->__seq = tswap16(host_ip->__seq);
3852 #endif
3853     unlock_user_struct(target_sd, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3858                                                abi_ulong target_addr)
3859 {
3860     struct target_semid64_ds *target_sd;
3861 
3862     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3863         return -TARGET_EFAULT;
3864     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3865         return -TARGET_EFAULT;
3866     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3867     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3868     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3869     unlock_user_struct(target_sd, target_addr, 0);
3870     return 0;
3871 }
3872 
3873 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3874                                                struct semid_ds *host_sd)
3875 {
3876     struct target_semid64_ds *target_sd;
3877 
3878     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3879         return -TARGET_EFAULT;
3880     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3881         return -TARGET_EFAULT;
3882     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3883     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3884     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3885     unlock_user_struct(target_sd, target_addr, 1);
3886     return 0;
3887 }
3888 
3889 struct target_seminfo {
3890     int semmap;
3891     int semmni;
3892     int semmns;
3893     int semmnu;
3894     int semmsl;
3895     int semopm;
3896     int semume;
3897     int semusz;
3898     int semvmx;
3899     int semaem;
3900 };
3901 
3902 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3903                                               struct seminfo *host_seminfo)
3904 {
3905     struct target_seminfo *target_seminfo;
3906     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3907         return -TARGET_EFAULT;
3908     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3909     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3910     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3911     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3912     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3913     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3914     __put_user(host_seminfo->semume, &target_seminfo->semume);
3915     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3916     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3917     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3918     unlock_user_struct(target_seminfo, target_addr, 1);
3919     return 0;
3920 }
3921 
3922 union semun {
3923 	int val;
3924 	struct semid_ds *buf;
3925 	unsigned short *array;
3926 	struct seminfo *__buf;
3927 };
3928 
3929 union target_semun {
3930 	int val;
3931 	abi_ulong buf;
3932 	abi_ulong array;
3933 	abi_ulong __buf;
3934 };
3935 
3936 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3937                                                abi_ulong target_addr)
3938 {
3939     int nsems;
3940     unsigned short *array;
3941     union semun semun;
3942     struct semid_ds semid_ds;
3943     int i, ret;
3944 
3945     semun.buf = &semid_ds;
3946 
3947     ret = semctl(semid, 0, IPC_STAT, semun);
3948     if (ret == -1)
3949         return get_errno(ret);
3950 
3951     nsems = semid_ds.sem_nsems;
3952 
3953     *host_array = g_try_new(unsigned short, nsems);
3954     if (!*host_array) {
3955         return -TARGET_ENOMEM;
3956     }
3957     array = lock_user(VERIFY_READ, target_addr,
3958                       nsems*sizeof(unsigned short), 1);
3959     if (!array) {
3960         g_free(*host_array);
3961         return -TARGET_EFAULT;
3962     }
3963 
3964     for(i=0; i<nsems; i++) {
3965         __get_user((*host_array)[i], &array[i]);
3966     }
3967     unlock_user(array, target_addr, 0);
3968 
3969     return 0;
3970 }
3971 
3972 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3973                                                unsigned short **host_array)
3974 {
3975     int nsems;
3976     unsigned short *array;
3977     union semun semun;
3978     struct semid_ds semid_ds;
3979     int i, ret;
3980 
3981     semun.buf = &semid_ds;
3982 
3983     ret = semctl(semid, 0, IPC_STAT, semun);
3984     if (ret == -1)
3985         return get_errno(ret);
3986 
3987     nsems = semid_ds.sem_nsems;
3988 
3989     array = lock_user(VERIFY_WRITE, target_addr,
3990                       nsems*sizeof(unsigned short), 0);
3991     if (!array)
3992         return -TARGET_EFAULT;
3993 
3994     for(i=0; i<nsems; i++) {
3995         __put_user((*host_array)[i], &array[i]);
3996     }
3997     g_free(*host_array);
3998     unlock_user(array, target_addr, 1);
3999 
4000     return 0;
4001 }
4002 
4003 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4004                                  abi_ulong target_arg)
4005 {
4006     union target_semun target_su = { .buf = target_arg };
4007     union semun arg;
4008     struct semid_ds dsarg;
4009     unsigned short *array = NULL;
4010     struct seminfo seminfo;
4011     abi_long ret = -TARGET_EINVAL;
4012     abi_long err;
4013     cmd &= 0xff;
4014 
4015     switch( cmd ) {
4016 	case GETVAL:
4017 	case SETVAL:
4018             /* In 64-bit cross-endian situations, we would erroneously pick up
4019              * the wrong half of the union for the "val" element.  To rectify
4020              * this, the entire 8-byte structure is byteswapped, followed by
4021              * a swap of the 4-byte val field.  In other cases, the data is
4022              * already in proper host byte order (see the sketch below). */
4023 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4024 		target_su.buf = tswapal(target_su.buf);
4025 		arg.val = tswap32(target_su.val);
4026 	    } else {
4027 		arg.val = target_su.val;
4028 	    }
4029             ret = get_errno(semctl(semid, semnum, cmd, arg));
4030             break;
4031 	case GETALL:
4032 	case SETALL:
4033             err = target_to_host_semarray(semid, &array, target_su.array);
4034             if (err)
4035                 return err;
4036             arg.array = array;
4037             ret = get_errno(semctl(semid, semnum, cmd, arg));
4038             err = host_to_target_semarray(semid, target_su.array, &array);
4039             if (err)
4040                 return err;
4041             break;
4042 	case IPC_STAT:
4043 	case IPC_SET:
4044 	case SEM_STAT:
4045             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4046             if (err)
4047                 return err;
4048             arg.buf = &dsarg;
4049             ret = get_errno(semctl(semid, semnum, cmd, arg));
4050             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4051             if (err)
4052                 return err;
4053             break;
4054 	case IPC_INFO:
4055 	case SEM_INFO:
4056             arg.__buf = &seminfo;
4057             ret = get_errno(semctl(semid, semnum, cmd, arg));
4058             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4059             if (err)
4060                 return err;
4061             break;
4062 	case IPC_RMID:
4063 	case GETPID:
4064 	case GETNCNT:
4065 	case GETZCNT:
4066             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4067             break;
4068     }
4069 
4070     return ret;
4071 }
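
/*
 * A worked example of the GETVAL/SETVAL byte-swapping above, assuming a
 * big-endian 64-bit guest on a little-endian host and (purely for
 * illustration) that the unused half of the union is zero.  Not part of
 * the build.
 */
#if 0   /* illustrative sketch */
static void semun_swap_example(void)
{
    /* The guest stored u.val = 0x11223344; seen as an 8-byte value the
     * int occupies the most-significant half: 0x1122334400000000. */
    union target_semun target_su = { .buf = 0x1122334400000000ULL };
    int val;

    /* Reading target_su.val directly on the LE host would yield 0. */
    target_su.buf = tswapal(target_su.buf); /* -> 0x0000000044332211 */
    val = tswap32(target_su.val);           /* -> 0x11223344, as intended */
    (void)val;
}
#endif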
4072 
4073 struct target_sembuf {
4074     unsigned short sem_num;
4075     short sem_op;
4076     short sem_flg;
4077 };
4078 
4079 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4080                                              abi_ulong target_addr,
4081                                              unsigned nsops)
4082 {
4083     struct target_sembuf *target_sembuf;
4084     int i;
4085 
4086     target_sembuf = lock_user(VERIFY_READ, target_addr,
4087                               nsops*sizeof(struct target_sembuf), 1);
4088     if (!target_sembuf)
4089         return -TARGET_EFAULT;
4090 
4091     for(i=0; i<nsops; i++) {
4092         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4093         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4094         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4095     }
4096 
4097     unlock_user(target_sembuf, target_addr, 0);
4098 
4099     return 0;
4100 }
4101 
4102 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4103     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4104 
4105 /*
4106  * This macro is required to handle the s390 variant, which passes the
4107  * arguments in a different order than the default (see the sketch below).
4108  */
4109 #ifdef __s390x__
4110 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4111   (__nsops), (__timeout), (__sops)
4112 #else
4113 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4114   (__nsops), 0, (__sops), (__timeout)
4115 #endif
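
/*
 * For clarity, the two expansions of SEMTIMEDOP_IPC_ARGS at its call site
 * in do_semtimedop() below (sketch only):
 *
 *   default: safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 *   s390x:   safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 *
 * i.e. s390 carries the timeout in the "third" slot and has no sixth
 * parameter.
 */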
4116 
4117 static inline abi_long do_semtimedop(int semid,
4118                                      abi_long ptr,
4119                                      unsigned nsops,
4120                                      abi_long timeout, bool time64)
4121 {
4122     struct sembuf *sops;
4123     struct timespec ts, *pts = NULL;
4124     abi_long ret;
4125 
4126     if (timeout) {
4127         pts = &ts;
4128         if (time64) {
4129             if (target_to_host_timespec64(pts, timeout)) {
4130                 return -TARGET_EFAULT;
4131             }
4132         } else {
4133             if (target_to_host_timespec(pts, timeout)) {
4134                 return -TARGET_EFAULT;
4135             }
4136         }
4137     }
4138 
4139     if (nsops > TARGET_SEMOPM) {
4140         return -TARGET_E2BIG;
4141     }
4142 
4143     sops = g_new(struct sembuf, nsops);
4144 
4145     if (target_to_host_sembuf(sops, ptr, nsops)) {
4146         g_free(sops);
4147         return -TARGET_EFAULT;
4148     }
4149 
4150     ret = -TARGET_ENOSYS;
4151 #ifdef __NR_semtimedop
4152     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4153 #endif
4154 #ifdef __NR_ipc
4155     if (ret == -TARGET_ENOSYS) {
4156         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4157                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4158     }
4159 #endif
4160     g_free(sops);
4161     return ret;
4162 }
4163 #endif
4164 
4165 struct target_msqid_ds
4166 {
4167     struct target_ipc_perm msg_perm;
4168     abi_ulong msg_stime;
4169 #if TARGET_ABI_BITS == 32
4170     abi_ulong __unused1;
4171 #endif
4172     abi_ulong msg_rtime;
4173 #if TARGET_ABI_BITS == 32
4174     abi_ulong __unused2;
4175 #endif
4176     abi_ulong msg_ctime;
4177 #if TARGET_ABI_BITS == 32
4178     abi_ulong __unused3;
4179 #endif
4180     abi_ulong __msg_cbytes;
4181     abi_ulong msg_qnum;
4182     abi_ulong msg_qbytes;
4183     abi_ulong msg_lspid;
4184     abi_ulong msg_lrpid;
4185     abi_ulong __unused4;
4186     abi_ulong __unused5;
4187 };
4188 
4189 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4190                                                abi_ulong target_addr)
4191 {
4192     struct target_msqid_ds *target_md;
4193 
4194     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4195         return -TARGET_EFAULT;
4196     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4197         return -TARGET_EFAULT;
4198     host_md->msg_stime = tswapal(target_md->msg_stime);
4199     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4200     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4201     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4202     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4203     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4204     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4205     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4206     unlock_user_struct(target_md, target_addr, 0);
4207     return 0;
4208 }
4209 
4210 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4211                                                struct msqid_ds *host_md)
4212 {
4213     struct target_msqid_ds *target_md;
4214 
4215     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4216         return -TARGET_EFAULT;
4217     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4218         return -TARGET_EFAULT;
4219     target_md->msg_stime = tswapal(host_md->msg_stime);
4220     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4221     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4222     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4223     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4224     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4225     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4226     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4227     unlock_user_struct(target_md, target_addr, 1);
4228     return 0;
4229 }
4230 
4231 struct target_msginfo {
4232     int msgpool;
4233     int msgmap;
4234     int msgmax;
4235     int msgmnb;
4236     int msgmni;
4237     int msgssz;
4238     int msgtql;
4239     unsigned short int msgseg;
4240 };
4241 
4242 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4243                                               struct msginfo *host_msginfo)
4244 {
4245     struct target_msginfo *target_msginfo;
4246     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4247         return -TARGET_EFAULT;
4248     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4249     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4250     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4251     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4252     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4253     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4254     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4255     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4256     unlock_user_struct(target_msginfo, target_addr, 1);
4257     return 0;
4258 }
4259 
4260 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4261 {
4262     struct msqid_ds dsarg;
4263     struct msginfo msginfo;
4264     abi_long ret = -TARGET_EINVAL;
4265 
4266     cmd &= 0xff;
4267 
4268     switch (cmd) {
4269     case IPC_STAT:
4270     case IPC_SET:
4271     case MSG_STAT:
4272         if (target_to_host_msqid_ds(&dsarg,ptr))
4273             return -TARGET_EFAULT;
4274         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4275         if (host_to_target_msqid_ds(ptr,&dsarg))
4276             return -TARGET_EFAULT;
4277         break;
4278     case IPC_RMID:
4279         ret = get_errno(msgctl(msgid, cmd, NULL));
4280         break;
4281     case IPC_INFO:
4282     case MSG_INFO:
4283         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4284         if (host_to_target_msginfo(ptr, &msginfo))
4285             return -TARGET_EFAULT;
4286         break;
4287     }
4288 
4289     return ret;
4290 }
4291 
4292 struct target_msgbuf {
4293     abi_long mtype;
4294     char	mtext[1];
4295 };
4296 
4297 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4298                                  ssize_t msgsz, int msgflg)
4299 {
4300     struct target_msgbuf *target_mb;
4301     struct msgbuf *host_mb;
4302     abi_long ret = 0;
4303 
4304     if (msgsz < 0) {
4305         return -TARGET_EINVAL;
4306     }
4307 
4308     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4309         return -TARGET_EFAULT;
4310     host_mb = g_try_malloc(msgsz + sizeof(long));
4311     if (!host_mb) {
4312         unlock_user_struct(target_mb, msgp, 0);
4313         return -TARGET_ENOMEM;
4314     }
4315     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4316     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4317     ret = -TARGET_ENOSYS;
4318 #ifdef __NR_msgsnd
4319     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4320 #endif
4321 #ifdef __NR_ipc
4322     if (ret == -TARGET_ENOSYS) {
4323 #ifdef __s390x__
4324         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4325                                  host_mb));
4326 #else
4327         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4328                                  host_mb, 0));
4329 #endif
4330     }
4331 #endif
4332     g_free(host_mb);
4333     unlock_user_struct(target_mb, msgp, 0);
4334 
4335     return ret;
4336 }
4337 
4338 #ifdef __NR_ipc
4339 #if defined(__sparc__)
4340 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4341 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4342 #elif defined(__s390x__)
4343 /* The s390 sys_ipc variant has only five parameters.  */
4344 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4345     ((long int[]){(long int)__msgp, __msgtyp})
4346 #else
4347 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4348     ((long int[]){(long int)__msgp, __msgtyp}), 0
4349 #endif
4350 #endif
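
/*
 * A sketch of how MSGRCV_ARGS expands at its call site in do_msgrcv() below:
 *
 *   SPARC:   safe_ipc(..., msgflg, host_mb, msgtyp);
 *   s390x:   safe_ipc(..., msgflg, (long int[]){(long int)host_mb, msgtyp});
 *   default: safe_ipc(..., msgflg, (long int[]){(long int)host_mb, msgtyp}, 0);
 *
 * The compound-literal array is the historical sys_ipc "kludge" that packs
 * msgp and msgtyp behind a single pointer.
 */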
4351 
4352 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4353                                  ssize_t msgsz, abi_long msgtyp,
4354                                  int msgflg)
4355 {
4356     struct target_msgbuf *target_mb;
4357     char *target_mtext;
4358     struct msgbuf *host_mb;
4359     abi_long ret = 0;
4360 
4361     if (msgsz < 0) {
4362         return -TARGET_EINVAL;
4363     }
4364 
4365     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4366         return -TARGET_EFAULT;
4367 
4368     host_mb = g_try_malloc(msgsz + sizeof(long));
4369     if (!host_mb) {
4370         ret = -TARGET_ENOMEM;
4371         goto end;
4372     }
4373     ret = -TARGET_ENOSYS;
4374 #ifdef __NR_msgrcv
4375     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4376 #endif
4377 #ifdef __NR_ipc
4378     if (ret == -TARGET_ENOSYS) {
4379         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4380                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4381     }
4382 #endif
4383 
4384     if (ret > 0) {
4385         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4386         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4387         if (!target_mtext) {
4388             ret = -TARGET_EFAULT;
4389             goto end;
4390         }
4391         memcpy(target_mb->mtext, host_mb->mtext, ret);
4392         unlock_user(target_mtext, target_mtext_addr, ret);
4393     }
4394 
4395     target_mb->mtype = tswapal(host_mb->mtype);
4396 
4397 end:
4398     if (target_mb)
4399         unlock_user_struct(target_mb, msgp, 1);
4400     g_free(host_mb);
4401     return ret;
4402 }
4403 
4404 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4405                                                abi_ulong target_addr)
4406 {
4407     struct target_shmid_ds *target_sd;
4408 
4409     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4410         return -TARGET_EFAULT;
4411     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4412         return -TARGET_EFAULT;
4413     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4414     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4415     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4416     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4417     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4418     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4419     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4420     unlock_user_struct(target_sd, target_addr, 0);
4421     return 0;
4422 }
4423 
4424 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4425                                                struct shmid_ds *host_sd)
4426 {
4427     struct target_shmid_ds *target_sd;
4428 
4429     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4430         return -TARGET_EFAULT;
4431     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4432         return -TARGET_EFAULT;
4433     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4434     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4435     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4436     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4437     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4438     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4439     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4440     unlock_user_struct(target_sd, target_addr, 1);
4441     return 0;
4442 }
4443 
4444 struct  target_shminfo {
4445     abi_ulong shmmax;
4446     abi_ulong shmmin;
4447     abi_ulong shmmni;
4448     abi_ulong shmseg;
4449     abi_ulong shmall;
4450 };
4451 
4452 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4453                                               struct shminfo *host_shminfo)
4454 {
4455     struct target_shminfo *target_shminfo;
4456     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4457         return -TARGET_EFAULT;
4458     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4459     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4460     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4461     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4462     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4463     unlock_user_struct(target_shminfo, target_addr, 1);
4464     return 0;
4465 }
4466 
4467 struct target_shm_info {
4468     int used_ids;
4469     abi_ulong shm_tot;
4470     abi_ulong shm_rss;
4471     abi_ulong shm_swp;
4472     abi_ulong swap_attempts;
4473     abi_ulong swap_successes;
4474 };
4475 
4476 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4477                                                struct shm_info *host_shm_info)
4478 {
4479     struct target_shm_info *target_shm_info;
4480     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4481         return -TARGET_EFAULT;
4482     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4483     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4484     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4485     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4486     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4487     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4488     unlock_user_struct(target_shm_info, target_addr, 1);
4489     return 0;
4490 }
4491 
4492 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4493 {
4494     struct shmid_ds dsarg;
4495     struct shminfo shminfo;
4496     struct shm_info shm_info;
4497     abi_long ret = -TARGET_EINVAL;
4498 
4499     cmd &= 0xff;
4500 
4501     switch(cmd) {
4502     case IPC_STAT:
4503     case IPC_SET:
4504     case SHM_STAT:
4505         if (target_to_host_shmid_ds(&dsarg, buf))
4506             return -TARGET_EFAULT;
4507         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4508         if (host_to_target_shmid_ds(buf, &dsarg))
4509             return -TARGET_EFAULT;
4510         break;
4511     case IPC_INFO:
4512         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4513         if (host_to_target_shminfo(buf, &shminfo))
4514             return -TARGET_EFAULT;
4515         break;
4516     case SHM_INFO:
4517         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4518         if (host_to_target_shm_info(buf, &shm_info))
4519             return -TARGET_EFAULT;
4520         break;
4521     case IPC_RMID:
4522     case SHM_LOCK:
4523     case SHM_UNLOCK:
4524         ret = get_errno(shmctl(shmid, cmd, NULL));
4525         break;
4526     }
4527 
4528     return ret;
4529 }
4530 
4531 #ifdef TARGET_NR_ipc
4532 /* ??? This only works with linear mappings.  */
4533 /* do_ipc() must return target values and target errnos. */
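/*
 * The old-style ipc(2) multiplexer: the low 16 bits of 'call' select the
 * IPCOP_* operation and the upper 16 bits carry the interface version,
 * which only matters for a few operations (msgrcv and shmat below).
 */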
4534 static abi_long do_ipc(CPUArchState *cpu_env,
4535                        unsigned int call, abi_long first,
4536                        abi_long second, abi_long third,
4537                        abi_long ptr, abi_long fifth)
4538 {
4539     int version;
4540     abi_long ret = 0;
4541 
4542     version = call >> 16;
4543     call &= 0xffff;
4544 
4545     switch (call) {
4546     case IPCOP_semop:
4547         ret = do_semtimedop(first, ptr, second, 0, false);
4548         break;
4549     case IPCOP_semtimedop:
4550     /*
4551      * The s390 sys_ipc variant has only five parameters instead of six
4552      * (as in the default variant), and the only difference is the handling of
4553      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4554      * to a struct timespec while the generic variant uses the fifth parameter.
4555      */
4556 #if defined(TARGET_S390X)
4557         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4558 #else
4559         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4560 #endif
4561         break;
4562 
4563     case IPCOP_semget:
4564         ret = get_errno(semget(first, second, third));
4565         break;
4566 
4567     case IPCOP_semctl: {
4568         /* The semun argument to semctl is passed by value, so dereference the
4569          * ptr argument. */
4570         abi_ulong atptr;
4571         get_user_ual(atptr, ptr);
4572         ret = do_semctl(first, second, third, atptr);
4573         break;
4574     }
4575 
4576     case IPCOP_msgget:
4577         ret = get_errno(msgget(first, second));
4578         break;
4579 
4580     case IPCOP_msgsnd:
4581         ret = do_msgsnd(first, ptr, second, third);
4582         break;
4583 
4584     case IPCOP_msgctl:
4585         ret = do_msgctl(first, second, ptr);
4586         break;
4587 
4588     case IPCOP_msgrcv:
4589         switch (version) {
4590         case 0:
4591             {
4592                 struct target_ipc_kludge {
4593                     abi_long msgp;
4594                     abi_long msgtyp;
4595                 } *tmp;
4596 
4597                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4598                     ret = -TARGET_EFAULT;
4599                     break;
4600                 }
4601 
4602                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4603 
4604                 unlock_user_struct(tmp, ptr, 0);
4605                 break;
4606             }
4607         default:
4608             ret = do_msgrcv(first, ptr, second, fifth, third);
4609         }
4610         break;
4611 
4612     case IPCOP_shmat:
4613         switch (version) {
4614         default:
4615         {
4616             abi_ulong raddr;
4617             raddr = target_shmat(cpu_env, first, ptr, second);
4618             if (is_error(raddr))
4619                 return get_errno(raddr);
4620             if (put_user_ual(raddr, third))
4621                 return -TARGET_EFAULT;
4622             break;
4623         }
4624         case 1:
4625             ret = -TARGET_EINVAL;
4626             break;
4627         }
4628         break;
4629     case IPCOP_shmdt:
4630         ret = target_shmdt(ptr);
4631         break;
4632 
4633     case IPCOP_shmget:
4634         /* IPC_* flag values are the same on all Linux platforms */
4635         ret = get_errno(shmget(first, second, third));
4636         break;
4637 
4638     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4639     case IPCOP_shmctl:
4640         ret = do_shmctl(first, second, ptr);
4641         break;
4642     default:
4643         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4644                       call, version);
4645         ret = -TARGET_ENOSYS;
4646         break;
4647     }
4648     return ret;
4649 }
4650 #endif
4651 
4652 /* kernel structure types definitions */
4653 
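/*
 * syscall_types.h is included twice with different STRUCT()/STRUCT_SPECIAL()
 * definitions: the first pass builds the STRUCT_* enum of thunk type ids,
 * the second emits a struct_<name>_def argtype description for each
 * non-special structure.
 */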
4654 #define STRUCT(name, ...) STRUCT_ ## name,
4655 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4656 enum {
4657 #include "syscall_types.h"
4658 STRUCT_MAX
4659 };
4660 #undef STRUCT
4661 #undef STRUCT_SPECIAL
4662 
4663 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4664 #define STRUCT_SPECIAL(name)
4665 #include "syscall_types.h"
4666 #undef STRUCT
4667 #undef STRUCT_SPECIAL
4668 
4669 #define MAX_STRUCT_SIZE 4096
4670 
4671 #ifdef CONFIG_FIEMAP
4672 /* So fiemap access checks don't overflow on 32 bit systems.
4673  * This is very slightly smaller than the limit imposed by
4674  * the underlying kernel.
4675  */
4676 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4677                             / sizeof(struct fiemap_extent))
4678 
4679 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4680                                        int fd, int cmd, abi_long arg)
4681 {
4682     /* The parameter for this ioctl is a struct fiemap followed
4683      * by an array of struct fiemap_extent whose size is set
4684      * in fiemap->fm_extent_count. The array is filled in by the
4685      * ioctl.
4686      */
4687     int target_size_in, target_size_out;
4688     struct fiemap *fm;
4689     const argtype *arg_type = ie->arg_type;
4690     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4691     void *argptr, *p;
4692     abi_long ret;
4693     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4694     uint32_t outbufsz;
4695     int free_fm = 0;
4696 
4697     assert(arg_type[0] == TYPE_PTR);
4698     assert(ie->access == IOC_RW);
4699     arg_type++;
4700     target_size_in = thunk_type_size(arg_type, 0);
4701     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4702     if (!argptr) {
4703         return -TARGET_EFAULT;
4704     }
4705     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4706     unlock_user(argptr, arg, 0);
4707     fm = (struct fiemap *)buf_temp;
4708     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4709         return -TARGET_EINVAL;
4710     }
4711 
4712     outbufsz = sizeof (*fm) +
4713         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4714 
4715     if (outbufsz > MAX_STRUCT_SIZE) {
4716         /* We can't fit all the extents into the fixed size buffer.
4717          * Allocate one that is large enough and use it instead.
4718          */
4719         fm = g_try_malloc(outbufsz);
4720         if (!fm) {
4721             return -TARGET_ENOMEM;
4722         }
4723         memcpy(fm, buf_temp, sizeof(struct fiemap));
4724         free_fm = 1;
4725     }
4726     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4727     if (!is_error(ret)) {
4728         target_size_out = target_size_in;
4729         /* An extent_count of 0 means we were only counting the extents
4730          * so there are no structs to copy
4731          */
4732         if (fm->fm_extent_count != 0) {
4733             target_size_out += fm->fm_mapped_extents * extent_size;
4734         }
4735         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4736         if (!argptr) {
4737             ret = -TARGET_EFAULT;
4738         } else {
4739             /* Convert the struct fiemap */
4740             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4741             if (fm->fm_extent_count != 0) {
4742                 p = argptr + target_size_in;
4743                 /* ...and then all the struct fiemap_extents */
4744                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4745                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4746                                   THUNK_TARGET);
4747                     p += extent_size;
4748                 }
4749             }
4750             unlock_user(argptr, arg, target_size_out);
4751         }
4752     }
4753     if (free_fm) {
4754         g_free(fm);
4755     }
4756     return ret;
4757 }
4758 #endif
4759 
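/*
 * SIOCGIFCONF: the target's struct ifreq can differ in size from the host's,
 * so the ioctl is run against a host-sized buffer and the returned entries
 * are converted one by one.  A NULL ifc_buf is the "query required length"
 * form and needs no buffer conversion.
 */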
4760 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4761                                 int fd, int cmd, abi_long arg)
4762 {
4763     const argtype *arg_type = ie->arg_type;
4764     int target_size;
4765     void *argptr;
4766     int ret;
4767     struct ifconf *host_ifconf;
4768     uint32_t outbufsz;
4769     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4770     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4771     int target_ifreq_size;
4772     int nb_ifreq;
4773     int free_buf = 0;
4774     int i;
4775     int target_ifc_len;
4776     abi_long target_ifc_buf;
4777     int host_ifc_len;
4778     char *host_ifc_buf;
4779 
4780     assert(arg_type[0] == TYPE_PTR);
4781     assert(ie->access == IOC_RW);
4782 
4783     arg_type++;
4784     target_size = thunk_type_size(arg_type, 0);
4785 
4786     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4787     if (!argptr)
4788         return -TARGET_EFAULT;
4789     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4790     unlock_user(argptr, arg, 0);
4791 
4792     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4793     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4794     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4795 
4796     if (target_ifc_buf != 0) {
4797         target_ifc_len = host_ifconf->ifc_len;
4798         nb_ifreq = target_ifc_len / target_ifreq_size;
4799         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4800 
4801         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4802         if (outbufsz > MAX_STRUCT_SIZE) {
4803             /*
4804              * We can't fit all the ifreq entries into the fixed size buffer.
4805              * Allocate one that is large enough and use it instead.
4806              */
4807             host_ifconf = g_try_malloc(outbufsz);
4808             if (!host_ifconf) {
4809                 return -TARGET_ENOMEM;
4810             }
4811             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4812             free_buf = 1;
4813         }
4814         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4815 
4816         host_ifconf->ifc_len = host_ifc_len;
4817     } else {
4818       host_ifc_buf = NULL;
4819     }
4820     host_ifconf->ifc_buf = host_ifc_buf;
4821 
4822     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4823     if (!is_error(ret)) {
4824         /* convert host ifc_len to target ifc_len */
4825 
4826         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4827         target_ifc_len = nb_ifreq * target_ifreq_size;
4828         host_ifconf->ifc_len = target_ifc_len;
4829 
4830         /* restore target ifc_buf */
4831 
4832         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4833 
4834         /* copy struct ifconf to target user */
4835 
4836         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4837         if (!argptr)
4838             return -TARGET_EFAULT;
4839         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4840         unlock_user(argptr, arg, target_size);
4841 
4842         if (target_ifc_buf != 0) {
4843             /* copy ifreq[] to target user */
4844             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4845             for (i = 0; i < nb_ifreq ; i++) {
4846                 thunk_convert(argptr + i * target_ifreq_size,
4847                               host_ifc_buf + i * sizeof(struct ifreq),
4848                               ifreq_arg_type, THUNK_TARGET);
4849             }
4850             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4851         }
4852     }
4853 
4854     if (free_buf) {
4855         g_free(host_ifconf);
4856     }
4857 
4858     return ret;
4859 }
4860 
4861 #if defined(CONFIG_USBFS)
4862 #if HOST_LONG_BITS > 64
4863 #error USBDEVFS thunks do not support >64 bit hosts yet.
4864 #endif
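/*
 * Book-keeping for asynchronous USBDEVFS URBs: each guest-submitted URB gets
 * a live_urb holding the host copy plus the guest addresses of the URB and
 * of its data buffer.  The embedded host_urb is what the kernel sees, so
 * REAPURB can recover the metadata from its address with offsetof() below.
 */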
4865 struct live_urb {
4866     uint64_t target_urb_adr;
4867     uint64_t target_buf_adr;
4868     char *target_buf_ptr;
4869     struct usbdevfs_urb host_urb;
4870 };
4871 
4872 static GHashTable *usbdevfs_urb_hashtable(void)
4873 {
4874     static GHashTable *urb_hashtable;
4875 
4876     if (!urb_hashtable) {
4877         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4878     }
4879     return urb_hashtable;
4880 }
4881 
4882 static void urb_hashtable_insert(struct live_urb *urb)
4883 {
4884     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4885     g_hash_table_insert(urb_hashtable, urb, urb);
4886 }
4887 
4888 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4889 {
4890     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4891     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4892 }
4893 
4894 static void urb_hashtable_remove(struct live_urb *urb)
4895 {
4896     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4897     g_hash_table_remove(urb_hashtable, urb);
4898 }
4899 
4900 static abi_long
4901 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4902                           int fd, int cmd, abi_long arg)
4903 {
4904     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4905     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4906     struct live_urb *lurb;
4907     void *argptr;
4908     uint64_t hurb;
4909     int target_size;
4910     uintptr_t target_urb_adr;
4911     abi_long ret;
4912 
4913     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4914 
4915     memset(buf_temp, 0, sizeof(uint64_t));
4916     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4917     if (is_error(ret)) {
4918         return ret;
4919     }
4920 
4921     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4922     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4923     if (!lurb->target_urb_adr) {
4924         return -TARGET_EFAULT;
4925     }
4926     urb_hashtable_remove(lurb);
4927     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4928         lurb->host_urb.buffer_length);
4929     lurb->target_buf_ptr = NULL;
4930 
4931     /* restore the guest buffer pointer */
4932     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4933 
4934     /* update the guest urb struct */
4935     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4936     if (!argptr) {
4937         g_free(lurb);
4938         return -TARGET_EFAULT;
4939     }
4940     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4941     unlock_user(argptr, lurb->target_urb_adr, target_size);
4942 
4943     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4944     /* write back the urb handle */
4945     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4946     if (!argptr) {
4947         g_free(lurb);
4948         return -TARGET_EFAULT;
4949     }
4950 
4951     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4952     target_urb_adr = lurb->target_urb_adr;
4953     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4954     unlock_user(argptr, arg, target_size);
4955 
4956     g_free(lurb);
4957     return ret;
4958 }
4959 
4960 static abi_long
4961 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4962                              uint8_t *buf_temp __attribute__((unused)),
4963                              int fd, int cmd, abi_long arg)
4964 {
4965     struct live_urb *lurb;
4966 
4967     /* map target address back to host URB with metadata. */
4968     lurb = urb_hashtable_lookup(arg);
4969     if (!lurb) {
4970         return -TARGET_EFAULT;
4971     }
4972     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4973 }
4974 
4975 static abi_long
4976 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4977                             int fd, int cmd, abi_long arg)
4978 {
4979     const argtype *arg_type = ie->arg_type;
4980     int target_size;
4981     abi_long ret;
4982     void *argptr;
4983     int rw_dir;
4984     struct live_urb *lurb;
4985 
4986     /*
4987      * Each submitted URB needs to map to a unique ID for the
4988      * kernel, and that unique ID needs to be a pointer to
4989      * host memory.  Hence, we need to malloc for each URB.
4990      * Isochronous transfers have a variable-length struct.
4991      */
4992     arg_type++;
4993     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4994 
4995     /* construct host copy of urb and metadata */
4996     lurb = g_try_new0(struct live_urb, 1);
4997     if (!lurb) {
4998         return -TARGET_ENOMEM;
4999     }
5000 
5001     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5002     if (!argptr) {
5003         g_free(lurb);
5004         return -TARGET_EFAULT;
5005     }
5006     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5007     unlock_user(argptr, arg, 0);
5008 
5009     lurb->target_urb_adr = arg;
5010     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5011 
5012     /* buffer space used depends on endpoint type so lock the entire buffer */
5013     /* control type urbs should check the buffer contents for true direction */
5014     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5015     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5016         lurb->host_urb.buffer_length, 1);
5017     if (lurb->target_buf_ptr == NULL) {
5018         g_free(lurb);
5019         return -TARGET_EFAULT;
5020     }
5021 
5022     /* update buffer pointer in host copy */
5023     lurb->host_urb.buffer = lurb->target_buf_ptr;
5024 
5025     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5026     if (is_error(ret)) {
5027         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5028         g_free(lurb);
5029     } else {
5030         urb_hashtable_insert(lurb);
5031     }
5032 
5033     return ret;
5034 }
5035 #endif /* CONFIG_USBFS */
5036 
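/*
 * Device-mapper ioctls carry a variable-sized payload after struct dm_ioctl.
 * The handler below copies the request into a buffer large enough for
 * data_size, converts the per-command payload in both directions and writes
 * the result back to guest memory.
 */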
5037 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5038                             int cmd, abi_long arg)
5039 {
5040     void *argptr;
5041     struct dm_ioctl *host_dm;
5042     abi_long guest_data;
5043     uint32_t guest_data_size;
5044     int target_size;
5045     const argtype *arg_type = ie->arg_type;
5046     abi_long ret;
5047     void *big_buf = NULL;
5048     char *host_data;
5049 
5050     arg_type++;
5051     target_size = thunk_type_size(arg_type, 0);
5052     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5053     if (!argptr) {
5054         ret = -TARGET_EFAULT;
5055         goto out;
5056     }
5057     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5058     unlock_user(argptr, arg, 0);
5059 
5060     /* buf_temp is too small, so fetch things into a bigger buffer */
5061     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5062     memcpy(big_buf, buf_temp, target_size);
5063     buf_temp = big_buf;
5064     host_dm = big_buf;
5065 
5066     guest_data = arg + host_dm->data_start;
5067     if ((guest_data - arg) < 0) {
5068         ret = -TARGET_EINVAL;
5069         goto out;
5070     }
5071     guest_data_size = host_dm->data_size - host_dm->data_start;
5072     host_data = (char*)host_dm + host_dm->data_start;
5073 
5074     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5075     if (!argptr) {
5076         ret = -TARGET_EFAULT;
5077         goto out;
5078     }
5079 
5080     switch (ie->host_cmd) {
5081     case DM_REMOVE_ALL:
5082     case DM_LIST_DEVICES:
5083     case DM_DEV_CREATE:
5084     case DM_DEV_REMOVE:
5085     case DM_DEV_SUSPEND:
5086     case DM_DEV_STATUS:
5087     case DM_DEV_WAIT:
5088     case DM_TABLE_STATUS:
5089     case DM_TABLE_CLEAR:
5090     case DM_TABLE_DEPS:
5091     case DM_LIST_VERSIONS:
5092         /* no input data */
5093         break;
5094     case DM_DEV_RENAME:
5095     case DM_DEV_SET_GEOMETRY:
5096         /* data contains only strings */
5097         memcpy(host_data, argptr, guest_data_size);
5098         break;
5099     case DM_TARGET_MSG:
5100         memcpy(host_data, argptr, guest_data_size);
5101         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5102         break;
5103     case DM_TABLE_LOAD:
5104     {
5105         void *gspec = argptr;
5106         void *cur_data = host_data;
5107         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5108         int spec_size = thunk_type_size(dm_arg_type, 0);
5109         int i;
5110 
5111         for (i = 0; i < host_dm->target_count; i++) {
5112             struct dm_target_spec *spec = cur_data;
5113             uint32_t next;
5114             int slen;
5115 
5116             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5117             slen = strlen((char*)gspec + spec_size) + 1;
5118             next = spec->next;
5119             spec->next = sizeof(*spec) + slen;
5120             strcpy((char*)&spec[1], gspec + spec_size);
5121             gspec += next;
5122             cur_data += spec->next;
5123         }
5124         break;
5125     }
5126     default:
5127         ret = -TARGET_EINVAL;
5128         unlock_user(argptr, guest_data, 0);
5129         goto out;
5130     }
5131     unlock_user(argptr, guest_data, 0);
5132 
5133     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5134     if (!is_error(ret)) {
5135         guest_data = arg + host_dm->data_start;
5136         guest_data_size = host_dm->data_size - host_dm->data_start;
5137         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5138         switch (ie->host_cmd) {
5139         case DM_REMOVE_ALL:
5140         case DM_DEV_CREATE:
5141         case DM_DEV_REMOVE:
5142         case DM_DEV_RENAME:
5143         case DM_DEV_SUSPEND:
5144         case DM_DEV_STATUS:
5145         case DM_TABLE_LOAD:
5146         case DM_TABLE_CLEAR:
5147         case DM_TARGET_MSG:
5148         case DM_DEV_SET_GEOMETRY:
5149             /* no return data */
5150             break;
5151         case DM_LIST_DEVICES:
5152         {
5153             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5154             uint32_t remaining_data = guest_data_size;
5155             void *cur_data = argptr;
5156             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5157             int nl_size = 12; /* can't use thunk_size due to alignment */
5158 
5159             while (1) {
5160                 uint32_t next = nl->next;
5161                 if (next) {
5162                     nl->next = nl_size + (strlen(nl->name) + 1);
5163                 }
5164                 if (remaining_data < nl->next) {
5165                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5166                     break;
5167                 }
5168                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5169                 strcpy(cur_data + nl_size, nl->name);
5170                 cur_data += nl->next;
5171                 remaining_data -= nl->next;
5172                 if (!next) {
5173                     break;
5174                 }
5175                 nl = (void*)nl + next;
5176             }
5177             break;
5178         }
5179         case DM_DEV_WAIT:
5180         case DM_TABLE_STATUS:
5181         {
5182             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5183             void *cur_data = argptr;
5184             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5185             int spec_size = thunk_type_size(dm_arg_type, 0);
5186             int i;
5187 
5188             for (i = 0; i < host_dm->target_count; i++) {
5189                 uint32_t next = spec->next;
5190                 int slen = strlen((char*)&spec[1]) + 1;
5191                 spec->next = (cur_data - argptr) + spec_size + slen;
5192                 if (guest_data_size < spec->next) {
5193                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5194                     break;
5195                 }
5196                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5197                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5198                 cur_data = argptr + spec->next;
5199                 spec = (void*)host_dm + host_dm->data_start + next;
5200             }
5201             break;
5202         }
5203         case DM_TABLE_DEPS:
5204         {
5205             void *hdata = (void*)host_dm + host_dm->data_start;
5206             int count = *(uint32_t*)hdata;
5207             uint64_t *hdev = hdata + 8;
5208             uint64_t *gdev = argptr + 8;
5209             int i;
5210 
5211             *(uint32_t*)argptr = tswap32(count);
5212             for (i = 0; i < count; i++) {
5213                 *gdev = tswap64(*hdev);
5214                 gdev++;
5215                 hdev++;
5216             }
5217             break;
5218         }
5219         case DM_LIST_VERSIONS:
5220         {
5221             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5222             uint32_t remaining_data = guest_data_size;
5223             void *cur_data = argptr;
5224             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5225             int vers_size = thunk_type_size(dm_arg_type, 0);
5226 
5227             while (1) {
5228                 uint32_t next = vers->next;
5229                 if (next) {
5230                     vers->next = vers_size + (strlen(vers->name) + 1);
5231                 }
5232                 if (remaining_data < vers->next) {
5233                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5234                     break;
5235                 }
5236                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5237                 strcpy(cur_data + vers_size, vers->name);
5238                 cur_data += vers->next;
5239                 remaining_data -= vers->next;
5240                 if (!next) {
5241                     break;
5242                 }
5243                 vers = (void*)vers + next;
5244             }
5245             break;
5246         }
5247         default:
5248             unlock_user(argptr, guest_data, 0);
5249             ret = -TARGET_EINVAL;
5250             goto out;
5251         }
5252         unlock_user(argptr, guest_data, guest_data_size);
5253 
5254         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5255         if (!argptr) {
5256             ret = -TARGET_EFAULT;
5257             goto out;
5258         }
5259         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5260         unlock_user(argptr, arg, target_size);
5261     }
5262 out:
5263     g_free(big_buf);
5264     return ret;
5265 }
5266 
5267 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5268                                int cmd, abi_long arg)
5269 {
5270     void *argptr;
5271     int target_size;
5272     const argtype *arg_type = ie->arg_type;
5273     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5274     abi_long ret;
5275 
5276     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5277     struct blkpg_partition host_part;
5278 
5279     /* Read and convert blkpg */
5280     arg_type++;
5281     target_size = thunk_type_size(arg_type, 0);
5282     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5283     if (!argptr) {
5284         ret = -TARGET_EFAULT;
5285         goto out;
5286     }
5287     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5288     unlock_user(argptr, arg, 0);
5289 
5290     switch (host_blkpg->op) {
5291     case BLKPG_ADD_PARTITION:
5292     case BLKPG_DEL_PARTITION:
5293         /* payload is struct blkpg_partition */
5294         break;
5295     default:
5296         /* Unknown opcode */
5297         ret = -TARGET_EINVAL;
5298         goto out;
5299     }
5300 
5301     /* Read and convert blkpg->data */
5302     arg = (abi_long)(uintptr_t)host_blkpg->data;
5303     target_size = thunk_type_size(part_arg_type, 0);
5304     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5305     if (!argptr) {
5306         ret = -TARGET_EFAULT;
5307         goto out;
5308     }
5309     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5310     unlock_user(argptr, arg, 0);
5311 
5312     /* Swizzle the data pointer to our local copy and call! */
5313     host_blkpg->data = &host_part;
5314     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5315 
5316 out:
5317     return ret;
5318 }
5319 
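/*
 * Routing table ioctls (SIOCADDRT/SIOCDELRT): struct rtentry contains an
 * rt_dev pointer to a device name string in guest memory, so the structure
 * is converted field by field here in order to lock and pass that string
 * through separately.
 */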
5320 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5321                                 int fd, int cmd, abi_long arg)
5322 {
5323     const argtype *arg_type = ie->arg_type;
5324     const StructEntry *se;
5325     const argtype *field_types;
5326     const int *dst_offsets, *src_offsets;
5327     int target_size;
5328     void *argptr;
5329     abi_ulong *target_rt_dev_ptr = NULL;
5330     unsigned long *host_rt_dev_ptr = NULL;
5331     abi_long ret;
5332     int i;
5333 
5334     assert(ie->access == IOC_W);
5335     assert(*arg_type == TYPE_PTR);
5336     arg_type++;
5337     assert(*arg_type == TYPE_STRUCT);
5338     target_size = thunk_type_size(arg_type, 0);
5339     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5340     if (!argptr) {
5341         return -TARGET_EFAULT;
5342     }
5343     arg_type++;
5344     assert(*arg_type == (int)STRUCT_rtentry);
5345     se = struct_entries + *arg_type++;
5346     assert(se->convert[0] == NULL);
5347     /* convert struct here to be able to catch rt_dev string */
5348     field_types = se->field_types;
5349     dst_offsets = se->field_offsets[THUNK_HOST];
5350     src_offsets = se->field_offsets[THUNK_TARGET];
5351     for (i = 0; i < se->nb_fields; i++) {
5352         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5353             assert(*field_types == TYPE_PTRVOID);
5354             target_rt_dev_ptr = argptr + src_offsets[i];
5355             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5356             if (*target_rt_dev_ptr != 0) {
5357                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5358                                                   tswapal(*target_rt_dev_ptr));
5359                 if (!*host_rt_dev_ptr) {
5360                     unlock_user(argptr, arg, 0);
5361                     return -TARGET_EFAULT;
5362                 }
5363             } else {
5364                 *host_rt_dev_ptr = 0;
5365             }
5366             field_types++;
5367             continue;
5368         }
5369         field_types = thunk_convert(buf_temp + dst_offsets[i],
5370                                     argptr + src_offsets[i],
5371                                     field_types, THUNK_HOST);
5372     }
5373     unlock_user(argptr, arg, 0);
5374 
5375     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5376 
5377     assert(host_rt_dev_ptr != NULL);
5378     assert(target_rt_dev_ptr != NULL);
5379     if (*host_rt_dev_ptr != 0) {
5380         unlock_user((void *)*host_rt_dev_ptr,
5381                     *target_rt_dev_ptr, 0);
5382     }
5383     return ret;
5384 }
5385 
5386 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5387                                      int fd, int cmd, abi_long arg)
5388 {
5389     int sig = target_to_host_signal(arg);
5390     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5391 }
5392 
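/*
 * SIOCGSTAMP/SIOCGSTAMPNS exist in "old" and time64 variants on the target;
 * fetch the host timeval/timespec and convert it to whichever layout the
 * guest requested.
 */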
5393 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5394                                     int fd, int cmd, abi_long arg)
5395 {
5396     struct timeval tv;
5397     abi_long ret;
5398 
5399     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5400     if (is_error(ret)) {
5401         return ret;
5402     }
5403 
5404     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5405         if (copy_to_user_timeval(arg, &tv)) {
5406             return -TARGET_EFAULT;
5407         }
5408     } else {
5409         if (copy_to_user_timeval64(arg, &tv)) {
5410             return -TARGET_EFAULT;
5411         }
5412     }
5413 
5414     return ret;
5415 }
5416 
5417 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5418                                       int fd, int cmd, abi_long arg)
5419 {
5420     struct timespec ts;
5421     abi_long ret;
5422 
5423     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5424     if (is_error(ret)) {
5425         return ret;
5426     }
5427 
5428     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5429         if (host_to_target_timespec(arg, &ts)) {
5430             return -TARGET_EFAULT;
5431         }
5432     } else {
5433         if (host_to_target_timespec64(arg, &ts)) {
5434             return -TARGET_EFAULT;
5435         }
5436     }
5437 
5438     return ret;
5439 }
5440 
5441 #ifdef TIOCGPTPEER
5442 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5443                                      int fd, int cmd, abi_long arg)
5444 {
5445     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5446     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5447 }
5448 #endif
5449 
5450 #ifdef HAVE_DRM_H
5451 
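/*
 * DRM_IOCTL_VERSION returns three variable-length strings (name, date, desc)
 * into guest-supplied buffers; the helpers below lock those buffers around
 * the host ioctl and copy the lengths back afterwards.
 */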
5452 static void unlock_drm_version(struct drm_version *host_ver,
5453                                struct target_drm_version *target_ver,
5454                                bool copy)
5455 {
5456     unlock_user(host_ver->name, target_ver->name,
5457                                 copy ? host_ver->name_len : 0);
5458     unlock_user(host_ver->date, target_ver->date,
5459                                 copy ? host_ver->date_len : 0);
5460     unlock_user(host_ver->desc, target_ver->desc,
5461                                 copy ? host_ver->desc_len : 0);
5462 }
5463 
5464 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5465                                           struct target_drm_version *target_ver)
5466 {
5467     memset(host_ver, 0, sizeof(*host_ver));
5468 
5469     __get_user(host_ver->name_len, &target_ver->name_len);
5470     if (host_ver->name_len) {
5471         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5472                                    target_ver->name_len, 0);
5473         if (!host_ver->name) {
5474             return -EFAULT;
5475         }
5476     }
5477 
5478     __get_user(host_ver->date_len, &target_ver->date_len);
5479     if (host_ver->date_len) {
5480         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5481                                    target_ver->date_len, 0);
5482         if (!host_ver->date) {
5483             goto err;
5484         }
5485     }
5486 
5487     __get_user(host_ver->desc_len, &target_ver->desc_len);
5488     if (host_ver->desc_len) {
5489         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5490                                    target_ver->desc_len, 0);
5491         if (!host_ver->desc) {
5492             goto err;
5493         }
5494     }
5495 
5496     return 0;
5497 err:
5498     unlock_drm_version(host_ver, target_ver, false);
5499     return -EFAULT;
5500 }
5501 
5502 static inline void host_to_target_drmversion(
5503                                           struct target_drm_version *target_ver,
5504                                           struct drm_version *host_ver)
5505 {
5506     __put_user(host_ver->version_major, &target_ver->version_major);
5507     __put_user(host_ver->version_minor, &target_ver->version_minor);
5508     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5509     __put_user(host_ver->name_len, &target_ver->name_len);
5510     __put_user(host_ver->date_len, &target_ver->date_len);
5511     __put_user(host_ver->desc_len, &target_ver->desc_len);
5512     unlock_drm_version(host_ver, target_ver, true);
5513 }
5514 
5515 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5516                              int fd, int cmd, abi_long arg)
5517 {
5518     struct drm_version *ver;
5519     struct target_drm_version *target_ver;
5520     abi_long ret;
5521 
5522     switch (ie->host_cmd) {
5523     case DRM_IOCTL_VERSION:
5524         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5525             return -TARGET_EFAULT;
5526         }
5527         ver = (struct drm_version *)buf_temp;
5528         ret = target_to_host_drmversion(ver, target_ver);
5529         if (!is_error(ret)) {
5530             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5531             if (is_error(ret)) {
5532                 unlock_drm_version(ver, target_ver, false);
5533             } else {
5534                 host_to_target_drmversion(target_ver, ver);
5535             }
5536         }
5537         unlock_user_struct(target_ver, arg, 0);
5538         return ret;
5539     }
5540     return -TARGET_ENOSYS;
5541 }
5542 
5543 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5544                                            struct drm_i915_getparam *gparam,
5545                                            int fd, abi_long arg)
5546 {
5547     abi_long ret;
5548     int value;
5549     struct target_drm_i915_getparam *target_gparam;
5550 
5551     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5552         return -TARGET_EFAULT;
5553     }
5554 
5555     __get_user(gparam->param, &target_gparam->param);
5556     gparam->value = &value;
5557     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5558     put_user_s32(value, target_gparam->value);
5559 
5560     unlock_user_struct(target_gparam, arg, 0);
5561     return ret;
5562 }
5563 
5564 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5565                                   int fd, int cmd, abi_long arg)
5566 {
5567     switch (ie->host_cmd) {
5568     case DRM_IOCTL_I915_GETPARAM:
5569         return do_ioctl_drm_i915_getparam(ie,
5570                                           (struct drm_i915_getparam *)buf_temp,
5571                                           fd, arg);
5572     default:
5573         return -TARGET_ENOSYS;
5574     }
5575 }
5576 
5577 #endif
5578 
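/*
 * TUNSETTXFILTER takes a struct tun_filter followed by filter->count MAC
 * addresses, so the generic fixed-size conversion cannot be used: copy the
 * header and then the address array by hand.
 */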
5579 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5580                                         int fd, int cmd, abi_long arg)
5581 {
5582     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5583     struct tun_filter *target_filter;
5584     char *target_addr;
5585 
5586     assert(ie->access == IOC_W);
5587 
5588     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5589     if (!target_filter) {
5590         return -TARGET_EFAULT;
5591     }
5592     filter->flags = tswap16(target_filter->flags);
5593     filter->count = tswap16(target_filter->count);
5594     unlock_user(target_filter, arg, 0);
5595 
5596     if (filter->count) {
5597         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5598             MAX_STRUCT_SIZE) {
5599             return -TARGET_EFAULT;
5600         }
5601 
5602         target_addr = lock_user(VERIFY_READ,
5603                                 arg + offsetof(struct tun_filter, addr),
5604                                 filter->count * ETH_ALEN, 1);
5605         if (!target_addr) {
5606             return -TARGET_EFAULT;
5607         }
5608         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5609         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5610     }
5611 
5612     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5613 }
5614 
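/*
 * Table of supported ioctls, generated from ioctls.h.  Plain IOCTL() entries
 * are converted generically from their argument type description,
 * IOCTL_SPECIAL() entries name one of the do_ioctl_*() helpers above, and
 * IOCTL_IGNORE() entries have no host counterpart (host_cmd == 0).
 */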
5615 IOCTLEntry ioctl_entries[] = {
5616 #define IOCTL(cmd, access, ...) \
5617     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5618 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5619     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5620 #define IOCTL_IGNORE(cmd) \
5621     { TARGET_ ## cmd, 0, #cmd },
5622 #include "ioctls.h"
5623     { 0, 0, },
5624 };
5625 
5626 /* ??? Implement proper locking for ioctls.  */
5627 /* do_ioctl() must return target values and target errnos. */
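/*
 * Generic dispatcher: look the command up in ioctl_entries[], run a custom
 * handler if one is registered, otherwise convert the argument with the
 * thunk machinery according to its IOC_R/IOC_W/IOC_RW access mode before
 * and/or after the host ioctl.
 */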
5628 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5629 {
5630     const IOCTLEntry *ie;
5631     const argtype *arg_type;
5632     abi_long ret;
5633     uint8_t buf_temp[MAX_STRUCT_SIZE];
5634     int target_size;
5635     void *argptr;
5636 
5637     ie = ioctl_entries;
5638     for(;;) {
5639         if (ie->target_cmd == 0) {
5640             qemu_log_mask(
5641                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5642             return -TARGET_ENOTTY;
5643         }
5644         if (ie->target_cmd == cmd)
5645             break;
5646         ie++;
5647     }
5648     arg_type = ie->arg_type;
5649     if (ie->do_ioctl) {
5650         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5651     } else if (!ie->host_cmd) {
5652         /* Some architectures define BSD ioctls in their headers
5653            that are not implemented in Linux.  */
5654         return -TARGET_ENOTTY;
5655     }
5656 
5657     switch(arg_type[0]) {
5658     case TYPE_NULL:
5659         /* no argument */
5660         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5661         break;
5662     case TYPE_PTRVOID:
5663     case TYPE_INT:
5664     case TYPE_LONG:
5665     case TYPE_ULONG:
5666         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5667         break;
5668     case TYPE_PTR:
5669         arg_type++;
5670         target_size = thunk_type_size(arg_type, 0);
5671         switch(ie->access) {
5672         case IOC_R:
5673             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5674             if (!is_error(ret)) {
5675                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5676                 if (!argptr)
5677                     return -TARGET_EFAULT;
5678                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5679                 unlock_user(argptr, arg, target_size);
5680             }
5681             break;
5682         case IOC_W:
5683             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5684             if (!argptr)
5685                 return -TARGET_EFAULT;
5686             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5687             unlock_user(argptr, arg, 0);
5688             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5689             break;
5690         default:
5691         case IOC_RW:
5692             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5693             if (!argptr)
5694                 return -TARGET_EFAULT;
5695             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5696             unlock_user(argptr, arg, 0);
5697             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5698             if (!is_error(ret)) {
5699                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5700                 if (!argptr)
5701                     return -TARGET_EFAULT;
5702                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5703                 unlock_user(argptr, arg, target_size);
5704             }
5705             break;
5706         }
5707         break;
5708     default:
5709         qemu_log_mask(LOG_UNIMP,
5710                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5711                       (long)cmd, arg_type[0]);
5712         ret = -TARGET_ENOTTY;
5713         break;
5714     }
5715     return ret;
5716 }
5717 
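/*
 * Termios flag translation tables.  Each bitmask_transtbl entry is a
 * (target mask, target bits, host mask, host bits) tuple; bits selected by
 * the mask on one side are rewritten to the corresponding bits on the other
 * by target_to_host_bitmask() and host_to_target_bitmask().
 */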
5718 static const bitmask_transtbl iflag_tbl[] = {
5719         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5720         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5721         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5722         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5723         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5724         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5725         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5726         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5727         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5728         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5729         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5730         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5731         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5732         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5733         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5734 };
5735 
5736 static const bitmask_transtbl oflag_tbl[] = {
5737 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5738 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5739 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5740 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5741 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5742 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5743 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5744 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5745 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5746 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5747 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5748 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5749 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5750 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5751 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5752 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5753 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5754 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5755 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5756 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5757 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5758 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5759 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5760 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5761 };
5762 
5763 static const bitmask_transtbl cflag_tbl[] = {
5764 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5765 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5766 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5767 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5768 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5769 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5770 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5771 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5772 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5773 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5774 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5775 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5776 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5777 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5778 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5779 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5780 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5781 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5782 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5783 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5784 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5785 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5786 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5787 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5788 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5789 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5790 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5791 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5792 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5793 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5794 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5795 };
5796 
5797 static const bitmask_transtbl lflag_tbl[] = {
5798   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5799   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5800   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5801   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5802   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5803   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5804   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5805   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5806   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5807   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5808   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5809   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5810   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5811   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5812   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5813   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5814 };
5815 
5816 static void target_to_host_termios (void *dst, const void *src)
5817 {
5818     struct host_termios *host = dst;
5819     const struct target_termios *target = src;
5820 
5821     host->c_iflag =
5822         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5823     host->c_oflag =
5824         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5825     host->c_cflag =
5826         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5827     host->c_lflag =
5828         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5829     host->c_line = target->c_line;
5830 
5831     memset(host->c_cc, 0, sizeof(host->c_cc));
5832     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5833     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5834     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5835     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5836     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5837     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5838     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5839     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5840     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5841     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5842     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5843     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5844     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5845     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5846     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5847     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5848     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5849 }
5850 
5851 static void host_to_target_termios (void *dst, const void *src)
5852 {
5853     struct target_termios *target = dst;
5854     const struct host_termios *host = src;
5855 
5856     target->c_iflag =
5857         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5858     target->c_oflag =
5859         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5860     target->c_cflag =
5861         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5862     target->c_lflag =
5863         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5864     target->c_line = host->c_line;
5865 
5866     memset(target->c_cc, 0, sizeof(target->c_cc));
5867     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5868     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5869     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5870     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5871     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5872     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5873     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5874     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5875     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5876     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5877     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5878     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5879     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5880     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5881     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5882     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5883     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5884 }
5885 
5886 static const StructEntry struct_termios_def = {
5887     .convert = { host_to_target_termios, target_to_host_termios },
5888     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5889     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5890     .print = print_termios,
5891 };
5892 
5893 /* If the host does not provide these bits, they may be safely discarded. */
5894 #ifndef MAP_SYNC
5895 #define MAP_SYNC 0
5896 #endif
5897 #ifndef MAP_UNINITIALIZED
5898 #define MAP_UNINITIALIZED 0
5899 #endif
5900 
5901 static const bitmask_transtbl mmap_flags_tbl[] = {
5902     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5903     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5904       MAP_ANONYMOUS, MAP_ANONYMOUS },
5905     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5906       MAP_GROWSDOWN, MAP_GROWSDOWN },
5907     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5908       MAP_DENYWRITE, MAP_DENYWRITE },
5909     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5910       MAP_EXECUTABLE, MAP_EXECUTABLE },
5911     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5912     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5913       MAP_NORESERVE, MAP_NORESERVE },
5914     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5915     /* MAP_STACK had been ignored by the kernel for quite some time.
5916        Recognize it for the target insofar as we do not want to pass
5917        it through to the host.  */
5918     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5919     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5920     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5921     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5922       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5923     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5924       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5925 };
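/*
 * Each row above is (target_mask, target_bits, host_mask, host_bits);
 * target_to_host_bitmask() ORs in the host bits for every masked target
 * field that matches, so flags keep their meaning even when the guest and
 * host ABIs assign them different numeric values.  (Rough sketch of the
 * table's intent; see the bitmask_transtbl definition for details.)
 */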
5926 
5927 /*
5928  * Arrange for legacy / undefined architecture specific flags to be
5929  * ignored by mmap handling code.
5930  */
5931 #ifndef TARGET_MAP_32BIT
5932 #define TARGET_MAP_32BIT 0
5933 #endif
5934 #ifndef TARGET_MAP_HUGE_2MB
5935 #define TARGET_MAP_HUGE_2MB 0
5936 #endif
5937 #ifndef TARGET_MAP_HUGE_1GB
5938 #define TARGET_MAP_HUGE_1GB 0
5939 #endif
5940 
5941 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5942                         int target_flags, int fd, off_t offset)
5943 {
5944     /*
5945      * The historical set of flags that all mmap types implicitly support.
5946      */
5947     enum {
5948         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5949                                | TARGET_MAP_PRIVATE
5950                                | TARGET_MAP_FIXED
5951                                | TARGET_MAP_ANONYMOUS
5952                                | TARGET_MAP_DENYWRITE
5953                                | TARGET_MAP_EXECUTABLE
5954                                | TARGET_MAP_UNINITIALIZED
5955                                | TARGET_MAP_GROWSDOWN
5956                                | TARGET_MAP_LOCKED
5957                                | TARGET_MAP_NORESERVE
5958                                | TARGET_MAP_POPULATE
5959                                | TARGET_MAP_NONBLOCK
5960                                | TARGET_MAP_STACK
5961                                | TARGET_MAP_HUGETLB
5962                                | TARGET_MAP_32BIT
5963                                | TARGET_MAP_HUGE_2MB
5964                                | TARGET_MAP_HUGE_1GB
5965     };
5966     int host_flags;
5967 
5968     switch (target_flags & TARGET_MAP_TYPE) {
5969     case TARGET_MAP_PRIVATE:
5970         host_flags = MAP_PRIVATE;
5971         break;
5972     case TARGET_MAP_SHARED:
5973         host_flags = MAP_SHARED;
5974         break;
5975     case TARGET_MAP_SHARED_VALIDATE:
5976         /*
5977          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5978          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5979          */
5980         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5981             return -TARGET_EOPNOTSUPP;
5982         }
5983         host_flags = MAP_SHARED_VALIDATE;
5984         if (target_flags & TARGET_MAP_SYNC) {
5985             host_flags |= MAP_SYNC;
5986         }
5987         break;
5988     default:
5989         return -TARGET_EINVAL;
5990     }
5991     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5992 
5993     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5994 }
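/*
 * Worked example: for a guest request of
 * MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, the switch above resolves the
 * TARGET_MAP_TYPE bits to host MAP_PRIVATE, and the table lookup adds
 * host MAP_ANONYMOUS | MAP_FIXED, independent of how the guest ABI
 * happens to number those flags.
 */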
5995 
5996 /*
5997  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5998  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5999  */
6000 #if defined(TARGET_I386)
6001 
6002 /* NOTE: there is really one LDT for all the threads */
6003 static uint8_t *ldt_table;
6004 
6005 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6006 {
6007     int size;
6008     void *p;
6009 
6010     if (!ldt_table)
6011         return 0;
6012     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6013     if (size > bytecount)
6014         size = bytecount;
6015     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6016     if (!p)
6017         return -TARGET_EFAULT;
6018     /* ??? Should this be byteswapped?  */
6019     memcpy(p, ldt_table, size);
6020     unlock_user(p, ptr, size);
6021     return size;
6022 }
6023 
6024 /* XXX: add locking support */
6025 static abi_long write_ldt(CPUX86State *env,
6026                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6027 {
6028     struct target_modify_ldt_ldt_s ldt_info;
6029     struct target_modify_ldt_ldt_s *target_ldt_info;
6030     int seg_32bit, contents, read_exec_only, limit_in_pages;
6031     int seg_not_present, useable, lm;
6032     uint32_t *lp, entry_1, entry_2;
6033 
6034     if (bytecount != sizeof(ldt_info))
6035         return -TARGET_EINVAL;
6036     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6037         return -TARGET_EFAULT;
6038     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6039     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6040     ldt_info.limit = tswap32(target_ldt_info->limit);
6041     ldt_info.flags = tswap32(target_ldt_info->flags);
6042     unlock_user_struct(target_ldt_info, ptr, 0);
6043 
6044     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6045         return -TARGET_EINVAL;
6046     seg_32bit = ldt_info.flags & 1;
6047     contents = (ldt_info.flags >> 1) & 3;
6048     read_exec_only = (ldt_info.flags >> 3) & 1;
6049     limit_in_pages = (ldt_info.flags >> 4) & 1;
6050     seg_not_present = (ldt_info.flags >> 5) & 1;
6051     useable = (ldt_info.flags >> 6) & 1;
6052 #ifdef TARGET_ABI32
6053     lm = 0;
6054 #else
6055     lm = (ldt_info.flags >> 7) & 1;
6056 #endif
6057     if (contents == 3) {
6058         if (oldmode)
6059             return -TARGET_EINVAL;
6060         if (seg_not_present == 0)
6061             return -TARGET_EINVAL;
6062     }
6063     /* allocate the LDT */
6064     if (!ldt_table) {
6065         env->ldt.base = target_mmap(0,
6066                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6067                                     PROT_READ|PROT_WRITE,
6068                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6069         if (env->ldt.base == -1)
6070             return -TARGET_ENOMEM;
6071         memset(g2h_untagged(env->ldt.base), 0,
6072                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6073         env->ldt.limit = 0xffff;
6074         ldt_table = g2h_untagged(env->ldt.base);
6075     }
6076 
6077     /* NOTE: same code as Linux kernel */
6078     /* Allow LDTs to be cleared by the user. */
6079     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6080         if (oldmode ||
6081             (contents == 0		&&
6082              read_exec_only == 1	&&
6083              seg_32bit == 0		&&
6084              limit_in_pages == 0	&&
6085              seg_not_present == 1	&&
6086              useable == 0 )) {
6087             entry_1 = 0;
6088             entry_2 = 0;
6089             goto install;
6090         }
6091     }
6092 
6093     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6094         (ldt_info.limit & 0x0ffff);
6095     entry_2 = (ldt_info.base_addr & 0xff000000) |
6096         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6097         (ldt_info.limit & 0xf0000) |
6098         ((read_exec_only ^ 1) << 9) |
6099         (contents << 10) |
6100         ((seg_not_present ^ 1) << 15) |
6101         (seg_32bit << 22) |
6102         (limit_in_pages << 23) |
6103         (lm << 21) |
6104         0x7000;
6105     if (!oldmode)
6106         entry_2 |= (useable << 20);
6107 
6108     /* Install the new entry ...  */
6109 install:
6110     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6111     lp[0] = tswap32(entry_1);
6112     lp[1] = tswap32(entry_2);
6113     return 0;
6114 }
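/*
 * The two words built above follow the x86 segment descriptor layout:
 *   entry_1 = base[15:0] << 16 | limit[15:0]
 *   entry_2 = base[31:24]
 *           | limit_in_pages << 23      (G)
 *           | seg_32bit << 22           (D/B)
 *           | lm << 21                  (L)
 *           | useable << 20             (AVL, new mode only)
 *           | limit[19:16]
 *           | !seg_not_present << 15    (P)
 *           | 0x7000                    (DPL=3, S=1)
 *           | contents << 10            (type)
 *           | !read_exec_only << 9
 *           | base[23:16]               (bits 7:0)
 */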
6115 
6116 /* specific and weird i386 syscalls */
6117 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6118                               unsigned long bytecount)
6119 {
6120     abi_long ret;
6121 
6122     switch (func) {
6123     case 0:
6124         ret = read_ldt(ptr, bytecount);
6125         break;
6126     case 1:
6127         ret = write_ldt(env, ptr, bytecount, 1);
6128         break;
6129     case 0x11:
6130         ret = write_ldt(env, ptr, bytecount, 0);
6131         break;
6132     default:
6133         ret = -TARGET_ENOSYS;
6134         break;
6135     }
6136     return ret;
6137 }
6138 
6139 #if defined(TARGET_ABI32)
6140 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6141 {
6142     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6143     struct target_modify_ldt_ldt_s ldt_info;
6144     struct target_modify_ldt_ldt_s *target_ldt_info;
6145     int seg_32bit, contents, read_exec_only, limit_in_pages;
6146     int seg_not_present, useable, lm;
6147     uint32_t *lp, entry_1, entry_2;
6148     int i;
6149 
6150     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6151     if (!target_ldt_info)
6152         return -TARGET_EFAULT;
6153     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6154     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6155     ldt_info.limit = tswap32(target_ldt_info->limit);
6156     ldt_info.flags = tswap32(target_ldt_info->flags);
6157     if (ldt_info.entry_number == -1) {
6158         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6159             if (gdt_table[i] == 0) {
6160                 ldt_info.entry_number = i;
6161                 target_ldt_info->entry_number = tswap32(i);
6162                 break;
6163             }
6164         }
6165     }
6166     unlock_user_struct(target_ldt_info, ptr, 1);
6167 
6168     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6169         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6170            return -TARGET_EINVAL;
6171     seg_32bit = ldt_info.flags & 1;
6172     contents = (ldt_info.flags >> 1) & 3;
6173     read_exec_only = (ldt_info.flags >> 3) & 1;
6174     limit_in_pages = (ldt_info.flags >> 4) & 1;
6175     seg_not_present = (ldt_info.flags >> 5) & 1;
6176     useable = (ldt_info.flags >> 6) & 1;
6177 #ifdef TARGET_ABI32
6178     lm = 0;
6179 #else
6180     lm = (ldt_info.flags >> 7) & 1;
6181 #endif
6182 
6183     if (contents == 3) {
6184         if (seg_not_present == 0)
6185             return -TARGET_EINVAL;
6186     }
6187 
6188     /* NOTE: same code as Linux kernel */
6189     /* Allow LDTs to be cleared by the user. */
6190     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6191         if ((contents == 0             &&
6192              read_exec_only == 1       &&
6193              seg_32bit == 0            &&
6194              limit_in_pages == 0       &&
6195              seg_not_present == 1      &&
6196              useable == 0 )) {
6197             entry_1 = 0;
6198             entry_2 = 0;
6199             goto install;
6200         }
6201     }
6202 
6203     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6204         (ldt_info.limit & 0x0ffff);
6205     entry_2 = (ldt_info.base_addr & 0xff000000) |
6206         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6207         (ldt_info.limit & 0xf0000) |
6208         ((read_exec_only ^ 1) << 9) |
6209         (contents << 10) |
6210         ((seg_not_present ^ 1) << 15) |
6211         (seg_32bit << 22) |
6212         (limit_in_pages << 23) |
6213         (useable << 20) |
6214         (lm << 21) |
6215         0x7000;
6216 
6217     /* Install the new entry ...  */
6218 install:
6219     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6220     lp[0] = tswap32(entry_1);
6221     lp[1] = tswap32(entry_2);
6222     return 0;
6223 }
6224 
6225 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6226 {
6227     struct target_modify_ldt_ldt_s *target_ldt_info;
6228     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6229     uint32_t base_addr, limit, flags;
6230     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6231     int seg_not_present, useable, lm;
6232     uint32_t *lp, entry_1, entry_2;
6233 
6234     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6235     if (!target_ldt_info)
6236         return -TARGET_EFAULT;
6237     idx = tswap32(target_ldt_info->entry_number);
6238     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6239         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6240         unlock_user_struct(target_ldt_info, ptr, 1);
6241         return -TARGET_EINVAL;
6242     }
6243     lp = (uint32_t *)(gdt_table + idx);
6244     entry_1 = tswap32(lp[0]);
6245     entry_2 = tswap32(lp[1]);
6246 
6247     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6248     contents = (entry_2 >> 10) & 3;
6249     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6250     seg_32bit = (entry_2 >> 22) & 1;
6251     limit_in_pages = (entry_2 >> 23) & 1;
6252     useable = (entry_2 >> 20) & 1;
6253 #ifdef TARGET_ABI32
6254     lm = 0;
6255 #else
6256     lm = (entry_2 >> 21) & 1;
6257 #endif
6258     flags = (seg_32bit << 0) | (contents << 1) |
6259         (read_exec_only << 3) | (limit_in_pages << 4) |
6260         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6261     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6262     base_addr = (entry_1 >> 16) |
6263         (entry_2 & 0xff000000) |
6264         ((entry_2 & 0xff) << 16);
6265     target_ldt_info->base_addr = tswapal(base_addr);
6266     target_ldt_info->limit = tswap32(limit);
6267     target_ldt_info->flags = tswap32(flags);
6268     unlock_user_struct(target_ldt_info, ptr, 1);
6269     return 0;
6270 }
6271 
6272 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6273 {
6274     return -TARGET_ENOSYS;
6275 }
6276 #else
6277 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6278 {
6279     abi_long ret = 0;
6280     abi_ulong val;
6281     int idx;
6282 
6283     switch(code) {
6284     case TARGET_ARCH_SET_GS:
6285     case TARGET_ARCH_SET_FS:
6286         if (code == TARGET_ARCH_SET_GS)
6287             idx = R_GS;
6288         else
6289             idx = R_FS;
6290         cpu_x86_load_seg(env, idx, 0);
6291         env->segs[idx].base = addr;
6292         break;
6293     case TARGET_ARCH_GET_GS:
6294     case TARGET_ARCH_GET_FS:
6295         if (code == TARGET_ARCH_GET_GS)
6296             idx = R_GS;
6297         else
6298             idx = R_FS;
6299         val = env->segs[idx].base;
6300         if (put_user(val, addr, abi_ulong))
6301             ret = -TARGET_EFAULT;
6302         break;
6303     default:
6304         ret = -TARGET_EINVAL;
6305         break;
6306     }
6307     return ret;
6308 }
6309 #endif /* defined(TARGET_ABI32) */
6310 #endif /* defined(TARGET_I386) */
6311 
6312 /*
6313  * These constants are generic.  Supply any that are missing from the host.
6314  */
6315 #ifndef PR_SET_NAME
6316 # define PR_SET_NAME    15
6317 # define PR_GET_NAME    16
6318 #endif
6319 #ifndef PR_SET_FP_MODE
6320 # define PR_SET_FP_MODE 45
6321 # define PR_GET_FP_MODE 46
6322 # define PR_FP_MODE_FR   (1 << 0)
6323 # define PR_FP_MODE_FRE  (1 << 1)
6324 #endif
6325 #ifndef PR_SVE_SET_VL
6326 # define PR_SVE_SET_VL  50
6327 # define PR_SVE_GET_VL  51
6328 # define PR_SVE_VL_LEN_MASK  0xffff
6329 # define PR_SVE_VL_INHERIT   (1 << 17)
6330 #endif
6331 #ifndef PR_PAC_RESET_KEYS
6332 # define PR_PAC_RESET_KEYS  54
6333 # define PR_PAC_APIAKEY   (1 << 0)
6334 # define PR_PAC_APIBKEY   (1 << 1)
6335 # define PR_PAC_APDAKEY   (1 << 2)
6336 # define PR_PAC_APDBKEY   (1 << 3)
6337 # define PR_PAC_APGAKEY   (1 << 4)
6338 #endif
6339 #ifndef PR_SET_TAGGED_ADDR_CTRL
6340 # define PR_SET_TAGGED_ADDR_CTRL 55
6341 # define PR_GET_TAGGED_ADDR_CTRL 56
6342 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6343 #endif
6344 #ifndef PR_SET_IO_FLUSHER
6345 # define PR_SET_IO_FLUSHER 57
6346 # define PR_GET_IO_FLUSHER 58
6347 #endif
6348 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6349 # define PR_SET_SYSCALL_USER_DISPATCH 59
6350 #endif
6351 #ifndef PR_SME_SET_VL
6352 # define PR_SME_SET_VL  63
6353 # define PR_SME_GET_VL  64
6354 # define PR_SME_VL_LEN_MASK  0xffff
6355 # define PR_SME_VL_INHERIT   (1 << 17)
6356 #endif
6357 
6358 #include "target_prctl.h"
6359 
6360 static abi_long do_prctl_inval0(CPUArchState *env)
6361 {
6362     return -TARGET_EINVAL;
6363 }
6364 
6365 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6366 {
6367     return -TARGET_EINVAL;
6368 }
6369 
6370 #ifndef do_prctl_get_fp_mode
6371 #define do_prctl_get_fp_mode do_prctl_inval0
6372 #endif
6373 #ifndef do_prctl_set_fp_mode
6374 #define do_prctl_set_fp_mode do_prctl_inval1
6375 #endif
6376 #ifndef do_prctl_sve_get_vl
6377 #define do_prctl_sve_get_vl do_prctl_inval0
6378 #endif
6379 #ifndef do_prctl_sve_set_vl
6380 #define do_prctl_sve_set_vl do_prctl_inval1
6381 #endif
6382 #ifndef do_prctl_reset_keys
6383 #define do_prctl_reset_keys do_prctl_inval1
6384 #endif
6385 #ifndef do_prctl_set_tagged_addr_ctrl
6386 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6387 #endif
6388 #ifndef do_prctl_get_tagged_addr_ctrl
6389 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6390 #endif
6391 #ifndef do_prctl_get_unalign
6392 #define do_prctl_get_unalign do_prctl_inval1
6393 #endif
6394 #ifndef do_prctl_set_unalign
6395 #define do_prctl_set_unalign do_prctl_inval1
6396 #endif
6397 #ifndef do_prctl_sme_get_vl
6398 #define do_prctl_sme_get_vl do_prctl_inval0
6399 #endif
6400 #ifndef do_prctl_sme_set_vl
6401 #define do_prctl_sme_set_vl do_prctl_inval1
6402 #endif
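/*
 * target_prctl.h (included above) supplies per-architecture hooks for the
 * options that touch guest CPU state, e.g. the SVE/SME vector-length
 * controls on aarch64.  Any hook an architecture leaves undefined falls
 * back to the do_prctl_inval* stubs and simply returns -TARGET_EINVAL.
 */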
6403 
6404 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6405                          abi_long arg3, abi_long arg4, abi_long arg5)
6406 {
6407     abi_long ret;
6408 
6409     switch (option) {
6410     case PR_GET_PDEATHSIG:
6411         {
6412             int deathsig;
6413             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6414                                   arg3, arg4, arg5));
6415             if (!is_error(ret) &&
6416                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6417                 return -TARGET_EFAULT;
6418             }
6419             return ret;
6420         }
6421     case PR_SET_PDEATHSIG:
6422         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6423                                arg3, arg4, arg5));
6424     case PR_GET_NAME:
6425         {
6426             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6427             if (!name) {
6428                 return -TARGET_EFAULT;
6429             }
6430             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6431                                   arg3, arg4, arg5));
6432             unlock_user(name, arg2, 16);
6433             return ret;
6434         }
6435     case PR_SET_NAME:
6436         {
6437             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6438             if (!name) {
6439                 return -TARGET_EFAULT;
6440             }
6441             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6442                                   arg3, arg4, arg5));
6443             unlock_user(name, arg2, 0);
6444             return ret;
6445         }
6446     case PR_GET_FP_MODE:
6447         return do_prctl_get_fp_mode(env);
6448     case PR_SET_FP_MODE:
6449         return do_prctl_set_fp_mode(env, arg2);
6450     case PR_SVE_GET_VL:
6451         return do_prctl_sve_get_vl(env);
6452     case PR_SVE_SET_VL:
6453         return do_prctl_sve_set_vl(env, arg2);
6454     case PR_SME_GET_VL:
6455         return do_prctl_sme_get_vl(env);
6456     case PR_SME_SET_VL:
6457         return do_prctl_sme_set_vl(env, arg2);
6458     case PR_PAC_RESET_KEYS:
6459         if (arg3 || arg4 || arg5) {
6460             return -TARGET_EINVAL;
6461         }
6462         return do_prctl_reset_keys(env, arg2);
6463     case PR_SET_TAGGED_ADDR_CTRL:
6464         if (arg3 || arg4 || arg5) {
6465             return -TARGET_EINVAL;
6466         }
6467         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6468     case PR_GET_TAGGED_ADDR_CTRL:
6469         if (arg2 || arg3 || arg4 || arg5) {
6470             return -TARGET_EINVAL;
6471         }
6472         return do_prctl_get_tagged_addr_ctrl(env);
6473 
6474     case PR_GET_UNALIGN:
6475         return do_prctl_get_unalign(env, arg2);
6476     case PR_SET_UNALIGN:
6477         return do_prctl_set_unalign(env, arg2);
6478 
6479     case PR_CAP_AMBIENT:
6480     case PR_CAPBSET_READ:
6481     case PR_CAPBSET_DROP:
6482     case PR_GET_DUMPABLE:
6483     case PR_SET_DUMPABLE:
6484     case PR_GET_KEEPCAPS:
6485     case PR_SET_KEEPCAPS:
6486     case PR_GET_SECUREBITS:
6487     case PR_SET_SECUREBITS:
6488     case PR_GET_TIMING:
6489     case PR_SET_TIMING:
6490     case PR_GET_TIMERSLACK:
6491     case PR_SET_TIMERSLACK:
6492     case PR_MCE_KILL:
6493     case PR_MCE_KILL_GET:
6494     case PR_GET_NO_NEW_PRIVS:
6495     case PR_SET_NO_NEW_PRIVS:
6496     case PR_GET_IO_FLUSHER:
6497     case PR_SET_IO_FLUSHER:
6498     case PR_SET_CHILD_SUBREAPER:
6499     case PR_GET_SPECULATION_CTRL:
6500     case PR_SET_SPECULATION_CTRL:
6501         /* Some prctl options have no pointer arguments and we can pass on. */
6502         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6503 
6504     case PR_GET_CHILD_SUBREAPER:
6505         {
6506             int val;
6507             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6508                                   arg3, arg4, arg5));
6509             if (!is_error(ret) && put_user_s32(val, arg2)) {
6510                 return -TARGET_EFAULT;
6511             }
6512             return ret;
6513         }
6514 
6515     case PR_GET_TID_ADDRESS:
6516         {
6517             TaskState *ts = get_task_state(env_cpu(env));
6518             return put_user_ual(ts->child_tidptr, arg2);
6519         }
6520 
6521     case PR_GET_FPEXC:
6522     case PR_SET_FPEXC:
6523         /* Was used for SPE on PowerPC. */
6524         return -TARGET_EINVAL;
6525 
6526     case PR_GET_ENDIAN:
6527     case PR_SET_ENDIAN:
6528     case PR_GET_FPEMU:
6529     case PR_SET_FPEMU:
6530     case PR_SET_MM:
6531     case PR_GET_SECCOMP:
6532     case PR_SET_SECCOMP:
6533     case PR_SET_SYSCALL_USER_DISPATCH:
6534     case PR_GET_THP_DISABLE:
6535     case PR_SET_THP_DISABLE:
6536     case PR_GET_TSC:
6537     case PR_SET_TSC:
6538         /* Disable to prevent the target disabling stuff we need. */
6539         return -TARGET_EINVAL;
6540 
6541     default:
6542         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6543                       option);
6544         return -TARGET_EINVAL;
6545     }
6546 }
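/*
 * In short, the dispatch above splits prctl options into three groups:
 * options with pointer or signal arguments are marshalled explicitly,
 * plain value options are forwarded to the host prctl(), and options that
 * would let the guest change behaviour the emulator itself depends on are
 * rejected with -TARGET_EINVAL.
 */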
6547 
6548 #define NEW_STACK_SIZE 0x40000
6549 
6550 
6551 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6552 typedef struct {
6553     CPUArchState *env;
6554     pthread_mutex_t mutex;
6555     pthread_cond_t cond;
6556     pthread_t thread;
6557     uint32_t tid;
6558     abi_ulong child_tidptr;
6559     abi_ulong parent_tidptr;
6560     sigset_t sigmask;
6561 } new_thread_info;
6562 
6563 static void *clone_func(void *arg)
6564 {
6565     new_thread_info *info = arg;
6566     CPUArchState *env;
6567     CPUState *cpu;
6568     TaskState *ts;
6569 
6570     rcu_register_thread();
6571     tcg_register_thread();
6572     env = info->env;
6573     cpu = env_cpu(env);
6574     thread_cpu = cpu;
6575     ts = get_task_state(cpu);
6576     info->tid = sys_gettid();
6577     task_settid(ts);
6578     if (info->child_tidptr)
6579         put_user_u32(info->tid, info->child_tidptr);
6580     if (info->parent_tidptr)
6581         put_user_u32(info->tid, info->parent_tidptr);
6582     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6583     /* Enable signals.  */
6584     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6585     /* Signal to the parent that we're ready.  */
6586     pthread_mutex_lock(&info->mutex);
6587     pthread_cond_broadcast(&info->cond);
6588     pthread_mutex_unlock(&info->mutex);
6589     /* Wait until the parent has finished initializing the tls state.  */
6590     pthread_mutex_lock(&clone_lock);
6591     pthread_mutex_unlock(&clone_lock);
6592     cpu_loop(env);
6593     /* never exits */
6594     return NULL;
6595 }
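/*
 * Start-up handshake: the child publishes its tid, wakes the parent via
 * info->cond so do_fork() can return that tid, then takes and releases
 * clone_lock, which the parent holds until TLS and the rest of the new
 * thread's state are set up.  Only then does the child enter cpu_loop()
 * and begin executing guest code.
 */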
6596 
6597 /* do_fork() must return host values and target errnos (unlike most
6598    do_*() functions). */
6599 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6600                    abi_ulong parent_tidptr, target_ulong newtls,
6601                    abi_ulong child_tidptr)
6602 {
6603     CPUState *cpu = env_cpu(env);
6604     int ret;
6605     TaskState *ts;
6606     CPUState *new_cpu;
6607     CPUArchState *new_env;
6608     sigset_t sigmask;
6609 
6610     flags &= ~CLONE_IGNORED_FLAGS;
6611 
6612     /* Emulate vfork() with fork() */
6613     if (flags & CLONE_VFORK)
6614         flags &= ~(CLONE_VFORK | CLONE_VM);
6615 
6616     if (flags & CLONE_VM) {
6617         TaskState *parent_ts = get_task_state(cpu);
6618         new_thread_info info;
6619         pthread_attr_t attr;
6620 
6621         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6622             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6623             return -TARGET_EINVAL;
6624         }
6625 
6626         ts = g_new0(TaskState, 1);
6627         init_task_state(ts);
6628 
6629         /* Grab a mutex so that thread setup appears atomic.  */
6630         pthread_mutex_lock(&clone_lock);
6631 
6632         /*
6633          * If this is our first additional thread, we need to ensure we
6634          * generate code for parallel execution and flush old translations.
6635          * Do this now so that the copy gets CF_PARALLEL too.
6636          */
6637         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6638             tcg_cflags_set(cpu, CF_PARALLEL);
6639             tb_flush(cpu);
6640         }
6641 
6642         /* we create a new CPU instance. */
6643         new_env = cpu_copy(env);
6644         /* Init regs that differ from the parent.  */
6645         cpu_clone_regs_child(new_env, newsp, flags);
6646         cpu_clone_regs_parent(env, flags);
6647         new_cpu = env_cpu(new_env);
6648         new_cpu->opaque = ts;
6649         ts->bprm = parent_ts->bprm;
6650         ts->info = parent_ts->info;
6651         ts->signal_mask = parent_ts->signal_mask;
6652 
6653         if (flags & CLONE_CHILD_CLEARTID) {
6654             ts->child_tidptr = child_tidptr;
6655         }
6656 
6657         if (flags & CLONE_SETTLS) {
6658             cpu_set_tls (new_env, newtls);
6659         }
6660 
6661         memset(&info, 0, sizeof(info));
6662         pthread_mutex_init(&info.mutex, NULL);
6663         pthread_mutex_lock(&info.mutex);
6664         pthread_cond_init(&info.cond, NULL);
6665         info.env = new_env;
6666         if (flags & CLONE_CHILD_SETTID) {
6667             info.child_tidptr = child_tidptr;
6668         }
6669         if (flags & CLONE_PARENT_SETTID) {
6670             info.parent_tidptr = parent_tidptr;
6671         }
6672 
6673         ret = pthread_attr_init(&attr);
6674         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6675         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6676         /* It is not safe to deliver signals until the child has finished
6677            initializing, so temporarily block all signals.  */
6678         sigfillset(&sigmask);
6679         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6680         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6681 
6682         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6683         /* TODO: Free new CPU state if thread creation failed.  */
6684 
6685         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6686         pthread_attr_destroy(&attr);
6687         if (ret == 0) {
6688             /* Wait for the child to initialize.  */
6689             pthread_cond_wait(&info.cond, &info.mutex);
6690             ret = info.tid;
6691         } else {
6692             ret = -1;
6693         }
6694         pthread_mutex_unlock(&info.mutex);
6695         pthread_cond_destroy(&info.cond);
6696         pthread_mutex_destroy(&info.mutex);
6697         pthread_mutex_unlock(&clone_lock);
6698     } else {
6699         /* if no CLONE_VM, we consider it is a fork */
6700         if (flags & CLONE_INVALID_FORK_FLAGS) {
6701             return -TARGET_EINVAL;
6702         }
6703 
6704         /* We can't support custom termination signals */
6705         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6706             return -TARGET_EINVAL;
6707         }
6708 
6709 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6710         if (flags & CLONE_PIDFD) {
6711             return -TARGET_EINVAL;
6712         }
6713 #endif
6714 
6715         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6716         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6717             return -TARGET_EINVAL;
6718         }
6719 
6720         if (block_signals()) {
6721             return -QEMU_ERESTARTSYS;
6722         }
6723 
6724         fork_start();
6725         ret = fork();
6726         if (ret == 0) {
6727             /* Child Process.  */
6728             cpu_clone_regs_child(env, newsp, flags);
6729             fork_end(ret);
6730             /* There is a race condition here.  The parent process could
6731                theoretically read the TID in the child process before the child
6732                tid is set.  This would require using either ptrace
6733                (not implemented) or having *_tidptr to point at a shared memory
6734                mapping.  We can't repeat the spinlock hack used above because
6735                the child process gets its own copy of the lock.  */
6736             if (flags & CLONE_CHILD_SETTID)
6737                 put_user_u32(sys_gettid(), child_tidptr);
6738             if (flags & CLONE_PARENT_SETTID)
6739                 put_user_u32(sys_gettid(), parent_tidptr);
6740             ts = get_task_state(cpu);
6741             if (flags & CLONE_SETTLS)
6742                 cpu_set_tls (env, newtls);
6743             if (flags & CLONE_CHILD_CLEARTID)
6744                 ts->child_tidptr = child_tidptr;
6745         } else {
6746             cpu_clone_regs_parent(env, flags);
6747             if (flags & CLONE_PIDFD) {
6748                 int pid_fd = 0;
6749 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6750                 int pid_child = ret;
6751                 pid_fd = pidfd_open(pid_child, 0);
6752                 if (pid_fd >= 0) {
6753                     qemu_set_cloexec(pid_fd);
6754                 } else {
6755                     pid_fd = 0;
6756                 }
6757 #endif
6758                 put_user_u32(pid_fd, parent_tidptr);
6759             }
6760             fork_end(ret);
6761         }
6762         g_assert(!cpu_in_exclusive_context(cpu));
6763     }
6764     return ret;
6765 }
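/*
 * Summary: a clone with CLONE_VM becomes a host pthread sharing this
 * process (the thread path above), while anything else is emulated by
 * fork()ing the whole emulator; vfork() requests are emulated with an
 * ordinary fork(), as noted above.
 */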
6766 
6767 /* Warning: does not handle Linux-specific flags... */
6768 static int target_to_host_fcntl_cmd(int cmd)
6769 {
6770     int ret;
6771 
6772     switch(cmd) {
6773     case TARGET_F_DUPFD:
6774     case TARGET_F_GETFD:
6775     case TARGET_F_SETFD:
6776     case TARGET_F_GETFL:
6777     case TARGET_F_SETFL:
6778     case TARGET_F_OFD_GETLK:
6779     case TARGET_F_OFD_SETLK:
6780     case TARGET_F_OFD_SETLKW:
6781         ret = cmd;
6782         break;
6783     case TARGET_F_GETLK:
6784         ret = F_GETLK;
6785         break;
6786     case TARGET_F_SETLK:
6787         ret = F_SETLK;
6788         break;
6789     case TARGET_F_SETLKW:
6790         ret = F_SETLKW;
6791         break;
6792     case TARGET_F_GETOWN:
6793         ret = F_GETOWN;
6794         break;
6795     case TARGET_F_SETOWN:
6796         ret = F_SETOWN;
6797         break;
6798     case TARGET_F_GETSIG:
6799         ret = F_GETSIG;
6800         break;
6801     case TARGET_F_SETSIG:
6802         ret = F_SETSIG;
6803         break;
6804 #if TARGET_ABI_BITS == 32
6805     case TARGET_F_GETLK64:
6806         ret = F_GETLK;
6807         break;
6808     case TARGET_F_SETLK64:
6809         ret = F_SETLK;
6810         break;
6811     case TARGET_F_SETLKW64:
6812         ret = F_SETLKW;
6813         break;
6814 #endif
6815     case TARGET_F_SETLEASE:
6816         ret = F_SETLEASE;
6817         break;
6818     case TARGET_F_GETLEASE:
6819         ret = F_GETLEASE;
6820         break;
6821 #ifdef F_DUPFD_CLOEXEC
6822     case TARGET_F_DUPFD_CLOEXEC:
6823         ret = F_DUPFD_CLOEXEC;
6824         break;
6825 #endif
6826     case TARGET_F_NOTIFY:
6827         ret = F_NOTIFY;
6828         break;
6829 #ifdef F_GETOWN_EX
6830     case TARGET_F_GETOWN_EX:
6831         ret = F_GETOWN_EX;
6832         break;
6833 #endif
6834 #ifdef F_SETOWN_EX
6835     case TARGET_F_SETOWN_EX:
6836         ret = F_SETOWN_EX;
6837         break;
6838 #endif
6839 #ifdef F_SETPIPE_SZ
6840     case TARGET_F_SETPIPE_SZ:
6841         ret = F_SETPIPE_SZ;
6842         break;
6843     case TARGET_F_GETPIPE_SZ:
6844         ret = F_GETPIPE_SZ;
6845         break;
6846 #endif
6847 #ifdef F_ADD_SEALS
6848     case TARGET_F_ADD_SEALS:
6849         ret = F_ADD_SEALS;
6850         break;
6851     case TARGET_F_GET_SEALS:
6852         ret = F_GET_SEALS;
6853         break;
6854 #endif
6855     default:
6856         ret = -TARGET_EINVAL;
6857         break;
6858     }
6859 
6860 #if defined(__powerpc64__)
6861     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13
6862      * and 14, which the kernel does not support. The glibc fcntl wrapper
6863      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6864      * the syscall directly, adjust to what the kernel supports.
6865      */
6866     if (ret >= F_GETLK && ret <= F_SETLKW) {
6867         ret -= F_GETLK - 5;
6868     }
6869 #endif
6870 
6871     return ret;
6872 }
6873 
6874 #define FLOCK_TRANSTBL \
6875     switch (type) { \
6876     TRANSTBL_CONVERT(F_RDLCK); \
6877     TRANSTBL_CONVERT(F_WRLCK); \
6878     TRANSTBL_CONVERT(F_UNLCK); \
6879     }
6880 
6881 static int target_to_host_flock(int type)
6882 {
6883 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6884     FLOCK_TRANSTBL
6885 #undef  TRANSTBL_CONVERT
6886     return -TARGET_EINVAL;
6887 }
6888 
6889 static int host_to_target_flock(int type)
6890 {
6891 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6892     FLOCK_TRANSTBL
6893 #undef  TRANSTBL_CONVERT
6894     /* if we don't know how to convert the value coming
6895      * from the host we copy to the target field as-is
6896      */
6897     return type;
6898 }
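/*
 * FLOCK_TRANSTBL is an X-macro: each converter above redefines
 * TRANSTBL_CONVERT so the same list of lock types expands into case labels
 * for its own direction, keeping target_to_host_flock() and
 * host_to_target_flock() in sync from a single list.
 */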
6899 
6900 static inline abi_long copy_from_user_flock(struct flock *fl,
6901                                             abi_ulong target_flock_addr)
6902 {
6903     struct target_flock *target_fl;
6904     int l_type;
6905 
6906     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6907         return -TARGET_EFAULT;
6908     }
6909 
6910     __get_user(l_type, &target_fl->l_type);
6911     l_type = target_to_host_flock(l_type);
6912     if (l_type < 0) {
6913         return l_type;
6914     }
6915     fl->l_type = l_type;
6916     __get_user(fl->l_whence, &target_fl->l_whence);
6917     __get_user(fl->l_start, &target_fl->l_start);
6918     __get_user(fl->l_len, &target_fl->l_len);
6919     __get_user(fl->l_pid, &target_fl->l_pid);
6920     unlock_user_struct(target_fl, target_flock_addr, 0);
6921     return 0;
6922 }
6923 
6924 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6925                                           const struct flock *fl)
6926 {
6927     struct target_flock *target_fl;
6928     short l_type;
6929 
6930     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6931         return -TARGET_EFAULT;
6932     }
6933 
6934     l_type = host_to_target_flock(fl->l_type);
6935     __put_user(l_type, &target_fl->l_type);
6936     __put_user(fl->l_whence, &target_fl->l_whence);
6937     __put_user(fl->l_start, &target_fl->l_start);
6938     __put_user(fl->l_len, &target_fl->l_len);
6939     __put_user(fl->l_pid, &target_fl->l_pid);
6940     unlock_user_struct(target_fl, target_flock_addr, 1);
6941     return 0;
6942 }
6943 
6944 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6945 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6946 
6947 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6948 struct target_oabi_flock64 {
6949     abi_short l_type;
6950     abi_short l_whence;
6951     abi_llong l_start;
6952     abi_llong l_len;
6953     abi_int   l_pid;
6954 } QEMU_PACKED;
6955 
6956 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6957                                                    abi_ulong target_flock_addr)
6958 {
6959     struct target_oabi_flock64 *target_fl;
6960     int l_type;
6961 
6962     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6963         return -TARGET_EFAULT;
6964     }
6965 
6966     __get_user(l_type, &target_fl->l_type);
6967     l_type = target_to_host_flock(l_type);
6968     if (l_type < 0) {
6969         return l_type;
6970     }
6971     fl->l_type = l_type;
6972     __get_user(fl->l_whence, &target_fl->l_whence);
6973     __get_user(fl->l_start, &target_fl->l_start);
6974     __get_user(fl->l_len, &target_fl->l_len);
6975     __get_user(fl->l_pid, &target_fl->l_pid);
6976     unlock_user_struct(target_fl, target_flock_addr, 0);
6977     return 0;
6978 }
6979 
6980 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6981                                                  const struct flock *fl)
6982 {
6983     struct target_oabi_flock64 *target_fl;
6984     short l_type;
6985 
6986     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6987         return -TARGET_EFAULT;
6988     }
6989 
6990     l_type = host_to_target_flock(fl->l_type);
6991     __put_user(l_type, &target_fl->l_type);
6992     __put_user(fl->l_whence, &target_fl->l_whence);
6993     __put_user(fl->l_start, &target_fl->l_start);
6994     __put_user(fl->l_len, &target_fl->l_len);
6995     __put_user(fl->l_pid, &target_fl->l_pid);
6996     unlock_user_struct(target_fl, target_flock_addr, 1);
6997     return 0;
6998 }
6999 #endif
7000 
7001 static inline abi_long copy_from_user_flock64(struct flock *fl,
7002                                               abi_ulong target_flock_addr)
7003 {
7004     struct target_flock64 *target_fl;
7005     int l_type;
7006 
7007     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7008         return -TARGET_EFAULT;
7009     }
7010 
7011     __get_user(l_type, &target_fl->l_type);
7012     l_type = target_to_host_flock(l_type);
7013     if (l_type < 0) {
7014         return l_type;
7015     }
7016     fl->l_type = l_type;
7017     __get_user(fl->l_whence, &target_fl->l_whence);
7018     __get_user(fl->l_start, &target_fl->l_start);
7019     __get_user(fl->l_len, &target_fl->l_len);
7020     __get_user(fl->l_pid, &target_fl->l_pid);
7021     unlock_user_struct(target_fl, target_flock_addr, 0);
7022     return 0;
7023 }
7024 
7025 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7026                                             const struct flock *fl)
7027 {
7028     struct target_flock64 *target_fl;
7029     short l_type;
7030 
7031     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7032         return -TARGET_EFAULT;
7033     }
7034 
7035     l_type = host_to_target_flock(fl->l_type);
7036     __put_user(l_type, &target_fl->l_type);
7037     __put_user(fl->l_whence, &target_fl->l_whence);
7038     __put_user(fl->l_start, &target_fl->l_start);
7039     __put_user(fl->l_len, &target_fl->l_len);
7040     __put_user(fl->l_pid, &target_fl->l_pid);
7041     unlock_user_struct(target_fl, target_flock_addr, 1);
7042     return 0;
7043 }
7044 
7045 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7046 {
7047     struct flock fl;
7048 #ifdef F_GETOWN_EX
7049     struct f_owner_ex fox;
7050     struct target_f_owner_ex *target_fox;
7051 #endif
7052     abi_long ret;
7053     int host_cmd = target_to_host_fcntl_cmd(cmd);
7054 
7055     if (host_cmd == -TARGET_EINVAL)
7056 	    return host_cmd;
7057 
7058     switch(cmd) {
7059     case TARGET_F_GETLK:
7060         ret = copy_from_user_flock(&fl, arg);
7061         if (ret) {
7062             return ret;
7063         }
7064         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7065         if (ret == 0) {
7066             ret = copy_to_user_flock(arg, &fl);
7067         }
7068         break;
7069 
7070     case TARGET_F_SETLK:
7071     case TARGET_F_SETLKW:
7072         ret = copy_from_user_flock(&fl, arg);
7073         if (ret) {
7074             return ret;
7075         }
7076         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7077         break;
7078 
7079     case TARGET_F_GETLK64:
7080     case TARGET_F_OFD_GETLK:
7081         ret = copy_from_user_flock64(&fl, arg);
7082         if (ret) {
7083             return ret;
7084         }
7085         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7086         if (ret == 0) {
7087             ret = copy_to_user_flock64(arg, &fl);
7088         }
7089         break;
7090     case TARGET_F_SETLK64:
7091     case TARGET_F_SETLKW64:
7092     case TARGET_F_OFD_SETLK:
7093     case TARGET_F_OFD_SETLKW:
7094         ret = copy_from_user_flock64(&fl, arg);
7095         if (ret) {
7096             return ret;
7097         }
7098         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7099         break;
7100 
7101     case TARGET_F_GETFL:
7102         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7103         if (ret >= 0) {
7104             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7105             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7106             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7107                 ret |= TARGET_O_LARGEFILE;
7108             }
7109         }
7110         break;
7111 
7112     case TARGET_F_SETFL:
7113         ret = get_errno(safe_fcntl(fd, host_cmd,
7114                                    target_to_host_bitmask(arg,
7115                                                           fcntl_flags_tbl)));
7116         break;
7117 
7118 #ifdef F_GETOWN_EX
7119     case TARGET_F_GETOWN_EX:
7120         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7121         if (ret >= 0) {
7122             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7123                 return -TARGET_EFAULT;
7124             target_fox->type = tswap32(fox.type);
7125             target_fox->pid = tswap32(fox.pid);
7126             unlock_user_struct(target_fox, arg, 1);
7127         }
7128         break;
7129 #endif
7130 
7131 #ifdef F_SETOWN_EX
7132     case TARGET_F_SETOWN_EX:
7133         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7134             return -TARGET_EFAULT;
7135         fox.type = tswap32(target_fox->type);
7136         fox.pid = tswap32(target_fox->pid);
7137         unlock_user_struct(target_fox, arg, 0);
7138         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7139         break;
7140 #endif
7141 
7142     case TARGET_F_SETSIG:
7143         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7144         break;
7145 
7146     case TARGET_F_GETSIG:
7147         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7148         break;
7149 
7150     case TARGET_F_SETOWN:
7151     case TARGET_F_GETOWN:
7152     case TARGET_F_SETLEASE:
7153     case TARGET_F_GETLEASE:
7154     case TARGET_F_SETPIPE_SZ:
7155     case TARGET_F_GETPIPE_SZ:
7156     case TARGET_F_ADD_SEALS:
7157     case TARGET_F_GET_SEALS:
7158         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7159         break;
7160 
7161     default:
7162         ret = get_errno(safe_fcntl(fd, cmd, arg));
7163         break;
7164     }
7165     return ret;
7166 }
7167 
7168 #ifdef USE_UID16
7169 
7170 static inline int high2lowuid(int uid)
7171 {
7172     if (uid > 65535)
7173         return 65534;
7174     else
7175         return uid;
7176 }
7177 
7178 static inline int high2lowgid(int gid)
7179 {
7180     if (gid > 65535)
7181         return 65534;
7182     else
7183         return gid;
7184 }
7185 
7186 static inline int low2highuid(int uid)
7187 {
7188     if ((int16_t)uid == -1)
7189         return -1;
7190     else
7191         return uid;
7192 }
7193 
7194 static inline int low2highgid(int gid)
7195 {
7196     if ((int16_t)gid == -1)
7197         return -1;
7198     else
7199         return gid;
7200 }
7201 static inline int tswapid(int id)
7202 {
7203     return tswap16(id);
7204 }
7205 
7206 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7207 
7208 #else /* !USE_UID16 */
7209 static inline int high2lowuid(int uid)
7210 {
7211     return uid;
7212 }
7213 static inline int high2lowgid(int gid)
7214 {
7215     return gid;
7216 }
7217 static inline int low2highuid(int uid)
7218 {
7219     return uid;
7220 }
7221 static inline int low2highgid(int gid)
7222 {
7223     return gid;
7224 }
7225 static inline int tswapid(int id)
7226 {
7227     return tswap32(id);
7228 }
7229 
7230 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7231 
7232 #endif /* USE_UID16 */
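/*
 * With USE_UID16 the guest ABI carries only 16-bit uids/gids: values that
 * do not fit are reported as 65534 (the traditional overflow id), while
 * the 16-bit -1 sentinel is mapped back to -1 so "leave unchanged"
 * arguments keep their meaning.  Without USE_UID16 the helpers are the
 * identity, apart from tswapid() using a 32-bit byte swap.
 */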
7233 
7234 /* We must do direct syscalls for setting UID/GID, because we want to
7235  * implement the Linux system call semantics of "change only for this thread",
7236  * not the libc/POSIX semantics of "change for all threads in process".
7237  * (See http://ewontfix.com/17/ for more details.)
7238  * We use the 32-bit version of the syscalls if present; if it is not
7239  * then either the host architecture supports 32-bit UIDs natively with
7240  * the standard syscall, or the 16-bit UID is the best we can do.
7241  */
7242 #ifdef __NR_setuid32
7243 #define __NR_sys_setuid __NR_setuid32
7244 #else
7245 #define __NR_sys_setuid __NR_setuid
7246 #endif
7247 #ifdef __NR_setgid32
7248 #define __NR_sys_setgid __NR_setgid32
7249 #else
7250 #define __NR_sys_setgid __NR_setgid
7251 #endif
7252 #ifdef __NR_setresuid32
7253 #define __NR_sys_setresuid __NR_setresuid32
7254 #else
7255 #define __NR_sys_setresuid __NR_setresuid
7256 #endif
7257 #ifdef __NR_setresgid32
7258 #define __NR_sys_setresgid __NR_setresgid32
7259 #else
7260 #define __NR_sys_setresgid __NR_setresgid
7261 #endif
7262 #ifdef __NR_setgroups32
7263 #define __NR_sys_setgroups __NR_setgroups32
7264 #else
7265 #define __NR_sys_setgroups __NR_setgroups
7266 #endif
7267 #ifdef __NR_sys_setreuid32
7268 #define __NR_sys_setreuid __NR_setreuid32
7269 #else
7270 #define __NR_sys_setreuid __NR_setreuid
7271 #endif
7272 #ifdef __NR_sys_setregid32
7273 #define __NR_sys_setregid __NR_setregid32
7274 #else
7275 #define __NR_sys_setregid __NR_setregid
7276 #endif
7277 
7278 _syscall1(int, sys_setuid, uid_t, uid)
7279 _syscall1(int, sys_setgid, gid_t, gid)
7280 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7281 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7282 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7283 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7284 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7285 
7286 void syscall_init(void)
7287 {
7288     IOCTLEntry *ie;
7289     const argtype *arg_type;
7290     int size;
7291 
7292     thunk_init(STRUCT_MAX);
7293 
7294 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7295 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7296 #include "syscall_types.h"
7297 #undef STRUCT
7298 #undef STRUCT_SPECIAL
7299 
7300     /* We patch the ioctl size if necessary. We rely on the fact that
7301        no ioctl has all bits set to '1' in the size field. */
7302     ie = ioctl_entries;
7303     while (ie->target_cmd != 0) {
7304         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7305             TARGET_IOC_SIZEMASK) {
7306             arg_type = ie->arg_type;
7307             if (arg_type[0] != TYPE_PTR) {
7308                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7309                         ie->target_cmd);
7310                 exit(1);
7311             }
7312             arg_type++;
7313             size = thunk_type_size(arg_type, 0);
7314             ie->target_cmd = (ie->target_cmd &
7315                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7316                 (size << TARGET_IOC_SIZESHIFT);
7317         }
7318 
7319         /* automatic consistency check if same arch */
7320 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7321     (defined(__x86_64__) && defined(TARGET_X86_64))
7322         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7323             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7324                     ie->name, ie->target_cmd, ie->host_cmd);
7325         }
7326 #endif
7327         ie++;
7328     }
7329 }
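/*
 * Example of the size patching above: an entry declared with all size bits
 * set (TARGET_IOC_SIZEMASK used as a placeholder) has that field replaced
 * by thunk_type_size() of the structure its argument points to, so the
 * guest-visible ioctl number encodes the target's layout of that
 * structure.
 */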
7330 
7331 #ifdef TARGET_NR_truncate64
7332 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7333                                          abi_long arg2,
7334                                          abi_long arg3,
7335                                          abi_long arg4)
7336 {
7337     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7338         arg2 = arg3;
7339         arg3 = arg4;
7340     }
7341     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7342 }
7343 #endif
7344 
7345 #ifdef TARGET_NR_ftruncate64
7346 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7347                                           abi_long arg2,
7348                                           abi_long arg3,
7349                                           abi_long arg4)
7350 {
7351     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7352         arg2 = arg3;
7353         arg3 = arg4;
7354     }
7355     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7356 }
7357 #endif
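/*
 * regpairs_aligned() covers 32-bit ABIs that pass 64-bit syscall arguments
 * in aligned register pairs and therefore insert a padding argument; in
 * that case the two halves of the offset arrive in arg3/arg4 instead of
 * arg2/arg3, and target_offset64() reassembles them into a host 64-bit
 * offset.
 */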
7358 
7359 #if defined(TARGET_NR_timer_settime) || \
7360     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7361 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7362                                                  abi_ulong target_addr)
7363 {
7364     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7365                                 offsetof(struct target_itimerspec,
7366                                          it_interval)) ||
7367         target_to_host_timespec(&host_its->it_value, target_addr +
7368                                 offsetof(struct target_itimerspec,
7369                                          it_value))) {
7370         return -TARGET_EFAULT;
7371     }
7372 
7373     return 0;
7374 }
7375 #endif
7376 
7377 #if defined(TARGET_NR_timer_settime64) || \
7378     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7379 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7380                                                    abi_ulong target_addr)
7381 {
7382     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7383                                   offsetof(struct target__kernel_itimerspec,
7384                                            it_interval)) ||
7385         target_to_host_timespec64(&host_its->it_value, target_addr +
7386                                   offsetof(struct target__kernel_itimerspec,
7387                                            it_value))) {
7388         return -TARGET_EFAULT;
7389     }
7390 
7391     return 0;
7392 }
7393 #endif
7394 
7395 #if ((defined(TARGET_NR_timerfd_gettime) || \
7396       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7397       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7398 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7399                                                  struct itimerspec *host_its)
7400 {
7401     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7402                                                        it_interval),
7403                                 &host_its->it_interval) ||
7404         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7405                                                        it_value),
7406                                 &host_its->it_value)) {
7407         return -TARGET_EFAULT;
7408     }
7409     return 0;
7410 }
7411 #endif
7412 
7413 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7414       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7415       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7416 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7417                                                    struct itimerspec *host_its)
7418 {
7419     if (host_to_target_timespec64(target_addr +
7420                                   offsetof(struct target__kernel_itimerspec,
7421                                            it_interval),
7422                                   &host_its->it_interval) ||
7423         host_to_target_timespec64(target_addr +
7424                                   offsetof(struct target__kernel_itimerspec,
7425                                            it_value),
7426                                   &host_its->it_value)) {
7427         return -TARGET_EFAULT;
7428     }
7429     return 0;
7430 }
7431 #endif
7432 
7433 #if defined(TARGET_NR_adjtimex) || \
7434     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7435 static inline abi_long target_to_host_timex(struct timex *host_tx,
7436                                             abi_long target_addr)
7437 {
7438     struct target_timex *target_tx;
7439 
7440     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7441         return -TARGET_EFAULT;
7442     }
7443 
7444     __get_user(host_tx->modes, &target_tx->modes);
7445     __get_user(host_tx->offset, &target_tx->offset);
7446     __get_user(host_tx->freq, &target_tx->freq);
7447     __get_user(host_tx->maxerror, &target_tx->maxerror);
7448     __get_user(host_tx->esterror, &target_tx->esterror);
7449     __get_user(host_tx->status, &target_tx->status);
7450     __get_user(host_tx->constant, &target_tx->constant);
7451     __get_user(host_tx->precision, &target_tx->precision);
7452     __get_user(host_tx->tolerance, &target_tx->tolerance);
7453     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7454     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7455     __get_user(host_tx->tick, &target_tx->tick);
7456     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7457     __get_user(host_tx->jitter, &target_tx->jitter);
7458     __get_user(host_tx->shift, &target_tx->shift);
7459     __get_user(host_tx->stabil, &target_tx->stabil);
7460     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7461     __get_user(host_tx->calcnt, &target_tx->calcnt);
7462     __get_user(host_tx->errcnt, &target_tx->errcnt);
7463     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7464     __get_user(host_tx->tai, &target_tx->tai);
7465 
7466     unlock_user_struct(target_tx, target_addr, 0);
7467     return 0;
7468 }
7469 
7470 static inline abi_long host_to_target_timex(abi_long target_addr,
7471                                             struct timex *host_tx)
7472 {
7473     struct target_timex *target_tx;
7474 
7475     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7476         return -TARGET_EFAULT;
7477     }
7478 
7479     __put_user(host_tx->modes, &target_tx->modes);
7480     __put_user(host_tx->offset, &target_tx->offset);
7481     __put_user(host_tx->freq, &target_tx->freq);
7482     __put_user(host_tx->maxerror, &target_tx->maxerror);
7483     __put_user(host_tx->esterror, &target_tx->esterror);
7484     __put_user(host_tx->status, &target_tx->status);
7485     __put_user(host_tx->constant, &target_tx->constant);
7486     __put_user(host_tx->precision, &target_tx->precision);
7487     __put_user(host_tx->tolerance, &target_tx->tolerance);
7488     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7489     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7490     __put_user(host_tx->tick, &target_tx->tick);
7491     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7492     __put_user(host_tx->jitter, &target_tx->jitter);
7493     __put_user(host_tx->shift, &target_tx->shift);
7494     __put_user(host_tx->stabil, &target_tx->stabil);
7495     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7496     __put_user(host_tx->calcnt, &target_tx->calcnt);
7497     __put_user(host_tx->errcnt, &target_tx->errcnt);
7498     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7499     __put_user(host_tx->tai, &target_tx->tai);
7500 
7501     unlock_user_struct(target_tx, target_addr, 1);
7502     return 0;
7503 }
7504 #endif
7505 
7506 
7507 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7508 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7509                                               abi_long target_addr)
7510 {
7511     struct target__kernel_timex *target_tx;
7512 
7513     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7514                                  offsetof(struct target__kernel_timex,
7515                                           time))) {
7516         return -TARGET_EFAULT;
7517     }
7518 
7519     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7520         return -TARGET_EFAULT;
7521     }
7522 
7523     __get_user(host_tx->modes, &target_tx->modes);
7524     __get_user(host_tx->offset, &target_tx->offset);
7525     __get_user(host_tx->freq, &target_tx->freq);
7526     __get_user(host_tx->maxerror, &target_tx->maxerror);
7527     __get_user(host_tx->esterror, &target_tx->esterror);
7528     __get_user(host_tx->status, &target_tx->status);
7529     __get_user(host_tx->constant, &target_tx->constant);
7530     __get_user(host_tx->precision, &target_tx->precision);
7531     __get_user(host_tx->tolerance, &target_tx->tolerance);
7532     __get_user(host_tx->tick, &target_tx->tick);
7533     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7534     __get_user(host_tx->jitter, &target_tx->jitter);
7535     __get_user(host_tx->shift, &target_tx->shift);
7536     __get_user(host_tx->stabil, &target_tx->stabil);
7537     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7538     __get_user(host_tx->calcnt, &target_tx->calcnt);
7539     __get_user(host_tx->errcnt, &target_tx->errcnt);
7540     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7541     __get_user(host_tx->tai, &target_tx->tai);
7542 
7543     unlock_user_struct(target_tx, target_addr, 0);
7544     return 0;
7545 }
7546 
7547 static inline abi_long host_to_target_timex64(abi_long target_addr,
7548                                               struct timex *host_tx)
7549 {
7550     struct target__kernel_timex *target_tx;
7551 
7552     if (copy_to_user_timeval64(target_addr +
7553                                offsetof(struct target__kernel_timex, time),
7554                                &host_tx->time)) {
7555         return -TARGET_EFAULT;
7556     }
7557 
7558     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7559         return -TARGET_EFAULT;
7560     }
7561 
7562     __put_user(host_tx->modes, &target_tx->modes);
7563     __put_user(host_tx->offset, &target_tx->offset);
7564     __put_user(host_tx->freq, &target_tx->freq);
7565     __put_user(host_tx->maxerror, &target_tx->maxerror);
7566     __put_user(host_tx->esterror, &target_tx->esterror);
7567     __put_user(host_tx->status, &target_tx->status);
7568     __put_user(host_tx->constant, &target_tx->constant);
7569     __put_user(host_tx->precision, &target_tx->precision);
7570     __put_user(host_tx->tolerance, &target_tx->tolerance);
7571     __put_user(host_tx->tick, &target_tx->tick);
7572     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7573     __put_user(host_tx->jitter, &target_tx->jitter);
7574     __put_user(host_tx->shift, &target_tx->shift);
7575     __put_user(host_tx->stabil, &target_tx->stabil);
7576     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7577     __put_user(host_tx->calcnt, &target_tx->calcnt);
7578     __put_user(host_tx->errcnt, &target_tx->errcnt);
7579     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7580     __put_user(host_tx->tai, &target_tx->tai);
7581 
7582     unlock_user_struct(target_tx, target_addr, 1);
7583     return 0;
7584 }
7585 #endif
7586 
7587 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7588 #define sigev_notify_thread_id _sigev_un._tid
7589 #endif
7590 
7591 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7592                                                abi_ulong target_addr)
7593 {
7594     struct target_sigevent *target_sevp;
7595 
7596     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7597         return -TARGET_EFAULT;
7598     }
7599 
7600     /* This union is awkward on 64 bit systems because it has a 32 bit
7601      * integer and a pointer in it; we follow the conversion approach
7602      * used for handling sigval types in signal.c so the guest should get
7603      * the correct value back even if we did a 64 bit byteswap and it's
7604      * using the 32 bit integer.
7605      */
7606     host_sevp->sigev_value.sival_ptr =
7607         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7608     host_sevp->sigev_signo =
7609         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7610     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7611     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7612 
7613     unlock_user_struct(target_sevp, target_addr, 1);
7614     return 0;
7615 }
7616 
7617 #if defined(TARGET_NR_mlockall)
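/*
 * Translate the guest's mlockall() flag bits into host MCL_* values;
 * MCL_ONFAULT is only passed through when the host headers define it.
 */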
7618 static inline int target_to_host_mlockall_arg(int arg)
7619 {
7620     int result = 0;
7621 
7622     if (arg & TARGET_MCL_CURRENT) {
7623         result |= MCL_CURRENT;
7624     }
7625     if (arg & TARGET_MCL_FUTURE) {
7626         result |= MCL_FUTURE;
7627     }
7628 #ifdef MCL_ONFAULT
7629     if (arg & TARGET_MCL_ONFAULT) {
7630         result |= MCL_ONFAULT;
7631     }
7632 #endif
7633 
7634     return result;
7635 }
7636 #endif
7637 
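/*
 * Translate guest MS_* msync() flags into host values, passing any
 * unrecognised bits through unchanged so the host can reject them.
 */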
7638 static inline int target_to_host_msync_arg(abi_long arg)
7639 {
7640     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7641            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7642            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7643            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7644 }
7645 
7646 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7647      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7648      defined(TARGET_NR_newfstatat))
7649 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7650                                              abi_ulong target_addr,
7651                                              struct stat *host_st)
7652 {
7653 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7654     if (cpu_env->eabi) {
7655         struct target_eabi_stat64 *target_st;
7656 
7657         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7658             return -TARGET_EFAULT;
7659         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7660         __put_user(host_st->st_dev, &target_st->st_dev);
7661         __put_user(host_st->st_ino, &target_st->st_ino);
7662 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7663         __put_user(host_st->st_ino, &target_st->__st_ino);
7664 #endif
7665         __put_user(host_st->st_mode, &target_st->st_mode);
7666         __put_user(host_st->st_nlink, &target_st->st_nlink);
7667         __put_user(host_st->st_uid, &target_st->st_uid);
7668         __put_user(host_st->st_gid, &target_st->st_gid);
7669         __put_user(host_st->st_rdev, &target_st->st_rdev);
7670         __put_user(host_st->st_size, &target_st->st_size);
7671         __put_user(host_st->st_blksize, &target_st->st_blksize);
7672         __put_user(host_st->st_blocks, &target_st->st_blocks);
7673         __put_user(host_st->st_atime, &target_st->target_st_atime);
7674         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7675         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7676 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7677         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7678         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7679         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7680 #endif
7681         unlock_user_struct(target_st, target_addr, 1);
7682     } else
7683 #endif
7684     {
7685 #if defined(TARGET_HAS_STRUCT_STAT64)
7686         struct target_stat64 *target_st;
7687 #else
7688         struct target_stat *target_st;
7689 #endif
7690 
7691         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7692             return -TARGET_EFAULT;
7693         memset(target_st, 0, sizeof(*target_st));
7694         __put_user(host_st->st_dev, &target_st->st_dev);
7695         __put_user(host_st->st_ino, &target_st->st_ino);
7696 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7697         __put_user(host_st->st_ino, &target_st->__st_ino);
7698 #endif
7699         __put_user(host_st->st_mode, &target_st->st_mode);
7700         __put_user(host_st->st_nlink, &target_st->st_nlink);
7701         __put_user(host_st->st_uid, &target_st->st_uid);
7702         __put_user(host_st->st_gid, &target_st->st_gid);
7703         __put_user(host_st->st_rdev, &target_st->st_rdev);
7704         /* XXX: better use of kernel struct */
7705         __put_user(host_st->st_size, &target_st->st_size);
7706         __put_user(host_st->st_blksize, &target_st->st_blksize);
7707         __put_user(host_st->st_blocks, &target_st->st_blocks);
7708         __put_user(host_st->st_atime, &target_st->target_st_atime);
7709         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7710         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7711 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7712         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7713         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7714         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7715 #endif
7716         unlock_user_struct(target_st, target_addr, 1);
7717     }
7718 
7719     return 0;
7720 }
7721 #endif
7722 
7723 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7724 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7725                                             abi_ulong target_addr)
7726 {
7727     struct target_statx *target_stx;
7728 
7729     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7730         return -TARGET_EFAULT;
7731     }
7732     memset(target_stx, 0, sizeof(*target_stx));
7733 
7734     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7735     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7736     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7737     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7738     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7739     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7740     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7741     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7742     __put_user(host_stx->stx_size, &target_stx->stx_size);
7743     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7744     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7745     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7746     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7747     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7748     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7749     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7750     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7751     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7752     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7753     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7754     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7755     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7756     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7757 
7758     unlock_user_struct(target_stx, target_addr, 1);
7759 
7760     return 0;
7761 }
7762 #endif
7763 
7764 static int do_sys_futex(int *uaddr, int op, int val,
7765                          const struct timespec *timeout, int *uaddr2,
7766                          int val3)
7767 {
7768 #if HOST_LONG_BITS == 64
7769 #if defined(__NR_futex)
7770     /* On a 64-bit host, time_t is always 64-bit; there is no _time64 variant. */
7771     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7772 
7773 #endif
7774 #else /* HOST_LONG_BITS == 64 */
7775 #if defined(__NR_futex_time64)
7776     if (sizeof(timeout->tv_sec) == 8) {
7777         /* _time64 function on 32bit arch */
7778         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7779     }
7780 #endif
7781 #if defined(__NR_futex)
7782     /* old function on 32bit arch */
7783     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7784 #endif
7785 #endif /* HOST_LONG_BITS == 64 */
7786     g_assert_not_reached();
7787 }
7788 
7789 static int do_safe_futex(int *uaddr, int op, int val,
7790                          const struct timespec *timeout, int *uaddr2,
7791                          int val3)
7792 {
7793 #if HOST_LONG_BITS == 64
7794 #if defined(__NR_futex)
7795     /* On a 64-bit host, time_t is always 64-bit; there is no _time64 variant. */
7796     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7797 #endif
7798 #else /* HOST_LONG_BITS == 64 */
7799 #if defined(__NR_futex_time64)
7800     if (sizeof(timeout->tv_sec) == 8) {
7801         /* _time64 function on 32bit arch */
7802         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7803                                            val3));
7804     }
7805 #endif
7806 #if defined(__NR_futex)
7807     /* old function on 32bit arch */
7808     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7809 #endif
7810 #endif /* HOST_LONG_BITS == 64 */
7811     return -TARGET_ENOSYS;
7812 }
7813 
7814 /* ??? Using host futex calls even when target atomic operations
7815    are not really atomic probably breaks things.  However, implementing
7816    futexes locally would make it tricky to share futexes between
7817    multiple processes, and they're probably useless anyway because
7818    guest atomic operations won't work either.  */
7819 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
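/*
 * Emulate futex()/futex_time64(): byteswap the values that the kernel
 * compares against guest memory, convert the guest timespec (32-bit or
 * 64-bit layout depending on time64) when one is supplied, and issue the
 * operation on the corresponding host addresses via do_safe_futex().
 */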
7820 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7821                     int op, int val, target_ulong timeout,
7822                     target_ulong uaddr2, int val3)
7823 {
7824     struct timespec ts, *pts = NULL;
7825     void *haddr2 = NULL;
7826     int base_op;
7827 
7828     /* We assume FUTEX_* constants are the same on both host and target. */
7829 #ifdef FUTEX_CMD_MASK
7830     base_op = op & FUTEX_CMD_MASK;
7831 #else
7832     base_op = op;
7833 #endif
7834     switch (base_op) {
7835     case FUTEX_WAIT:
7836     case FUTEX_WAIT_BITSET:
7837         val = tswap32(val);
7838         break;
7839     case FUTEX_WAIT_REQUEUE_PI:
7840         val = tswap32(val);
7841         haddr2 = g2h(cpu, uaddr2);
7842         break;
7843     case FUTEX_LOCK_PI:
7844     case FUTEX_LOCK_PI2:
7845         break;
7846     case FUTEX_WAKE:
7847     case FUTEX_WAKE_BITSET:
7848     case FUTEX_TRYLOCK_PI:
7849     case FUTEX_UNLOCK_PI:
7850         timeout = 0;
7851         break;
7852     case FUTEX_FD:
7853         val = target_to_host_signal(val);
7854         timeout = 0;
7855         break;
7856     case FUTEX_CMP_REQUEUE:
7857     case FUTEX_CMP_REQUEUE_PI:
7858         val3 = tswap32(val3);
7859         /* fall through */
7860     case FUTEX_REQUEUE:
7861     case FUTEX_WAKE_OP:
7862         /*
7863          * For these, the 4th argument is not TIMEOUT, but VAL2.
7864          * But the prototype of do_safe_futex takes a pointer, so
7865          * insert casts to satisfy the compiler.  We do not need
7866          * to tswap VAL2 since it's not compared to guest memory.
7867           */
7868         pts = (struct timespec *)(uintptr_t)timeout;
7869         timeout = 0;
7870         haddr2 = g2h(cpu, uaddr2);
7871         break;
7872     default:
7873         return -TARGET_ENOSYS;
7874     }
7875     if (timeout) {
7876         pts = &ts;
7877         if (time64
7878             ? target_to_host_timespec64(pts, timeout)
7879             : target_to_host_timespec(pts, timeout)) {
7880             return -TARGET_EFAULT;
7881         }
7882     }
7883     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7884 }
7885 #endif
7886 
7887 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
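/*
 * Emulate name_to_handle_at(2): read handle_bytes from the guest handle,
 * call the host, then copy the opaque handle back to guest memory with
 * the header fields byteswapped and store the mount ID at mount_id.
 */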
7888 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7889                                      abi_long handle, abi_long mount_id,
7890                                      abi_long flags)
7891 {
7892     struct file_handle *target_fh;
7893     struct file_handle *fh;
7894     int mid = 0;
7895     abi_long ret;
7896     char *name;
7897     unsigned int size, total_size;
7898 
7899     if (get_user_s32(size, handle)) {
7900         return -TARGET_EFAULT;
7901     }
7902 
7903     name = lock_user_string(pathname);
7904     if (!name) {
7905         return -TARGET_EFAULT;
7906     }
7907 
7908     total_size = sizeof(struct file_handle) + size;
7909     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7910     if (!target_fh) {
7911         unlock_user(name, pathname, 0);
7912         return -TARGET_EFAULT;
7913     }
7914 
7915     fh = g_malloc0(total_size);
7916     fh->handle_bytes = size;
7917 
7918     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7919     unlock_user(name, pathname, 0);
7920 
7921     /* man name_to_handle_at(2):
7922      * Other than the use of the handle_bytes field, the caller should treat
7923      * the file_handle structure as an opaque data type
7924      */
7925 
7926     memcpy(target_fh, fh, total_size);
7927     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7928     target_fh->handle_type = tswap32(fh->handle_type);
7929     g_free(fh);
7930     unlock_user(target_fh, handle, total_size);
7931 
7932     if (put_user_s32(mid, mount_id)) {
7933         return -TARGET_EFAULT;
7934     }
7935 
7936     return ret;
7937 
7938 }
7939 #endif
7940 
7941 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
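/*
 * Emulate open_by_handle_at(2): duplicate the guest's file handle with
 * its header fields converted to host byte order and the open flags
 * translated through fcntl_flags_tbl.
 */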
7942 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7943                                      abi_long flags)
7944 {
7945     struct file_handle *target_fh;
7946     struct file_handle *fh;
7947     unsigned int size, total_size;
7948     abi_long ret;
7949 
7950     if (get_user_s32(size, handle)) {
7951         return -TARGET_EFAULT;
7952     }
7953 
7954     total_size = sizeof(struct file_handle) + size;
7955     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7956     if (!target_fh) {
7957         return -TARGET_EFAULT;
7958     }
7959 
7960     fh = g_memdup(target_fh, total_size);
7961     fh->handle_bytes = size;
7962     fh->handle_type = tswap32(target_fh->handle_type);
7963 
7964     ret = get_errno(open_by_handle_at(mount_fd, fh,
7965                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7966 
7967     g_free(fh);
7968 
7969     unlock_user(target_fh, handle, total_size);
7970 
7971     return ret;
7972 }
7973 #endif
7974 
7975 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7976 
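/*
 * Emulate signalfd4(): reject unsupported flags, convert the guest
 * signal mask and flags to host form, create the host signalfd and
 * register the target_signalfd_trans translator for the new descriptor.
 */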
7977 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7978 {
7979     int host_flags;
7980     target_sigset_t *target_mask;
7981     sigset_t host_mask;
7982     abi_long ret;
7983 
7984     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7985         return -TARGET_EINVAL;
7986     }
7987     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7988         return -TARGET_EFAULT;
7989     }
7990 
7991     target_to_host_sigset(&host_mask, target_mask);
7992 
7993     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7994 
7995     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7996     if (ret >= 0) {
7997         fd_trans_register(ret, &target_signalfd_trans);
7998     }
7999 
8000     unlock_user_struct(target_mask, mask, 0);
8001 
8002     return ret;
8003 }
8004 #endif
8005 
8006 /* Map host to target signal numbers for the wait family of syscalls.
8007    Assume all other status bits are the same.  */
8008 int host_to_target_waitstatus(int status)
8009 {
8010     if (WIFSIGNALED(status)) {
8011         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8012     }
8013     if (WIFSTOPPED(status)) {
8014         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8015                | (status & 0xff);
8016     }
8017     return status;
8018 }
8019 
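/*
 * Synthesize /proc/self/cmdline from the guest's saved argv, writing
 * each argument together with its terminating NUL byte.
 */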
8020 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8021 {
8022     CPUState *cpu = env_cpu(cpu_env);
8023     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8024     int i;
8025 
8026     for (i = 0; i < bprm->argc; i++) {
8027         size_t len = strlen(bprm->argv[i]) + 1;
8028 
8029         if (write(fd, bprm->argv[i], len) != len) {
8030             return -1;
8031         }
8032     }
8033 
8034     return 0;
8035 }
8036 
8037 struct open_self_maps_data {
8038     TaskState *ts;
8039     IntervalTreeRoot *host_maps;
8040     int fd;
8041     bool smaps;
8042 };
8043 
8044 /*
8045  * Subroutine to output one line of /proc/self/maps,
8046  * or one region of /proc/self/smaps.
8047  */
8048 
8049 #ifdef TARGET_HPPA
8050 # define test_stack(S, E, L)  (E == L)
8051 #else
8052 # define test_stack(S, E, L)  (S == L)
8053 #endif
8054 
8055 static void open_self_maps_4(const struct open_self_maps_data *d,
8056                              const MapInfo *mi, abi_ptr start,
8057                              abi_ptr end, unsigned flags)
8058 {
8059     const struct image_info *info = d->ts->info;
8060     const char *path = mi->path;
8061     uint64_t offset;
8062     int fd = d->fd;
8063     int count;
8064 
8065     if (test_stack(start, end, info->stack_limit)) {
8066         path = "[stack]";
8067     } else if (start == info->brk) {
8068         path = "[heap]";
8069     } else if (start == info->vdso) {
8070         path = "[vdso]";
8071 #ifdef TARGET_X86_64
8072     } else if (start == TARGET_VSYSCALL_PAGE) {
8073         path = "[vsyscall]";
8074 #endif
8075     }
8076 
8077     /* Except for the null device (MAP_ANON), adjust offset for this fragment. */
8078     offset = mi->offset;
8079     if (mi->dev) {
8080         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8081         offset += hstart - mi->itree.start;
8082     }
8083 
8084     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8085                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8086                     start, end,
8087                     (flags & PAGE_READ) ? 'r' : '-',
8088                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8089                     (flags & PAGE_EXEC) ? 'x' : '-',
8090                     mi->is_priv ? 'p' : 's',
8091                     offset, major(mi->dev), minor(mi->dev),
8092                     (uint64_t)mi->inode);
8093     if (path) {
8094         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8095     } else {
8096         dprintf(fd, "\n");
8097     }
8098 
8099     if (d->smaps) {
8100         unsigned long size = end - start;
8101         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8102         unsigned long size_kb = size >> 10;
8103 
8104         dprintf(fd, "Size:                  %lu kB\n"
8105                 "KernelPageSize:        %lu kB\n"
8106                 "MMUPageSize:           %lu kB\n"
8107                 "Rss:                   0 kB\n"
8108                 "Pss:                   0 kB\n"
8109                 "Pss_Dirty:             0 kB\n"
8110                 "Shared_Clean:          0 kB\n"
8111                 "Shared_Dirty:          0 kB\n"
8112                 "Private_Clean:         0 kB\n"
8113                 "Private_Dirty:         0 kB\n"
8114                 "Referenced:            0 kB\n"
8115                 "Anonymous:             %lu kB\n"
8116                 "LazyFree:              0 kB\n"
8117                 "AnonHugePages:         0 kB\n"
8118                 "ShmemPmdMapped:        0 kB\n"
8119                 "FilePmdMapped:         0 kB\n"
8120                 "Shared_Hugetlb:        0 kB\n"
8121                 "Private_Hugetlb:       0 kB\n"
8122                 "Swap:                  0 kB\n"
8123                 "SwapPss:               0 kB\n"
8124                 "Locked:                0 kB\n"
8125                 "THPeligible:    0\n"
8126                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8127                 size_kb, page_size_kb, page_size_kb,
8128                 (flags & PAGE_ANON ? size_kb : 0),
8129                 (flags & PAGE_READ) ? " rd" : "",
8130                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8131                 (flags & PAGE_EXEC) ? " ex" : "",
8132                 mi->is_priv ? "" : " sh",
8133                 (flags & PAGE_READ) ? " mr" : "",
8134                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8135                 (flags & PAGE_EXEC) ? " me" : "",
8136                 mi->is_priv ? "" : " ms");
8137     }
8138 }
8139 
8140 /*
8141  * Callback for walk_memory_regions, when read_self_maps() fails.
8142  * Proceed without the benefit of host /proc/self/maps cross-check.
8143  */
8144 static int open_self_maps_3(void *opaque, vaddr guest_start,
8145                             vaddr guest_end, int flags)
8146 {
8147     static const MapInfo mi = { .is_priv = true };
8148 
8149     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8150     return 0;
8151 }
8152 
8153 /*
8154  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8155  */
8156 static int open_self_maps_2(void *opaque, vaddr guest_start,
8157                             vaddr guest_end, int flags)
8158 {
8159     const struct open_self_maps_data *d = opaque;
8160     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8161     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8162 
8163 #ifdef TARGET_X86_64
8164     /*
8165      * Because of the extremely high position of the page within the guest
8166      * virtual address space, this is not backed by host memory at all.
8167      * Therefore the loop below would fail.  This is the only instance
8168      * of not having host backing memory.
8169      */
8170     if (guest_start == TARGET_VSYSCALL_PAGE) {
8171         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8172     }
8173 #endif
8174 
8175     while (1) {
8176         IntervalTreeNode *n =
8177             interval_tree_iter_first(d->host_maps, host_start, host_start);
8178         MapInfo *mi = container_of(n, MapInfo, itree);
8179         uintptr_t this_hlast = MIN(host_last, n->last);
8180         target_ulong this_gend = h2g(this_hlast) + 1;
8181 
8182         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8183 
8184         if (this_hlast == host_last) {
8185             return 0;
8186         }
8187         host_start = this_hlast + 1;
8188         guest_start = h2g(host_start);
8189     }
8190 }
8191 
8192 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8193 {
8194     struct open_self_maps_data d = {
8195         .ts = get_task_state(env_cpu(env)),
8196         .fd = fd,
8197         .smaps = smaps
8198     };
8199 
8200     mmap_lock();
8201     d.host_maps = read_self_maps();
8202     if (d.host_maps) {
8203         walk_memory_regions(&d, open_self_maps_2);
8204         free_self_maps(d.host_maps);
8205     } else {
8206         walk_memory_regions(&d, open_self_maps_3);
8207     }
8208     mmap_unlock();
8209     return 0;
8210 }
8211 
8212 static int open_self_maps(CPUArchState *cpu_env, int fd)
8213 {
8214     return open_self_maps_1(cpu_env, fd, false);
8215 }
8216 
8217 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8218 {
8219     return open_self_maps_1(cpu_env, fd, true);
8220 }
8221 
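/*
 * Synthesize /proc/self/stat: pid, command name, state, ppid, pgid,
 * thread count, start time and stack start are filled in; every other
 * field reads as 0.
 */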
8222 static int open_self_stat(CPUArchState *cpu_env, int fd)
8223 {
8224     CPUState *cpu = env_cpu(cpu_env);
8225     TaskState *ts = get_task_state(cpu);
8226     g_autoptr(GString) buf = g_string_new(NULL);
8227     int i;
8228 
8229     for (i = 0; i < 44; i++) {
8230         if (i == 0) {
8231             /* pid */
8232             g_string_printf(buf, FMT_pid " ", getpid());
8233         } else if (i == 1) {
8234             /* app name */
8235             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8236             bin = bin ? bin + 1 : ts->bprm->argv[0];
8237             g_string_printf(buf, "(%.15s) ", bin);
8238         } else if (i == 2) {
8239             /* task state */
8240             g_string_assign(buf, "R "); /* we are running right now */
8241         } else if (i == 3) {
8242             /* ppid */
8243             g_string_printf(buf, FMT_pid " ", getppid());
8244         } else if (i == 4) {
8245             /* pgid */
8246             g_string_printf(buf, FMT_pid " ", getpgrp());
8247         } else if (i == 19) {
8248             /* num_threads */
8249             int cpus = 0;
8250             WITH_RCU_READ_LOCK_GUARD() {
8251                 CPUState *cpu_iter;
8252                 CPU_FOREACH(cpu_iter) {
8253                     cpus++;
8254                 }
8255             }
8256             g_string_printf(buf, "%d ", cpus);
8257         } else if (i == 21) {
8258             /* starttime */
8259             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8260         } else if (i == 27) {
8261             /* stack bottom */
8262             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8263         } else {
8264             /* all remaining fields are reported as 0 */
8265             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8266         }
8267 
8268         if (write(fd, buf->str, buf->len) != buf->len) {
8269             return -1;
8270         }
8271     }
8272 
8273     return 0;
8274 }
8275 
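/*
 * Expose the guest's saved auxiliary vector as /proc/self/auxv by
 * copying it out of guest memory into the file.
 */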
8276 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8277 {
8278     CPUState *cpu = env_cpu(cpu_env);
8279     TaskState *ts = get_task_state(cpu);
8280     abi_ulong auxv = ts->info->saved_auxv;
8281     abi_ulong len = ts->info->auxv_len;
8282     char *ptr;
8283 
8284     /*
8285      * The auxiliary vector is stored on the target process stack.
8286      * Read the whole auxv vector and copy it to the file.
8287      */
8288     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8289     if (ptr != NULL) {
8290         while (len > 0) {
8291             ssize_t r;
8292             r = write(fd, ptr, len);
8293             if (r <= 0) {
8294                 break;
8295             }
8296             len -= r;
8297             ptr += r;
8298         }
8299         lseek(fd, 0, SEEK_SET);
8300         unlock_user(ptr, auxv, len);
8301     }
8302 
8303     return 0;
8304 }
8305 
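/*
 * Return 1 if filename names the given entry of this process's own proc
 * directory, i.e. /proc/self/<entry> or /proc/<getpid()>/<entry>.
 */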
8306 static int is_proc_myself(const char *filename, const char *entry)
8307 {
8308     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8309         filename += strlen("/proc/");
8310         if (!strncmp(filename, "self/", strlen("self/"))) {
8311             filename += strlen("self/");
8312         } else if (*filename >= '1' && *filename <= '9') {
8313             char myself[80];
8314             snprintf(myself, sizeof(myself), "%d/", getpid());
8315             if (!strncmp(filename, myself, strlen(myself))) {
8316                 filename += strlen(myself);
8317             } else {
8318                 return 0;
8319             }
8320         } else {
8321             return 0;
8322         }
8323         if (!strcmp(filename, entry)) {
8324             return 1;
8325         }
8326     }
8327     return 0;
8328 }
8329 
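/*
 * Dump the exception message, the failing executable path, the CPU state
 * and the guest memory map to logfile, if one is available.
 */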
8330 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8331                       const char *fmt, int code)
8332 {
8333     if (logfile) {
8334         CPUState *cs = env_cpu(env);
8335 
8336         fprintf(logfile, fmt, code);
8337         fprintf(logfile, "Failing executable: %s\n", exec_path);
8338         cpu_dump_state(cs, logfile, 0);
8339         open_self_maps(env, fileno(logfile));
8340     }
8341 }
8342 
8343 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8344 {
8345     /* dump to console */
8346     excp_dump_file(stderr, env, fmt, code);
8347 
8348     /* dump to log file */
8349     if (qemu_log_separate()) {
8350         FILE *logfile = qemu_log_trylock();
8351 
8352         excp_dump_file(logfile, env, fmt, code);
8353         qemu_log_unlock(logfile);
8354     }
8355 }
8356 
8357 #include "target_proc.h"
8358 
8359 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8360     defined(HAVE_ARCH_PROC_CPUINFO) || \
8361     defined(HAVE_ARCH_PROC_HARDWARE)
8362 static int is_proc(const char *filename, const char *entry)
8363 {
8364     return strcmp(filename, entry) == 0;
8365 }
8366 #endif
8367 
8368 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
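/*
 * Re-emit the host's /proc/net/route for a guest of the opposite
 * endianness, byteswapping the destination, gateway and mask columns.
 */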
8369 static int open_net_route(CPUArchState *cpu_env, int fd)
8370 {
8371     FILE *fp;
8372     char *line = NULL;
8373     size_t len = 0;
8374     ssize_t read;
8375 
8376     fp = fopen("/proc/net/route", "r");
8377     if (fp == NULL) {
8378         return -1;
8379     }
8380 
8381     /* read header */
8382 
8383     read = getline(&line, &len, fp);
8384     dprintf(fd, "%s", line);
8385 
8386     /* read routes */
8387 
8388     while ((read = getline(&line, &len, fp)) != -1) {
8389         char iface[16];
8390         uint32_t dest, gw, mask;
8391         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8392         int fields;
8393 
8394         fields = sscanf(line,
8395                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8396                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8397                         &mask, &mtu, &window, &irtt);
8398         if (fields != 11) {
8399             continue;
8400         }
8401         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8402                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8403                 metric, tswap32(mask), mtu, window, irtt);
8404     }
8405 
8406     free(line);
8407     fclose(fp);
8408 
8409     return 0;
8410 }
8411 #endif
8412 
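/*
 * Intercept opens of emulated /proc entries: /proc/self/exe is redirected
 * to the real executable path, and the entries in fakes[] below are
 * synthesized into a memfd (or an unlinked temporary file).  Returns the
 * new fd, -1 with errno set on failure, or -2 if the path is not one we
 * handle here.
 */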
8413 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8414                               const char *fname, int flags, mode_t mode,
8415                               int openat2_resolve, bool safe)
8416 {
8417     g_autofree char *proc_name = NULL;
8418     const char *pathname;
8419     struct fake_open {
8420         const char *filename;
8421         int (*fill)(CPUArchState *cpu_env, int fd);
8422         int (*cmp)(const char *s1, const char *s2);
8423     };
8424     const struct fake_open *fake_open;
8425     static const struct fake_open fakes[] = {
8426         { "maps", open_self_maps, is_proc_myself },
8427         { "smaps", open_self_smaps, is_proc_myself },
8428         { "stat", open_self_stat, is_proc_myself },
8429         { "auxv", open_self_auxv, is_proc_myself },
8430         { "cmdline", open_self_cmdline, is_proc_myself },
8431 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8432         { "/proc/net/route", open_net_route, is_proc },
8433 #endif
8434 #if defined(HAVE_ARCH_PROC_CPUINFO)
8435         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8436 #endif
8437 #if defined(HAVE_ARCH_PROC_HARDWARE)
8438         { "/proc/hardware", open_hardware, is_proc },
8439 #endif
8440         { NULL, NULL, NULL }
8441     };
8442 
8443     /* If this is a file from the /proc/ filesystem, resolve its full name. */
8444     proc_name = realpath(fname, NULL);
8445     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8446         pathname = proc_name;
8447     } else {
8448         pathname = fname;
8449     }
8450 
8451     if (is_proc_myself(pathname, "exe")) {
8452         /* Honor openat2 resolve flags */
8453         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8454             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8455             errno = ELOOP;
8456             return -1;
8457         }
8458         if (safe) {
8459             return safe_openat(dirfd, exec_path, flags, mode);
8460         } else {
8461             return openat(dirfd, exec_path, flags, mode);
8462         }
8463     }
8464 
8465     for (fake_open = fakes; fake_open->filename; fake_open++) {
8466         if (fake_open->cmp(pathname, fake_open->filename)) {
8467             break;
8468         }
8469     }
8470 
8471     if (fake_open->filename) {
8472         const char *tmpdir;
8473         char filename[PATH_MAX];
8474         int fd, r;
8475 
8476         fd = memfd_create("qemu-open", 0);
8477         if (fd < 0) {
8478             if (errno != ENOSYS) {
8479                 return fd;
8480             }
8481             /* fall back to a temporary file to hold the synthesized contents */
8482             tmpdir = getenv("TMPDIR");
8483             if (!tmpdir)
8484                 tmpdir = "/tmp";
8485             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8486             fd = mkstemp(filename);
8487             if (fd < 0) {
8488                 return fd;
8489             }
8490             unlink(filename);
8491         }
8492 
8493         if ((r = fake_open->fill(cpu_env, fd))) {
8494             int e = errno;
8495             close(fd);
8496             errno = e;
8497             return r;
8498         }
8499         lseek(fd, 0, SEEK_SET);
8500 
8501         return fd;
8502     }
8503 
8504     return -2;
8505 }
8506 
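/*
 * Open a path on behalf of the guest: try the emulated /proc handling
 * first, then fall back to a real openat() (via the safe_syscall wrapper
 * when requested) on the path()-translated name.
 */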
8507 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8508                     int flags, mode_t mode, bool safe)
8509 {
8510     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8511     if (fd > -2) {
8512         return fd;
8513     }
8514 
8515     if (safe) {
8516         return safe_openat(dirfd, path(pathname), flags, mode);
8517     } else {
8518         return openat(dirfd, path(pathname), flags, mode);
8519     }
8520 }
8521 
8522 
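/*
 * Emulate openat2(2): copy struct open_how from the guest, rejecting
 * undersized layouts and logging unimplemented larger ones, byteswap its
 * fields, let maybe_do_fake_open() handle emulated /proc paths, and
 * otherwise call the host via safe_openat2().
 */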
8523 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8524                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8525                       abi_ulong guest_size)
8526 {
8527     struct open_how_ver0 how = {0};
8528     char *pathname;
8529     int ret;
8530 
8531     if (guest_size < sizeof(struct target_open_how_ver0)) {
8532         return -TARGET_EINVAL;
8533     }
8534     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8535     if (ret) {
8536         if (ret == -TARGET_E2BIG) {
8537             qemu_log_mask(LOG_UNIMP,
8538                           "Unimplemented openat2 open_how size: "
8539                           TARGET_ABI_FMT_lu "\n", guest_size);
8540         }
8541         return ret;
8542     }
8543     pathname = lock_user_string(guest_pathname);
8544     if (!pathname) {
8545         return -TARGET_EFAULT;
8546     }
8547 
8548     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8549     how.mode = tswap64(how.mode);
8550     how.resolve = tswap64(how.resolve);
8551     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8552                                 how.resolve, true);
8553     if (fd > -2) {
8554         ret = get_errno(fd);
8555     } else {
8556         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8557                                      sizeof(struct open_how_ver0)));
8558     }
8559 
8560     fd_trans_unregister(ret);
8561     unlock_user(pathname, guest_pathname, 0);
8562     return ret;
8563 }
8564 
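/*
 * readlink()/readlinkat() helper: /proc/self/exe reports the emulated
 * executable path (not NUL-terminated, per readlink semantics); anything
 * else is forwarded to the host.
 */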
8565 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8566 {
8567     ssize_t ret;
8568 
8569     if (!pathname || !buf) {
8570         errno = EFAULT;
8571         return -1;
8572     }
8573 
8574     if (!bufsiz) {
8575         /* Short circuit this for the magic exe check. */
8576         errno = EINVAL;
8577         return -1;
8578     }
8579 
8580     if (is_proc_myself((const char *)pathname, "exe")) {
8581         /*
8582          * Don't worry about sign mismatch as earlier mapping
8583          * logic would have thrown a bad address error.
8584          */
8585         ret = MIN(strlen(exec_path), bufsiz);
8586         /* We cannot NUL terminate the string. */
8587         memcpy(buf, exec_path, ret);
8588     } else {
8589         ret = readlink(path(pathname), buf, bufsiz);
8590     }
8591 
8592     return ret;
8593 }
8594 
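/*
 * Common implementation of execve() and execveat(): count and lock the
 * guest argv/envp string arrays, redirect /proc/self/exe to the real
 * executable path, and run the exec through the safe_syscall wrapper
 * (see the comment below for why that matters).
 */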
8595 static int do_execv(CPUArchState *cpu_env, int dirfd,
8596                     abi_long pathname, abi_long guest_argp,
8597                     abi_long guest_envp, int flags, bool is_execveat)
8598 {
8599     int ret;
8600     char **argp, **envp;
8601     int argc, envc;
8602     abi_ulong gp;
8603     abi_ulong addr;
8604     char **q;
8605     void *p;
8606 
8607     argc = 0;
8608 
8609     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8610         if (get_user_ual(addr, gp)) {
8611             return -TARGET_EFAULT;
8612         }
8613         if (!addr) {
8614             break;
8615         }
8616         argc++;
8617     }
8618     envc = 0;
8619     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8620         if (get_user_ual(addr, gp)) {
8621             return -TARGET_EFAULT;
8622         }
8623         if (!addr) {
8624             break;
8625         }
8626         envc++;
8627     }
8628 
8629     argp = g_new0(char *, argc + 1);
8630     envp = g_new0(char *, envc + 1);
8631 
8632     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8633         if (get_user_ual(addr, gp)) {
8634             goto execve_efault;
8635         }
8636         if (!addr) {
8637             break;
8638         }
8639         *q = lock_user_string(addr);
8640         if (!*q) {
8641             goto execve_efault;
8642         }
8643     }
8644     *q = NULL;
8645 
8646     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8647         if (get_user_ual(addr, gp)) {
8648             goto execve_efault;
8649         }
8650         if (!addr) {
8651             break;
8652         }
8653         *q = lock_user_string(addr);
8654         if (!*q) {
8655             goto execve_efault;
8656         }
8657     }
8658     *q = NULL;
8659 
8660     /*
8661      * Although execve() is not an interruptible syscall it is
8662      * a special case where we must use the safe_syscall wrapper:
8663      * if we allow a signal to happen before we make the host
8664      * syscall then we will 'lose' it, because at the point of
8665      * execve the process leaves QEMU's control. So we use the
8666      * safe syscall wrapper to ensure that we either take the
8667      * signal as a guest signal, or else it does not happen
8668      * before the execve completes and makes it the other
8669      * program's problem.
8670      */
8671     p = lock_user_string(pathname);
8672     if (!p) {
8673         goto execve_efault;
8674     }
8675 
8676     const char *exe = p;
8677     if (is_proc_myself(p, "exe")) {
8678         exe = exec_path;
8679     }
8680     ret = is_execveat
8681         ? safe_execveat(dirfd, exe, argp, envp, flags)
8682         : safe_execve(exe, argp, envp);
8683     ret = get_errno(ret);
8684 
8685     unlock_user(p, pathname, 0);
8686 
8687     goto execve_end;
8688 
8689 execve_efault:
8690     ret = -TARGET_EFAULT;
8691 
8692 execve_end:
8693     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8694         if (get_user_ual(addr, gp) || !addr) {
8695             break;
8696         }
8697         unlock_user(*q, addr, 0);
8698     }
8699     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8700         if (get_user_ual(addr, gp) || !addr) {
8701             break;
8702         }
8703         unlock_user(*q, addr, 0);
8704     }
8705 
8706     g_free(argp);
8707     g_free(envp);
8708     return ret;
8709 }
8710 
8711 #define TIMER_MAGIC 0x0caf0000
8712 #define TIMER_MAGIC_MASK 0xffff0000
8713 
8714 /* Convert QEMU provided timer ID back to internal 16bit index format */
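/*
 * Illustrative example: an ID of 0x0caf0003 passes the magic check and
 * yields index 3 (assuming that slot exists in g_posix_timers); any value
 * lacking the 0x0caf0000 magic is rejected with -TARGET_EINVAL.
 */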
8715 static target_timer_t get_timer_id(abi_long arg)
8716 {
8717     target_timer_t timerid = arg;
8718 
8719     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8720         return -TARGET_EINVAL;
8721     }
8722 
8723     timerid &= 0xffff;
8724 
8725     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8726         return -TARGET_EINVAL;
8727     }
8728 
8729     return timerid;
8730 }
8731 
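/*
 * Unpack a guest CPU affinity mask, stored as an array of abi_ulong
 * words, into a host "unsigned long" bitmap of at least the same size.
 */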
8732 static int target_to_host_cpu_mask(unsigned long *host_mask,
8733                                    size_t host_size,
8734                                    abi_ulong target_addr,
8735                                    size_t target_size)
8736 {
8737     unsigned target_bits = sizeof(abi_ulong) * 8;
8738     unsigned host_bits = sizeof(*host_mask) * 8;
8739     abi_ulong *target_mask;
8740     unsigned i, j;
8741 
8742     assert(host_size >= target_size);
8743 
8744     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8745     if (!target_mask) {
8746         return -TARGET_EFAULT;
8747     }
8748     memset(host_mask, 0, host_size);
8749 
8750     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8751         unsigned bit = i * target_bits;
8752         abi_ulong val;
8753 
8754         __get_user(val, &target_mask[i]);
8755         for (j = 0; j < target_bits; j++, bit++) {
8756             if (val & (1UL << j)) {
8757                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8758             }
8759         }
8760     }
8761 
8762     unlock_user(target_mask, target_addr, 0);
8763     return 0;
8764 }
8765 
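/*
 * The inverse of target_to_host_cpu_mask(): repack a host CPU bitmap
 * into the guest's abi_ulong mask layout.
 */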
8766 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8767                                    size_t host_size,
8768                                    abi_ulong target_addr,
8769                                    size_t target_size)
8770 {
8771     unsigned target_bits = sizeof(abi_ulong) * 8;
8772     unsigned host_bits = sizeof(*host_mask) * 8;
8773     abi_ulong *target_mask;
8774     unsigned i, j;
8775 
8776     assert(host_size >= target_size);
8777 
8778     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8779     if (!target_mask) {
8780         return -TARGET_EFAULT;
8781     }
8782 
8783     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8784         unsigned bit = i * target_bits;
8785         abi_ulong val = 0;
8786 
8787         for (j = 0; j < target_bits; j++, bit++) {
8788             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8789                 val |= 1UL << j;
8790             }
8791         }
8792         __put_user(val, &target_mask[i]);
8793     }
8794 
8795     unlock_user(target_mask, target_addr, target_size);
8796     return 0;
8797 }
8798 
8799 #ifdef TARGET_NR_getdents
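/*
 * Emulate getdents(): read host dirents (using the legacy host syscall
 * only when EMULATE_GETDENTS_WITH_GETDENTS requires it), then repack
 * each record into the guest's struct target_dirent layout, storing
 * d_type in the final byte of each record.
 */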
8800 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8801 {
8802     g_autofree void *hdirp = NULL;
8803     void *tdirp;
8804     int hlen, hoff, toff;
8805     int hreclen, treclen;
8806     off_t prev_diroff = 0;
8807 
8808     hdirp = g_try_malloc(count);
8809     if (!hdirp) {
8810         return -TARGET_ENOMEM;
8811     }
8812 
8813 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8814     hlen = sys_getdents(dirfd, hdirp, count);
8815 #else
8816     hlen = sys_getdents64(dirfd, hdirp, count);
8817 #endif
8818 
8819     hlen = get_errno(hlen);
8820     if (is_error(hlen)) {
8821         return hlen;
8822     }
8823 
8824     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8825     if (!tdirp) {
8826         return -TARGET_EFAULT;
8827     }
8828 
8829     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8830 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8831         struct linux_dirent *hde = hdirp + hoff;
8832 #else
8833         struct linux_dirent64 *hde = hdirp + hoff;
8834 #endif
8835         struct target_dirent *tde = tdirp + toff;
8836         int namelen;
8837         uint8_t type;
8838 
8839         namelen = strlen(hde->d_name);
8840         hreclen = hde->d_reclen;
8841         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8842         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8843 
8844         if (toff + treclen > count) {
8845             /*
8846              * If the host struct is smaller than the target struct, or
8847              * requires less alignment and thus packs into less space,
8848              * then the host can return more entries than we can pass
8849              * on to the guest.
8850              */
8851             if (toff == 0) {
8852                 toff = -TARGET_EINVAL; /* result buffer is too small */
8853                 break;
8854             }
8855             /*
8856              * Return what we have, resetting the file pointer to the
8857              * location of the first record not returned.
8858              */
8859             lseek(dirfd, prev_diroff, SEEK_SET);
8860             break;
8861         }
8862 
8863         prev_diroff = hde->d_off;
8864         tde->d_ino = tswapal(hde->d_ino);
8865         tde->d_off = tswapal(hde->d_off);
8866         tde->d_reclen = tswap16(treclen);
8867         memcpy(tde->d_name, hde->d_name, namelen + 1);
8868 
8869         /*
8870          * The getdents type is in what was formerly a padding byte at the
8871          * end of the structure.
8872          */
8873 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8874         type = *((uint8_t *)hde + hreclen - 1);
8875 #else
8876         type = hde->d_type;
8877 #endif
8878         *((uint8_t *)tde + treclen - 1) = type;
8879     }
8880 
8881     unlock_user(tdirp, arg2, toff);
8882     return toff;
8883 }
8884 #endif /* TARGET_NR_getdents */
8885 
8886 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
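/*
 * Emulate getdents64(): the same repacking as do_getdents(), but both
 * host and guest use the 64-bit dirent layout, so d_type is copied
 * directly and only the fixed-width fields need byteswapping.
 */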
8887 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8888 {
8889     g_autofree void *hdirp = NULL;
8890     void *tdirp;
8891     int hlen, hoff, toff;
8892     int hreclen, treclen;
8893     off_t prev_diroff = 0;
8894 
8895     hdirp = g_try_malloc(count);
8896     if (!hdirp) {
8897         return -TARGET_ENOMEM;
8898     }
8899 
8900     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8901     if (is_error(hlen)) {
8902         return hlen;
8903     }
8904 
8905     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8906     if (!tdirp) {
8907         return -TARGET_EFAULT;
8908     }
8909 
8910     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8911         struct linux_dirent64 *hde = hdirp + hoff;
8912         struct target_dirent64 *tde = tdirp + toff;
8913         int namelen;
8914 
8915         namelen = strlen(hde->d_name) + 1;
8916         hreclen = hde->d_reclen;
8917         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8918         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8919 
8920         if (toff + treclen > count) {
8921             /*
8922              * If the host struct is smaller than the target struct, or
8923              * requires less alignment and thus packs into less space,
8924              * then the host can return more entries than we can pass
8925              * on to the guest.
8926              */
8927             if (toff == 0) {
8928                 toff = -TARGET_EINVAL; /* result buffer is too small */
8929                 break;
8930             }
8931             /*
8932              * Return what we have, resetting the file pointer to the
8933              * location of the first record not returned.
8934              */
8935             lseek(dirfd, prev_diroff, SEEK_SET);
8936             break;
8937         }
8938 
8939         prev_diroff = hde->d_off;
8940         tde->d_ino = tswap64(hde->d_ino);
8941         tde->d_off = tswap64(hde->d_off);
8942         tde->d_reclen = tswap16(treclen);
8943         tde->d_type = hde->d_type;
8944         memcpy(tde->d_name, hde->d_name, namelen);
8945     }
8946 
8947     unlock_user(tdirp, arg2, toff);
8948     return toff;
8949 }
8950 #endif /* TARGET_NR_getdents64 */
8951 
8952 #if defined(TARGET_NR_riscv_hwprobe)
8953 
8954 #define RISCV_HWPROBE_KEY_MVENDORID     0
8955 #define RISCV_HWPROBE_KEY_MARCHID       1
8956 #define RISCV_HWPROBE_KEY_MIMPID        2
8957 
8958 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8959 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8960 
8961 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8962 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8963 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8964 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8965 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8966 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8967 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8968 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8969 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8970 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8971 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8972 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8973 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8974 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8975 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8976 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8977 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8978 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8979 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8980 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8981 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8982 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8983 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8984 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8985 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8986 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8987 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8988 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8989 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8990 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8991 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8992 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8993 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8994 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8995 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8996 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8997 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8998 
8999 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9000 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9001 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9002 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9003 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9004 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9005 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9006 
9007 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9008 
9009 struct riscv_hwprobe {
9010     abi_llong  key;
9011     abi_ullong value;
9012 };
9013 
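/*
 * Illustrative guest-side usage (a sketch, not part of this file): the
 * guest fills in the keys it cares about and the emulation fills in the
 * values, e.g.
 *
 *     struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *     syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *     if (pair.value & RISCV_HWPROBE_IMA_V) {
 *         // the vector extension is available
 *     }
 *
 * Keys we do not recognize are handed back to the guest with key == -1
 * (see the default case below).
 */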
9014 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9015                                     struct riscv_hwprobe *pair,
9016                                     size_t pair_count)
9017 {
9018     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9019 
9020     for (; pair_count > 0; pair_count--, pair++) {
9021         abi_llong key;
9022         abi_ullong value;
9023         __put_user(0, &pair->value);
9024         __get_user(key, &pair->key);
9025         switch (key) {
9026         case RISCV_HWPROBE_KEY_MVENDORID:
9027             __put_user(cfg->mvendorid, &pair->value);
9028             break;
9029         case RISCV_HWPROBE_KEY_MARCHID:
9030             __put_user(cfg->marchid, &pair->value);
9031             break;
9032         case RISCV_HWPROBE_KEY_MIMPID:
9033             __put_user(cfg->mimpid, &pair->value);
9034             break;
9035         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9036             value = riscv_has_ext(env, RVI) &&
9037                     riscv_has_ext(env, RVM) &&
9038                     riscv_has_ext(env, RVA) ?
9039                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9040             __put_user(value, &pair->value);
9041             break;
9042         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9043             value = riscv_has_ext(env, RVF) &&
9044                     riscv_has_ext(env, RVD) ?
9045                     RISCV_HWPROBE_IMA_FD : 0;
9046             value |= riscv_has_ext(env, RVC) ?
9047                      RISCV_HWPROBE_IMA_C : 0;
9048             value |= riscv_has_ext(env, RVV) ?
9049                      RISCV_HWPROBE_IMA_V : 0;
9050             value |= cfg->ext_zba ?
9051                      RISCV_HWPROBE_EXT_ZBA : 0;
9052             value |= cfg->ext_zbb ?
9053                      RISCV_HWPROBE_EXT_ZBB : 0;
9054             value |= cfg->ext_zbs ?
9055                      RISCV_HWPROBE_EXT_ZBS : 0;
9056             value |= cfg->ext_zicboz ?
9057                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9058             value |= cfg->ext_zbc ?
9059                      RISCV_HWPROBE_EXT_ZBC : 0;
9060             value |= cfg->ext_zbkb ?
9061                      RISCV_HWPROBE_EXT_ZBKB : 0;
9062             value |= cfg->ext_zbkc ?
9063                      RISCV_HWPROBE_EXT_ZBKC : 0;
9064             value |= cfg->ext_zbkx ?
9065                      RISCV_HWPROBE_EXT_ZBKX : 0;
9066             value |= cfg->ext_zknd ?
9067                      RISCV_HWPROBE_EXT_ZKND : 0;
9068             value |= cfg->ext_zkne ?
9069                      RISCV_HWPROBE_EXT_ZKNE : 0;
9070             value |= cfg->ext_zknh ?
9071                      RISCV_HWPROBE_EXT_ZKNH : 0;
9072             value |= cfg->ext_zksed ?
9073                      RISCV_HWPROBE_EXT_ZKSED : 0;
9074             value |= cfg->ext_zksh ?
9075                      RISCV_HWPROBE_EXT_ZKSH : 0;
9076             value |= cfg->ext_zkt ?
9077                      RISCV_HWPROBE_EXT_ZKT : 0;
9078             value |= cfg->ext_zvbb ?
9079                      RISCV_HWPROBE_EXT_ZVBB : 0;
9080             value |= cfg->ext_zvbc ?
9081                      RISCV_HWPROBE_EXT_ZVBC : 0;
9082             value |= cfg->ext_zvkb ?
9083                      RISCV_HWPROBE_EXT_ZVKB : 0;
9084             value |= cfg->ext_zvkg ?
9085                      RISCV_HWPROBE_EXT_ZVKG : 0;
9086             value |= cfg->ext_zvkned ?
9087                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9088             value |= cfg->ext_zvknha ?
9089                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9090             value |= cfg->ext_zvknhb ?
9091                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9092             value |= cfg->ext_zvksed ?
9093                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9094             value |= cfg->ext_zvksh ?
9095                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9096             value |= cfg->ext_zvkt ?
9097                      RISCV_HWPROBE_EXT_ZVKT : 0;
9098             value |= cfg->ext_zfh ?
9099                      RISCV_HWPROBE_EXT_ZFH : 0;
9100             value |= cfg->ext_zfhmin ?
9101                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9102             value |= cfg->ext_zihintntl ?
9103                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9104             value |= cfg->ext_zvfh ?
9105                      RISCV_HWPROBE_EXT_ZVFH : 0;
9106             value |= cfg->ext_zvfhmin ?
9107                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9108             value |= cfg->ext_zfa ?
9109                      RISCV_HWPROBE_EXT_ZFA : 0;
9110             value |= cfg->ext_ztso ?
9111                      RISCV_HWPROBE_EXT_ZTSO : 0;
9112             value |= cfg->ext_zacas ?
9113                      RISCV_HWPROBE_EXT_ZACAS : 0;
9114             value |= cfg->ext_zicond ?
9115                      RISCV_HWPROBE_EXT_ZICOND : 0;
9116             __put_user(value, &pair->value);
9117             break;
9118         case RISCV_HWPROBE_KEY_CPUPERF_0:
9119             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9120             break;
9121         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9122             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9123             __put_user(value, &pair->value);
9124             break;
9125         default:
9126             __put_user(-1, &pair->key);
9127             break;
9128         }
9129     }
9130 }
9131 
9132 /*
9133  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9134  * If the cpumask_t has no bits set: -EINVAL.
9135  * Otherwise the cpumask_t contains some bit set: 0.
9136  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9137  * nor bound the search by cpumask_size().
9138  */
9139 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9140 {
9141     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9142     int ret = -TARGET_EFAULT;
9143 
9144     if (p) {
9145         ret = -TARGET_EINVAL;
9146         /*
9147          * Since we only care about the empty/non-empty state of the cpumask_t
9148          * not the individual bits, we do not need to repartition the bits
9149          * from target abi_ulong to host unsigned long.
9150          *
9151          * Note that the kernel does not round up cpusetsize to a multiple of
9152          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9153          * it copies exactly cpusetsize bytes into a zeroed buffer.
9154          */
9155         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9156             if (p[i]) {
9157                 ret = 0;
9158                 break;
9159             }
9160         }
9161         unlock_user(p, target_cpus, 0);
9162     }
9163     return ret;
9164 }
9165 
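/*
 * riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags) arrives here as
 * arg1..arg5.  flags must be zero and, if a cpu set is supplied, it must
 * contain at least one set bit; each requested key/value pair is then
 * filled in directly in guest memory.
 */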
9166 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9167                                  abi_long arg2, abi_long arg3,
9168                                  abi_long arg4, abi_long arg5)
9169 {
9170     int ret;
9171     struct riscv_hwprobe *host_pairs;
9172 
9173     /* flags must be 0 */
9174     if (arg5 != 0) {
9175         return -TARGET_EINVAL;
9176     }
9177 
9178     /* check cpu_set */
9179     if (arg3 != 0) {
9180         ret = nonempty_cpu_set(arg3, arg4);
9181         if (ret != 0) {
9182             return ret;
9183         }
9184     } else if (arg4 != 0) {
9185         return -TARGET_EINVAL;
9186     }
9187 
9188     /* no pairs */
9189     if (arg2 == 0) {
9190         return 0;
9191     }
9192 
9193     host_pairs = lock_user(VERIFY_WRITE, arg1,
9194                            sizeof(*host_pairs) * (size_t)arg2, 0);
9195     if (host_pairs == NULL) {
9196         return -TARGET_EFAULT;
9197     }
9198     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9199     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9200     return 0;
9201 }
9202 #endif /* TARGET_NR_riscv_hwprobe */
9203 
9204 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9205 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9206 #endif
9207 
9208 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9209 #define __NR_sys_open_tree __NR_open_tree
9210 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9211           unsigned int, __flags)
9212 #endif
9213 
9214 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9215 #define __NR_sys_move_mount __NR_move_mount
9216 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9217            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9218 #endif
9219 
9220 /* This is an internal helper for do_syscall, giving it a single return
9221  * point so that actions such as logging of syscall results can be
9222  * performed in one place.
9223  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9224  */
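/*
 * For example, a host openat() failing with ENOENT must come back to the
 * guest as -TARGET_ENOENT; the get_errno() calls used throughout the cases
 * below take care of that host-to-target errno conversion.
 */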
9225 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9226                             abi_long arg2, abi_long arg3, abi_long arg4,
9227                             abi_long arg5, abi_long arg6, abi_long arg7,
9228                             abi_long arg8)
9229 {
9230     CPUState *cpu = env_cpu(cpu_env);
9231     abi_long ret;
9232 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9233     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9234     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9235     || defined(TARGET_NR_statx)
9236     struct stat st;
9237 #endif
9238 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9239     || defined(TARGET_NR_fstatfs)
9240     struct statfs stfs;
9241 #endif
9242     void *p;
9243 
9244     switch(num) {
9245     case TARGET_NR_exit:
9246         /* In old applications this may be used to implement _exit(2).
9247            However in threaded applications it is used for thread termination,
9248            and _exit_group is used for application termination.
9249            Do thread termination if we have more than one thread.  */
9250 
9251         if (block_signals()) {
9252             return -QEMU_ERESTARTSYS;
9253         }
9254 
9255         pthread_mutex_lock(&clone_lock);
9256 
9257         if (CPU_NEXT(first_cpu)) {
9258             TaskState *ts = get_task_state(cpu);
9259 
9260             if (ts->child_tidptr) {
9261                 put_user_u32(0, ts->child_tidptr);
9262                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9263                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9264             }
9265 
9266             object_unparent(OBJECT(cpu));
9267             object_unref(OBJECT(cpu));
9268             /*
9269              * At this point the CPU should be unrealized and removed
9270              * from cpu lists. We can clean-up the rest of the thread
9271              * data without the lock held.
9272              */
9273 
9274             pthread_mutex_unlock(&clone_lock);
9275 
9276             thread_cpu = NULL;
9277             g_free(ts);
9278             rcu_unregister_thread();
9279             pthread_exit(NULL);
9280         }
9281 
9282         pthread_mutex_unlock(&clone_lock);
9283         preexit_cleanup(cpu_env, arg1);
9284         _exit(arg1);
9285         return 0; /* avoid warning */
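    /*
     * Note on the fd_trans_*() hooks in the read/write cases below: file
     * descriptors with a registered translator need their data converted
     * between host and target formats, so read() filters the data
     * host->target after the host syscall, while write() converts a copy
     * target->host before issuing it.
     */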
9286     case TARGET_NR_read:
9287         if (arg2 == 0 && arg3 == 0) {
9288             return get_errno(safe_read(arg1, 0, 0));
9289         } else {
9290             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9291                 return -TARGET_EFAULT;
9292             ret = get_errno(safe_read(arg1, p, arg3));
9293             if (ret >= 0 &&
9294                 fd_trans_host_to_target_data(arg1)) {
9295                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9296             }
9297             unlock_user(p, arg2, ret);
9298         }
9299         return ret;
9300     case TARGET_NR_write:
9301         if (arg2 == 0 && arg3 == 0) {
9302             return get_errno(safe_write(arg1, 0, 0));
9303         }
9304         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9305             return -TARGET_EFAULT;
9306         if (fd_trans_target_to_host_data(arg1)) {
9307             void *copy = g_malloc(arg3);
9308             memcpy(copy, p, arg3);
9309             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9310             if (ret >= 0) {
9311                 ret = get_errno(safe_write(arg1, copy, ret));
9312             }
9313             g_free(copy);
9314         } else {
9315             ret = get_errno(safe_write(arg1, p, arg3));
9316         }
9317         unlock_user(p, arg2, 0);
9318         return ret;
9319 
9320 #ifdef TARGET_NR_open
9321     case TARGET_NR_open:
9322         if (!(p = lock_user_string(arg1)))
9323             return -TARGET_EFAULT;
9324         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9325                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9326                                   arg3, true));
9327         fd_trans_unregister(ret);
9328         unlock_user(p, arg1, 0);
9329         return ret;
9330 #endif
9331     case TARGET_NR_openat:
9332         if (!(p = lock_user_string(arg2)))
9333             return -TARGET_EFAULT;
9334         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9335                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9336                                   arg4, true));
9337         fd_trans_unregister(ret);
9338         unlock_user(p, arg2, 0);
9339         return ret;
9340     case TARGET_NR_openat2:
9341         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9342         return ret;
9343 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9344     case TARGET_NR_name_to_handle_at:
9345         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9346         return ret;
9347 #endif
9348 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9349     case TARGET_NR_open_by_handle_at:
9350         ret = do_open_by_handle_at(arg1, arg2, arg3);
9351         fd_trans_unregister(ret);
9352         return ret;
9353 #endif
9354 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9355     case TARGET_NR_pidfd_open:
9356         return get_errno(pidfd_open(arg1, arg2));
9357 #endif
9358 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9359     case TARGET_NR_pidfd_send_signal:
9360         {
9361             siginfo_t uinfo, *puinfo;
9362 
9363             if (arg3) {
9364                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9365                 if (!p) {
9366                     return -TARGET_EFAULT;
9367                 }
9368                 target_to_host_siginfo(&uinfo, p);
9369                 unlock_user(p, arg3, 0);
9370                 puinfo = &uinfo;
9371             } else {
9372                 puinfo = NULL;
9373             }
9374             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9375                                               puinfo, arg4));
9376         }
9377         return ret;
9378 #endif
9379 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9380     case TARGET_NR_pidfd_getfd:
9381         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9382 #endif
9383     case TARGET_NR_close:
9384         fd_trans_unregister(arg1);
9385         return get_errno(close(arg1));
9386 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9387     case TARGET_NR_close_range:
9388         ret = get_errno(sys_close_range(arg1, arg2, arg3));
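        /*
         * Drop the fd translators for descriptors that were actually closed.
         * With CLOSE_RANGE_CLOEXEC the kernel only marks the range
         * close-on-exec rather than closing it now, so keep them in that
         * case.
         */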
9389         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9390             abi_long fd, maxfd;
9391             maxfd = MIN(arg2, target_fd_max);
9392             for (fd = arg1; fd < maxfd; fd++) {
9393                 fd_trans_unregister(fd);
9394             }
9395         }
9396         return ret;
9397 #endif
9398 
9399     case TARGET_NR_brk:
9400         return do_brk(arg1);
9401 #ifdef TARGET_NR_fork
9402     case TARGET_NR_fork:
9403         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9404 #endif
9405 #ifdef TARGET_NR_waitpid
9406     case TARGET_NR_waitpid:
9407         {
9408             int status;
9409             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9410             if (!is_error(ret) && arg2 && ret
9411                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9412                 return -TARGET_EFAULT;
9413         }
9414         return ret;
9415 #endif
9416 #ifdef TARGET_NR_waitid
9417     case TARGET_NR_waitid:
9418         {
9419             struct rusage ru;
9420             siginfo_t info;
9421 
9422             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9423                                         arg4, (arg5 ? &ru : NULL)));
9424             if (!is_error(ret)) {
9425                 if (arg3) {
9426                     p = lock_user(VERIFY_WRITE, arg3,
9427                                   sizeof(target_siginfo_t), 0);
9428                     if (!p) {
9429                         return -TARGET_EFAULT;
9430                     }
9431                     host_to_target_siginfo(p, &info);
9432                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9433                 }
9434                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9435                     return -TARGET_EFAULT;
9436                 }
9437             }
9438         }
9439         return ret;
9440 #endif
9441 #ifdef TARGET_NR_creat /* not on alpha */
9442     case TARGET_NR_creat:
9443         if (!(p = lock_user_string(arg1)))
9444             return -TARGET_EFAULT;
9445         ret = get_errno(creat(p, arg2));
9446         fd_trans_unregister(ret);
9447         unlock_user(p, arg1, 0);
9448         return ret;
9449 #endif
9450 #ifdef TARGET_NR_link
9451     case TARGET_NR_link:
9452         {
9453             void * p2;
9454             p = lock_user_string(arg1);
9455             p2 = lock_user_string(arg2);
9456             if (!p || !p2)
9457                 ret = -TARGET_EFAULT;
9458             else
9459                 ret = get_errno(link(p, p2));
9460             unlock_user(p2, arg2, 0);
9461             unlock_user(p, arg1, 0);
9462         }
9463         return ret;
9464 #endif
9465 #if defined(TARGET_NR_linkat)
9466     case TARGET_NR_linkat:
9467         {
9468             void * p2 = NULL;
9469             if (!arg2 || !arg4)
9470                 return -TARGET_EFAULT;
9471             p  = lock_user_string(arg2);
9472             p2 = lock_user_string(arg4);
9473             if (!p || !p2)
9474                 ret = -TARGET_EFAULT;
9475             else
9476                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9477             unlock_user(p, arg2, 0);
9478             unlock_user(p2, arg4, 0);
9479         }
9480         return ret;
9481 #endif
9482 #ifdef TARGET_NR_unlink
9483     case TARGET_NR_unlink:
9484         if (!(p = lock_user_string(arg1)))
9485             return -TARGET_EFAULT;
9486         ret = get_errno(unlink(p));
9487         unlock_user(p, arg1, 0);
9488         return ret;
9489 #endif
9490 #if defined(TARGET_NR_unlinkat)
9491     case TARGET_NR_unlinkat:
9492         if (!(p = lock_user_string(arg2)))
9493             return -TARGET_EFAULT;
9494         ret = get_errno(unlinkat(arg1, p, arg3));
9495         unlock_user(p, arg2, 0);
9496         return ret;
9497 #endif
9498     case TARGET_NR_execveat:
9499         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9500     case TARGET_NR_execve:
9501         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9502     case TARGET_NR_chdir:
9503         if (!(p = lock_user_string(arg1)))
9504             return -TARGET_EFAULT;
9505         ret = get_errno(chdir(p));
9506         unlock_user(p, arg1, 0);
9507         return ret;
9508 #ifdef TARGET_NR_time
9509     case TARGET_NR_time:
9510         {
9511             time_t host_time;
9512             ret = get_errno(time(&host_time));
9513             if (!is_error(ret)
9514                 && arg1
9515                 && put_user_sal(host_time, arg1))
9516                 return -TARGET_EFAULT;
9517         }
9518         return ret;
9519 #endif
9520 #ifdef TARGET_NR_mknod
9521     case TARGET_NR_mknod:
9522         if (!(p = lock_user_string(arg1)))
9523             return -TARGET_EFAULT;
9524         ret = get_errno(mknod(p, arg2, arg3));
9525         unlock_user(p, arg1, 0);
9526         return ret;
9527 #endif
9528 #if defined(TARGET_NR_mknodat)
9529     case TARGET_NR_mknodat:
9530         if (!(p = lock_user_string(arg2)))
9531             return -TARGET_EFAULT;
9532         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9533         unlock_user(p, arg2, 0);
9534         return ret;
9535 #endif
9536 #ifdef TARGET_NR_chmod
9537     case TARGET_NR_chmod:
9538         if (!(p = lock_user_string(arg1)))
9539             return -TARGET_EFAULT;
9540         ret = get_errno(chmod(p, arg2));
9541         unlock_user(p, arg1, 0);
9542         return ret;
9543 #endif
9544 #ifdef TARGET_NR_lseek
9545     case TARGET_NR_lseek:
9546         return get_errno(lseek(arg1, arg2, arg3));
9547 #endif
9548 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9549     /* Alpha specific */
9550     case TARGET_NR_getxpid:
9551         cpu_env->ir[IR_A4] = getppid();
9552         return get_errno(getpid());
9553 #endif
9554 #ifdef TARGET_NR_getpid
9555     case TARGET_NR_getpid:
9556         return get_errno(getpid());
9557 #endif
9558     case TARGET_NR_mount:
9559         {
9560             /* need to look at the data field */
9561             void *p2, *p3;
9562 
9563             if (arg1) {
9564                 p = lock_user_string(arg1);
9565                 if (!p) {
9566                     return -TARGET_EFAULT;
9567                 }
9568             } else {
9569                 p = NULL;
9570             }
9571 
9572             p2 = lock_user_string(arg2);
9573             if (!p2) {
9574                 if (arg1) {
9575                     unlock_user(p, arg1, 0);
9576                 }
9577                 return -TARGET_EFAULT;
9578             }
9579 
9580             if (arg3) {
9581                 p3 = lock_user_string(arg3);
9582                 if (!p3) {
9583                     if (arg1) {
9584                         unlock_user(p, arg1, 0);
9585                     }
9586                     unlock_user(p2, arg2, 0);
9587                     return -TARGET_EFAULT;
9588                 }
9589             } else {
9590                 p3 = NULL;
9591             }
9592 
9593             /* FIXME - arg5 should be locked, but it isn't clear how to
9594              * do that since it's not guaranteed to be a NULL-terminated
9595              * string.
9596              */
9597             if (!arg5) {
9598                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9599             } else {
9600                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9601             }
9602             ret = get_errno(ret);
9603 
9604             if (arg1) {
9605                 unlock_user(p, arg1, 0);
9606             }
9607             unlock_user(p2, arg2, 0);
9608             if (arg3) {
9609                 unlock_user(p3, arg3, 0);
9610             }
9611         }
9612         return ret;
9613 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9614 #if defined(TARGET_NR_umount)
9615     case TARGET_NR_umount:
9616 #endif
9617 #if defined(TARGET_NR_oldumount)
9618     case TARGET_NR_oldumount:
9619 #endif
9620         if (!(p = lock_user_string(arg1)))
9621             return -TARGET_EFAULT;
9622         ret = get_errno(umount(p));
9623         unlock_user(p, arg1, 0);
9624         return ret;
9625 #endif
9626 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9627     case TARGET_NR_move_mount:
9628         {
9629             void *p2, *p4;
9630 
9631             if (!arg2 || !arg4) {
9632                 return -TARGET_EFAULT;
9633             }
9634 
9635             p2 = lock_user_string(arg2);
9636             if (!p2) {
9637                 return -TARGET_EFAULT;
9638             }
9639 
9640             p4 = lock_user_string(arg4);
9641             if (!p4) {
9642                 unlock_user(p2, arg2, 0);
9643                 return -TARGET_EFAULT;
9644             }
9645             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9646 
9647             unlock_user(p2, arg2, 0);
9648             unlock_user(p4, arg4, 0);
9649 
9650             return ret;
9651         }
9652 #endif
9653 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9654     case TARGET_NR_open_tree:
9655         {
9656             void *p2;
9657             int host_flags;
9658 
9659             if (!arg2) {
9660                 return -TARGET_EFAULT;
9661             }
9662 
9663             p2 = lock_user_string(arg2);
9664             if (!p2) {
9665                 return -TARGET_EFAULT;
9666             }
9667 
9668             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9669             if (arg3 & TARGET_O_CLOEXEC) {
9670                 host_flags |= O_CLOEXEC;
9671             }
9672 
9673             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9674 
9675             unlock_user(p2, arg2, 0);
9676 
9677             return ret;
9678         }
9679 #endif
9680 #ifdef TARGET_NR_stime /* not on alpha */
9681     case TARGET_NR_stime:
9682         {
9683             struct timespec ts;
9684             ts.tv_nsec = 0;
9685             if (get_user_sal(ts.tv_sec, arg1)) {
9686                 return -TARGET_EFAULT;
9687             }
9688             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9689         }
9690 #endif
9691 #ifdef TARGET_NR_alarm /* not on alpha */
9692     case TARGET_NR_alarm:
9693         return alarm(arg1);
9694 #endif
9695 #ifdef TARGET_NR_pause /* not on alpha */
9696     case TARGET_NR_pause:
9697         if (!block_signals()) {
9698             sigsuspend(&get_task_state(cpu)->signal_mask);
9699         }
9700         return -TARGET_EINTR;
9701 #endif
9702 #ifdef TARGET_NR_utime
9703     case TARGET_NR_utime:
9704         {
9705             struct utimbuf tbuf, *host_tbuf;
9706             struct target_utimbuf *target_tbuf;
9707             if (arg2) {
9708                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9709                     return -TARGET_EFAULT;
9710                 tbuf.actime = tswapal(target_tbuf->actime);
9711                 tbuf.modtime = tswapal(target_tbuf->modtime);
9712                 unlock_user_struct(target_tbuf, arg2, 0);
9713                 host_tbuf = &tbuf;
9714             } else {
9715                 host_tbuf = NULL;
9716             }
9717             if (!(p = lock_user_string(arg1)))
9718                 return -TARGET_EFAULT;
9719             ret = get_errno(utime(p, host_tbuf));
9720             unlock_user(p, arg1, 0);
9721         }
9722         return ret;
9723 #endif
9724 #ifdef TARGET_NR_utimes
9725     case TARGET_NR_utimes:
9726         {
9727             struct timeval *tvp, tv[2];
9728             if (arg2) {
9729                 if (copy_from_user_timeval(&tv[0], arg2)
9730                     || copy_from_user_timeval(&tv[1],
9731                                               arg2 + sizeof(struct target_timeval)))
9732                     return -TARGET_EFAULT;
9733                 tvp = tv;
9734             } else {
9735                 tvp = NULL;
9736             }
9737             if (!(p = lock_user_string(arg1)))
9738                 return -TARGET_EFAULT;
9739             ret = get_errno(utimes(p, tvp));
9740             unlock_user(p, arg1, 0);
9741         }
9742         return ret;
9743 #endif
9744 #if defined(TARGET_NR_futimesat)
9745     case TARGET_NR_futimesat:
9746         {
9747             struct timeval *tvp, tv[2];
9748             if (arg3) {
9749                 if (copy_from_user_timeval(&tv[0], arg3)
9750                     || copy_from_user_timeval(&tv[1],
9751                                               arg3 + sizeof(struct target_timeval)))
9752                     return -TARGET_EFAULT;
9753                 tvp = tv;
9754             } else {
9755                 tvp = NULL;
9756             }
9757             if (!(p = lock_user_string(arg2))) {
9758                 return -TARGET_EFAULT;
9759             }
9760             ret = get_errno(futimesat(arg1, path(p), tvp));
9761             unlock_user(p, arg2, 0);
9762         }
9763         return ret;
9764 #endif
9765 #ifdef TARGET_NR_access
9766     case TARGET_NR_access:
9767         if (!(p = lock_user_string(arg1))) {
9768             return -TARGET_EFAULT;
9769         }
9770         ret = get_errno(access(path(p), arg2));
9771         unlock_user(p, arg1, 0);
9772         return ret;
9773 #endif
9774 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9775     case TARGET_NR_faccessat:
9776         if (!(p = lock_user_string(arg2))) {
9777             return -TARGET_EFAULT;
9778         }
9779         ret = get_errno(faccessat(arg1, p, arg3, 0));
9780         unlock_user(p, arg2, 0);
9781         return ret;
9782 #endif
9783 #if defined(TARGET_NR_faccessat2)
9784     case TARGET_NR_faccessat2:
9785         if (!(p = lock_user_string(arg2))) {
9786             return -TARGET_EFAULT;
9787         }
9788         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9789         unlock_user(p, arg2, 0);
9790         return ret;
9791 #endif
9792 #ifdef TARGET_NR_nice /* not on alpha */
9793     case TARGET_NR_nice:
9794         return get_errno(nice(arg1));
9795 #endif
9796     case TARGET_NR_sync:
9797         sync();
9798         return 0;
9799 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9800     case TARGET_NR_syncfs:
9801         return get_errno(syncfs(arg1));
9802 #endif
9803     case TARGET_NR_kill:
9804         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9805 #ifdef TARGET_NR_rename
9806     case TARGET_NR_rename:
9807         {
9808             void *p2;
9809             p = lock_user_string(arg1);
9810             p2 = lock_user_string(arg2);
9811             if (!p || !p2)
9812                 ret = -TARGET_EFAULT;
9813             else
9814                 ret = get_errno(rename(p, p2));
9815             unlock_user(p2, arg2, 0);
9816             unlock_user(p, arg1, 0);
9817         }
9818         return ret;
9819 #endif
9820 #if defined(TARGET_NR_renameat)
9821     case TARGET_NR_renameat:
9822         {
9823             void *p2;
9824             p  = lock_user_string(arg2);
9825             p2 = lock_user_string(arg4);
9826             if (!p || !p2)
9827                 ret = -TARGET_EFAULT;
9828             else
9829                 ret = get_errno(renameat(arg1, p, arg3, p2));
9830             unlock_user(p2, arg4, 0);
9831             unlock_user(p, arg2, 0);
9832         }
9833         return ret;
9834 #endif
9835 #if defined(TARGET_NR_renameat2)
9836     case TARGET_NR_renameat2:
9837         {
9838             void *p2;
9839             p  = lock_user_string(arg2);
9840             p2 = lock_user_string(arg4);
9841             if (!p || !p2) {
9842                 ret = -TARGET_EFAULT;
9843             } else {
9844                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9845             }
9846             unlock_user(p2, arg4, 0);
9847             unlock_user(p, arg2, 0);
9848         }
9849         return ret;
9850 #endif
9851 #ifdef TARGET_NR_mkdir
9852     case TARGET_NR_mkdir:
9853         if (!(p = lock_user_string(arg1)))
9854             return -TARGET_EFAULT;
9855         ret = get_errno(mkdir(p, arg2));
9856         unlock_user(p, arg1, 0);
9857         return ret;
9858 #endif
9859 #if defined(TARGET_NR_mkdirat)
9860     case TARGET_NR_mkdirat:
9861         if (!(p = lock_user_string(arg2)))
9862             return -TARGET_EFAULT;
9863         ret = get_errno(mkdirat(arg1, p, arg3));
9864         unlock_user(p, arg2, 0);
9865         return ret;
9866 #endif
9867 #ifdef TARGET_NR_rmdir
9868     case TARGET_NR_rmdir:
9869         if (!(p = lock_user_string(arg1)))
9870             return -TARGET_EFAULT;
9871         ret = get_errno(rmdir(p));
9872         unlock_user(p, arg1, 0);
9873         return ret;
9874 #endif
9875     case TARGET_NR_dup:
9876         ret = get_errno(dup(arg1));
9877         if (ret >= 0) {
9878             fd_trans_dup(arg1, ret);
9879         }
9880         return ret;
9881 #ifdef TARGET_NR_pipe
9882     case TARGET_NR_pipe:
9883         return do_pipe(cpu_env, arg1, 0, 0);
9884 #endif
9885 #ifdef TARGET_NR_pipe2
9886     case TARGET_NR_pipe2:
9887         return do_pipe(cpu_env, arg1,
9888                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9889 #endif
9890     case TARGET_NR_times:
9891         {
9892             struct target_tms *tmsp;
9893             struct tms tms;
9894             ret = get_errno(times(&tms));
9895             if (arg1) {
9896                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9897                 if (!tmsp)
9898                     return -TARGET_EFAULT;
9899                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9900                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9901                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9902                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9903             }
9904             if (!is_error(ret))
9905                 ret = host_to_target_clock_t(ret);
9906         }
9907         return ret;
9908     case TARGET_NR_acct:
9909         if (arg1 == 0) {
9910             ret = get_errno(acct(NULL));
9911         } else {
9912             if (!(p = lock_user_string(arg1))) {
9913                 return -TARGET_EFAULT;
9914             }
9915             ret = get_errno(acct(path(p)));
9916             unlock_user(p, arg1, 0);
9917         }
9918         return ret;
9919 #ifdef TARGET_NR_umount2
9920     case TARGET_NR_umount2:
9921         if (!(p = lock_user_string(arg1)))
9922             return -TARGET_EFAULT;
9923         ret = get_errno(umount2(p, arg2));
9924         unlock_user(p, arg1, 0);
9925         return ret;
9926 #endif
9927     case TARGET_NR_ioctl:
9928         return do_ioctl(arg1, arg2, arg3);
9929 #ifdef TARGET_NR_fcntl
9930     case TARGET_NR_fcntl:
9931         return do_fcntl(arg1, arg2, arg3);
9932 #endif
9933     case TARGET_NR_setpgid:
9934         return get_errno(setpgid(arg1, arg2));
9935     case TARGET_NR_umask:
9936         return get_errno(umask(arg1));
9937     case TARGET_NR_chroot:
9938         if (!(p = lock_user_string(arg1)))
9939             return -TARGET_EFAULT;
9940         ret = get_errno(chroot(p));
9941         unlock_user(p, arg1, 0);
9942         return ret;
9943 #ifdef TARGET_NR_dup2
9944     case TARGET_NR_dup2:
9945         ret = get_errno(dup2(arg1, arg2));
9946         if (ret >= 0) {
9947             fd_trans_dup(arg1, arg2);
9948         }
9949         return ret;
9950 #endif
9951 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9952     case TARGET_NR_dup3:
9953     {
9954         int host_flags;
9955 
9956         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9957             return -TARGET_EINVAL;
9958         }
9959         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9960         ret = get_errno(dup3(arg1, arg2, host_flags));
9961         if (ret >= 0) {
9962             fd_trans_dup(arg1, arg2);
9963         }
9964         return ret;
9965     }
9966 #endif
9967 #ifdef TARGET_NR_getppid /* not on alpha */
9968     case TARGET_NR_getppid:
9969         return get_errno(getppid());
9970 #endif
9971 #ifdef TARGET_NR_getpgrp
9972     case TARGET_NR_getpgrp:
9973         return get_errno(getpgrp());
9974 #endif
9975     case TARGET_NR_setsid:
9976         return get_errno(setsid());
9977 #ifdef TARGET_NR_sigaction
9978     case TARGET_NR_sigaction:
9979         {
9980 #if defined(TARGET_MIPS)
9981 	    struct target_sigaction act, oact, *pact, *old_act;
9982 
9983 	    if (arg2) {
9984                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9985                     return -TARGET_EFAULT;
9986 		act._sa_handler = old_act->_sa_handler;
9987 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9988 		act.sa_flags = old_act->sa_flags;
9989 		unlock_user_struct(old_act, arg2, 0);
9990 		pact = &act;
9991 	    } else {
9992 		pact = NULL;
9993 	    }
9994 
9995         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9996 
9997 	    if (!is_error(ret) && arg3) {
9998                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9999                     return -TARGET_EFAULT;
10000 		old_act->_sa_handler = oact._sa_handler;
10001 		old_act->sa_flags = oact.sa_flags;
10002 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
10003 		old_act->sa_mask.sig[1] = 0;
10004 		old_act->sa_mask.sig[2] = 0;
10005 		old_act->sa_mask.sig[3] = 0;
10006 		unlock_user_struct(old_act, arg3, 1);
10007 	    }
10008 #else
10009             struct target_old_sigaction *old_act;
10010             struct target_sigaction act, oact, *pact;
10011             if (arg2) {
10012                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10013                     return -TARGET_EFAULT;
10014                 act._sa_handler = old_act->_sa_handler;
10015                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10016                 act.sa_flags = old_act->sa_flags;
10017 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10018                 act.sa_restorer = old_act->sa_restorer;
10019 #endif
10020                 unlock_user_struct(old_act, arg2, 0);
10021                 pact = &act;
10022             } else {
10023                 pact = NULL;
10024             }
10025             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10026             if (!is_error(ret) && arg3) {
10027                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10028                     return -TARGET_EFAULT;
10029                 old_act->_sa_handler = oact._sa_handler;
10030                 old_act->sa_mask = oact.sa_mask.sig[0];
10031                 old_act->sa_flags = oact.sa_flags;
10032 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10033                 old_act->sa_restorer = oact.sa_restorer;
10034 #endif
10035                 unlock_user_struct(old_act, arg3, 1);
10036             }
10037 #endif
10038         }
10039         return ret;
10040 #endif
10041     case TARGET_NR_rt_sigaction:
10042         {
10043             /*
10044              * For Alpha and SPARC this is a 5 argument syscall, with
10045              * a 'restorer' parameter which must be copied into the
10046              * sa_restorer field of the sigaction struct.
10047              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10048              * and arg5 is the sigsetsize.
10049              */
10050 #if defined(TARGET_ALPHA)
10051             target_ulong sigsetsize = arg4;
10052             target_ulong restorer = arg5;
10053 #elif defined(TARGET_SPARC)
10054             target_ulong restorer = arg4;
10055             target_ulong sigsetsize = arg5;
10056 #else
10057             target_ulong sigsetsize = arg4;
10058             target_ulong restorer = 0;
10059 #endif
10060             struct target_sigaction *act = NULL;
10061             struct target_sigaction *oact = NULL;
10062 
10063             if (sigsetsize != sizeof(target_sigset_t)) {
10064                 return -TARGET_EINVAL;
10065             }
10066             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10067                 return -TARGET_EFAULT;
10068             }
10069             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10070                 ret = -TARGET_EFAULT;
10071             } else {
10072                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10073                 if (oact) {
10074                     unlock_user_struct(oact, arg3, 1);
10075                 }
10076             }
10077             if (act) {
10078                 unlock_user_struct(act, arg2, 0);
10079             }
10080         }
10081         return ret;
10082 #ifdef TARGET_NR_sgetmask /* not on alpha */
10083     case TARGET_NR_sgetmask:
10084         {
10085             sigset_t cur_set;
10086             abi_ulong target_set;
10087             ret = do_sigprocmask(0, NULL, &cur_set);
10088             if (!ret) {
10089                 host_to_target_old_sigset(&target_set, &cur_set);
10090                 ret = target_set;
10091             }
10092         }
10093         return ret;
10094 #endif
10095 #ifdef TARGET_NR_ssetmask /* not on alpha */
10096     case TARGET_NR_ssetmask:
10097         {
10098             sigset_t set, oset;
10099             abi_ulong target_set = arg1;
10100             target_to_host_old_sigset(&set, &target_set);
10101             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10102             if (!ret) {
10103                 host_to_target_old_sigset(&target_set, &oset);
10104                 ret = target_set;
10105             }
10106         }
10107         return ret;
10108 #endif
10109 #ifdef TARGET_NR_sigprocmask
10110     case TARGET_NR_sigprocmask:
10111         {
10112 #if defined(TARGET_ALPHA)
10113             sigset_t set, oldset;
10114             abi_ulong mask;
10115             int how;
10116 
10117             switch (arg1) {
10118             case TARGET_SIG_BLOCK:
10119                 how = SIG_BLOCK;
10120                 break;
10121             case TARGET_SIG_UNBLOCK:
10122                 how = SIG_UNBLOCK;
10123                 break;
10124             case TARGET_SIG_SETMASK:
10125                 how = SIG_SETMASK;
10126                 break;
10127             default:
10128                 return -TARGET_EINVAL;
10129             }
10130             mask = arg2;
10131             target_to_host_old_sigset(&set, &mask);
10132 
10133             ret = do_sigprocmask(how, &set, &oldset);
10134             if (!is_error(ret)) {
10135                 host_to_target_old_sigset(&mask, &oldset);
10136                 ret = mask;
10137                 cpu_env->ir[IR_V0] = 0; /* force no error */
10138             }
10139 #else
10140             sigset_t set, oldset, *set_ptr;
10141             int how;
10142 
10143             if (arg2) {
10144                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10145                 if (!p) {
10146                     return -TARGET_EFAULT;
10147                 }
10148                 target_to_host_old_sigset(&set, p);
10149                 unlock_user(p, arg2, 0);
10150                 set_ptr = &set;
10151                 switch (arg1) {
10152                 case TARGET_SIG_BLOCK:
10153                     how = SIG_BLOCK;
10154                     break;
10155                 case TARGET_SIG_UNBLOCK:
10156                     how = SIG_UNBLOCK;
10157                     break;
10158                 case TARGET_SIG_SETMASK:
10159                     how = SIG_SETMASK;
10160                     break;
10161                 default:
10162                     return -TARGET_EINVAL;
10163                 }
10164             } else {
10165                 how = 0;
10166                 set_ptr = NULL;
10167             }
10168             ret = do_sigprocmask(how, set_ptr, &oldset);
10169             if (!is_error(ret) && arg3) {
10170                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10171                     return -TARGET_EFAULT;
10172                 host_to_target_old_sigset(p, &oldset);
10173                 unlock_user(p, arg3, sizeof(target_sigset_t));
10174             }
10175 #endif
10176         }
10177         return ret;
10178 #endif
10179     case TARGET_NR_rt_sigprocmask:
10180         {
10181             int how = arg1;
10182             sigset_t set, oldset, *set_ptr;
10183 
10184             if (arg4 != sizeof(target_sigset_t)) {
10185                 return -TARGET_EINVAL;
10186             }
10187 
10188             if (arg2) {
10189                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10190                 if (!p) {
10191                     return -TARGET_EFAULT;
10192                 }
10193                 target_to_host_sigset(&set, p);
10194                 unlock_user(p, arg2, 0);
10195                 set_ptr = &set;
10196                 switch(how) {
10197                 case TARGET_SIG_BLOCK:
10198                     how = SIG_BLOCK;
10199                     break;
10200                 case TARGET_SIG_UNBLOCK:
10201                     how = SIG_UNBLOCK;
10202                     break;
10203                 case TARGET_SIG_SETMASK:
10204                     how = SIG_SETMASK;
10205                     break;
10206                 default:
10207                     return -TARGET_EINVAL;
10208                 }
10209             } else {
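                /*
                 * With no new mask this is just a query of the current
                 * signal mask; 'how' is not used in that case.
                 */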
10210                 how = 0;
10211                 set_ptr = NULL;
10212             }
10213             ret = do_sigprocmask(how, set_ptr, &oldset);
10214             if (!is_error(ret) && arg3) {
10215                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10216                     return -TARGET_EFAULT;
10217                 host_to_target_sigset(p, &oldset);
10218                 unlock_user(p, arg3, sizeof(target_sigset_t));
10219             }
10220         }
10221         return ret;
10222 #ifdef TARGET_NR_sigpending
10223     case TARGET_NR_sigpending:
10224         {
10225             sigset_t set;
10226             ret = get_errno(sigpending(&set));
10227             if (!is_error(ret)) {
10228                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10229                     return -TARGET_EFAULT;
10230                 host_to_target_old_sigset(p, &set);
10231                 unlock_user(p, arg1, sizeof(target_sigset_t));
10232             }
10233         }
10234         return ret;
10235 #endif
10236     case TARGET_NR_rt_sigpending:
10237         {
10238             sigset_t set;
10239 
10240             /* Yes, this check is >, not != like most.  We follow the
10241              * kernel's logic here: it implements NR_sigpending through
10242              * the same code path, and in that case the old_sigset_t is
10243              * smaller in size.
10244              */
10245             if (arg2 > sizeof(target_sigset_t)) {
10246                 return -TARGET_EINVAL;
10247             }
10248 
10249             ret = get_errno(sigpending(&set));
10250             if (!is_error(ret)) {
10251                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10252                     return -TARGET_EFAULT;
10253                 host_to_target_sigset(p, &set);
10254                 unlock_user(p, arg1, sizeof(target_sigset_t));
10255             }
10256         }
10257         return ret;
10258 #ifdef TARGET_NR_sigsuspend
10259     case TARGET_NR_sigsuspend:
10260         {
10261             sigset_t *set;
10262 
10263 #if defined(TARGET_ALPHA)
10264             TaskState *ts = get_task_state(cpu);
10265             /* target_to_host_old_sigset will bswap back */
10266             abi_ulong mask = tswapal(arg1);
10267             set = &ts->sigsuspend_mask;
10268             target_to_host_old_sigset(set, &mask);
10269 #else
10270             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10271             if (ret != 0) {
10272                 return ret;
10273             }
10274 #endif
10275             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10276             finish_sigsuspend_mask(ret);
10277         }
10278         return ret;
10279 #endif
10280     case TARGET_NR_rt_sigsuspend:
10281         {
10282             sigset_t *set;
10283 
10284             ret = process_sigsuspend_mask(&set, arg1, arg2);
10285             if (ret != 0) {
10286                 return ret;
10287             }
10288             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10289             finish_sigsuspend_mask(ret);
10290         }
10291         return ret;
10292 #ifdef TARGET_NR_rt_sigtimedwait
10293     case TARGET_NR_rt_sigtimedwait:
10294         {
10295             sigset_t set;
10296             struct timespec uts, *puts;
10297             siginfo_t uinfo;
10298 
10299             if (arg4 != sizeof(target_sigset_t)) {
10300                 return -TARGET_EINVAL;
10301             }
10302 
10303             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10304                 return -TARGET_EFAULT;
10305             target_to_host_sigset(&set, p);
10306             unlock_user(p, arg1, 0);
10307             if (arg3) {
10308                 puts = &uts;
10309                 if (target_to_host_timespec(puts, arg3)) {
10310                     return -TARGET_EFAULT;
10311                 }
10312             } else {
10313                 puts = NULL;
10314             }
10315             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10316                                                  SIGSET_T_SIZE));
10317             if (!is_error(ret)) {
10318                 if (arg2) {
10319                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10320                                   0);
10321                     if (!p) {
10322                         return -TARGET_EFAULT;
10323                     }
10324                     host_to_target_siginfo(p, &uinfo);
10325                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10326                 }
10327                 ret = host_to_target_signal(ret);
10328             }
10329         }
10330         return ret;
10331 #endif
10332 #ifdef TARGET_NR_rt_sigtimedwait_time64
10333     case TARGET_NR_rt_sigtimedwait_time64:
10334         {
10335             sigset_t set;
10336             struct timespec uts, *puts;
10337             siginfo_t uinfo;
10338 
10339             if (arg4 != sizeof(target_sigset_t)) {
10340                 return -TARGET_EINVAL;
10341             }
10342 
10343             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10344             if (!p) {
10345                 return -TARGET_EFAULT;
10346             }
10347             target_to_host_sigset(&set, p);
10348             unlock_user(p, arg1, 0);
10349             if (arg3) {
10350                 puts = &uts;
10351                 if (target_to_host_timespec64(puts, arg3)) {
10352                     return -TARGET_EFAULT;
10353                 }
10354             } else {
10355                 puts = NULL;
10356             }
10357             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10358                                                  SIGSET_T_SIZE));
10359             if (!is_error(ret)) {
10360                 if (arg2) {
10361                     p = lock_user(VERIFY_WRITE, arg2,
10362                                   sizeof(target_siginfo_t), 0);
10363                     if (!p) {
10364                         return -TARGET_EFAULT;
10365                     }
10366                     host_to_target_siginfo(p, &uinfo);
10367                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10368                 }
10369                 ret = host_to_target_signal(ret);
10370             }
10371         }
10372         return ret;
10373 #endif
10374     case TARGET_NR_rt_sigqueueinfo:
10375         {
10376             siginfo_t uinfo;
10377 
10378             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10379             if (!p) {
10380                 return -TARGET_EFAULT;
10381             }
10382             target_to_host_siginfo(&uinfo, p);
10383             unlock_user(p, arg3, 0);
10384             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10385         }
10386         return ret;
10387     case TARGET_NR_rt_tgsigqueueinfo:
10388         {
10389             siginfo_t uinfo;
10390 
10391             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10392             if (!p) {
10393                 return -TARGET_EFAULT;
10394             }
10395             target_to_host_siginfo(&uinfo, p);
10396             unlock_user(p, arg4, 0);
10397             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10398         }
10399         return ret;
10400 #ifdef TARGET_NR_sigreturn
10401     case TARGET_NR_sigreturn:
10402         if (block_signals()) {
10403             return -QEMU_ERESTARTSYS;
10404         }
10405         return do_sigreturn(cpu_env);
10406 #endif
10407     case TARGET_NR_rt_sigreturn:
10408         if (block_signals()) {
10409             return -QEMU_ERESTARTSYS;
10410         }
10411         return do_rt_sigreturn(cpu_env);
10412     case TARGET_NR_sethostname:
10413         if (!(p = lock_user_string(arg1)))
10414             return -TARGET_EFAULT;
10415         ret = get_errno(sethostname(p, arg2));
10416         unlock_user(p, arg1, 0);
10417         return ret;
10418 #ifdef TARGET_NR_setrlimit
10419     case TARGET_NR_setrlimit:
10420         {
10421             int resource = target_to_host_resource(arg1);
10422             struct target_rlimit *target_rlim;
10423             struct rlimit rlim;
10424             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10425                 return -TARGET_EFAULT;
10426             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10427             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10428             unlock_user_struct(target_rlim, arg2, 0);
10429             /*
10430              * If we just passed through resource limit settings for memory then
10431              * they would also apply to QEMU's own allocations, and QEMU will
10432              * crash or hang or die if its allocations fail. Ideally we would
10433              * track the guest allocations in QEMU and apply the limits ourselves.
10434              * For now, just tell the guest the call succeeded but don't actually
10435              * limit anything.
10436              */
10437             if (resource != RLIMIT_AS &&
10438                 resource != RLIMIT_DATA &&
10439                 resource != RLIMIT_STACK) {
10440                 return get_errno(setrlimit(resource, &rlim));
10441             } else {
10442                 return 0;
10443             }
10444         }
10445 #endif
10446 #ifdef TARGET_NR_getrlimit
10447     case TARGET_NR_getrlimit:
10448         {
10449             int resource = target_to_host_resource(arg1);
10450             struct target_rlimit *target_rlim;
10451             struct rlimit rlim;
10452 
10453             ret = get_errno(getrlimit(resource, &rlim));
10454             if (!is_error(ret)) {
10455                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10456                     return -TARGET_EFAULT;
10457                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10458                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10459                 unlock_user_struct(target_rlim, arg2, 1);
10460             }
10461         }
10462         return ret;
10463 #endif
10464     case TARGET_NR_getrusage:
10465         {
10466             struct rusage rusage;
10467             ret = get_errno(getrusage(arg1, &rusage));
10468             if (!is_error(ret)) {
10469                 ret = host_to_target_rusage(arg2, &rusage);
10470             }
10471         }
10472         return ret;
10473 #if defined(TARGET_NR_gettimeofday)
10474     case TARGET_NR_gettimeofday:
10475         {
10476             struct timeval tv;
10477             struct timezone tz;
10478 
10479             ret = get_errno(gettimeofday(&tv, &tz));
10480             if (!is_error(ret)) {
10481                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10482                     return -TARGET_EFAULT;
10483                 }
10484                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10485                     return -TARGET_EFAULT;
10486                 }
10487             }
10488         }
10489         return ret;
10490 #endif
10491 #if defined(TARGET_NR_settimeofday)
10492     case TARGET_NR_settimeofday:
10493         {
10494             struct timeval tv, *ptv = NULL;
10495             struct timezone tz, *ptz = NULL;
10496 
10497             if (arg1) {
10498                 if (copy_from_user_timeval(&tv, arg1)) {
10499                     return -TARGET_EFAULT;
10500                 }
10501                 ptv = &tv;
10502             }
10503 
10504             if (arg2) {
10505                 if (copy_from_user_timezone(&tz, arg2)) {
10506                     return -TARGET_EFAULT;
10507                 }
10508                 ptz = &tz;
10509             }
10510 
10511             return get_errno(settimeofday(ptv, ptz));
10512         }
10513 #endif
10514 #if defined(TARGET_NR_select)
10515     case TARGET_NR_select:
10516 #if defined(TARGET_WANT_NI_OLD_SELECT)
10517         /* Some architectures used to have old_select here,
10518          * but they now return ENOSYS for it.
10519          */
10520         ret = -TARGET_ENOSYS;
10521 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10522         ret = do_old_select(arg1);
10523 #else
10524         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10525 #endif
10526         return ret;
10527 #endif
10528 #ifdef TARGET_NR_pselect6
10529     case TARGET_NR_pselect6:
10530         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10531 #endif
10532 #ifdef TARGET_NR_pselect6_time64
10533     case TARGET_NR_pselect6_time64:
10534         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10535 #endif
10536 #ifdef TARGET_NR_symlink
10537     case TARGET_NR_symlink:
10538         {
10539             void *p2;
10540             p = lock_user_string(arg1);
10541             p2 = lock_user_string(arg2);
10542             if (!p || !p2)
10543                 ret = -TARGET_EFAULT;
10544             else
10545                 ret = get_errno(symlink(p, p2));
10546             unlock_user(p2, arg2, 0);
10547             unlock_user(p, arg1, 0);
10548         }
10549         return ret;
10550 #endif
10551 #if defined(TARGET_NR_symlinkat)
10552     case TARGET_NR_symlinkat:
10553         {
10554             void *p2;
10555             p  = lock_user_string(arg1);
10556             p2 = lock_user_string(arg3);
10557             if (!p || !p2)
10558                 ret = -TARGET_EFAULT;
10559             else
10560                 ret = get_errno(symlinkat(p, arg2, p2));
10561             unlock_user(p2, arg3, 0);
10562             unlock_user(p, arg1, 0);
10563         }
10564         return ret;
10565 #endif
10566 #ifdef TARGET_NR_readlink
10567     case TARGET_NR_readlink:
10568         {
10569             void *p2;
10570             p = lock_user_string(arg1);
10571             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10572             if (!p || !p2) {
                      ret = -TARGET_EFAULT;
                  } else {
                      ret = get_errno(do_guest_readlink(p, p2, arg3));
                  }
10573             unlock_user(p2, arg2, ret);
10574             unlock_user(p, arg1, 0);
10575         }
10576         return ret;
10577 #endif
10578 #if defined(TARGET_NR_readlinkat)
10579     case TARGET_NR_readlinkat:
10580         {
10581             void *p2;
10582             p  = lock_user_string(arg2);
10583             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10584             if (!p || !p2) {
10585                 ret = -TARGET_EFAULT;
10586             } else if (!arg4) {
10587                 /* Short circuit this for the magic exe check. */
10588                 ret = -TARGET_EINVAL;
10589             } else if (is_proc_myself((const char *)p, "exe")) {
10590                 /*
10591                  * Don't worry about sign mismatch as earlier mapping
10592                  * logic would have thrown a bad address error.
10593                  */
10594                 ret = MIN(strlen(exec_path), arg4);
10595                 /* We cannot NUL terminate the string. */
10596                 memcpy(p2, exec_path, ret);
10597             } else {
10598                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10599             }
10600             unlock_user(p2, arg3, ret);
10601             unlock_user(p, arg2, 0);
10602         }
10603         return ret;
10604 #endif
10605 #ifdef TARGET_NR_swapon
10606     case TARGET_NR_swapon:
10607         if (!(p = lock_user_string(arg1)))
10608             return -TARGET_EFAULT;
10609         ret = get_errno(swapon(p, arg2));
10610         unlock_user(p, arg1, 0);
10611         return ret;
10612 #endif
10613     case TARGET_NR_reboot:
10614         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10615             /* arg4 is only used with RESTART2; ignore it in all other cases */
10616             p = lock_user_string(arg4);
10617             if (!p) {
10618                 return -TARGET_EFAULT;
10619             }
10620             ret = get_errno(reboot(arg1, arg2, arg3, p));
10621             unlock_user(p, arg4, 0);
10622         } else {
10623             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10624         }
10625         return ret;
10626 #ifdef TARGET_NR_mmap
10627     case TARGET_NR_mmap:
10628 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10629         {
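                  /* Old-style mmap: the guest passes a single pointer (arg1)
                   * to a block of six abi_ulong values holding the real mmap
                   * arguments, which are unpacked here. */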
10630             abi_ulong *v;
10631             abi_ulong v1, v2, v3, v4, v5, v6;
10632             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10633                 return -TARGET_EFAULT;
10634             v1 = tswapal(v[0]);
10635             v2 = tswapal(v[1]);
10636             v3 = tswapal(v[2]);
10637             v4 = tswapal(v[3]);
10638             v5 = tswapal(v[4]);
10639             v6 = tswapal(v[5]);
10640             unlock_user(v, arg1, 0);
10641             return do_mmap(v1, v2, v3, v4, v5, v6);
10642         }
10643 #else
10644         /* mmap pointers are always untagged */
10645         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10646 #endif
10647 #endif
10648 #ifdef TARGET_NR_mmap2
10649     case TARGET_NR_mmap2:
10650 #ifndef MMAP_SHIFT
10651 #define MMAP_SHIFT 12
10652 #endif
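              /* For mmap2 the last argument is the file offset in units of
               * 1 << MMAP_SHIFT bytes (4 KiB by default), so scale it to a
               * byte offset before calling do_mmap(). */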
10653         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10654                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10655 #endif
10656     case TARGET_NR_munmap:
10657         arg1 = cpu_untagged_addr(cpu, arg1);
10658         return get_errno(target_munmap(arg1, arg2));
10659     case TARGET_NR_mprotect:
10660         arg1 = cpu_untagged_addr(cpu, arg1);
10661         {
10662             TaskState *ts = get_task_state(cpu);
10663             /* Special hack to detect libc making the stack executable.  */
10664             if ((arg3 & PROT_GROWSDOWN)
10665                 && arg1 >= ts->info->stack_limit
10666                 && arg1 <= ts->info->start_stack) {
10667                 arg3 &= ~PROT_GROWSDOWN;
10668                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10669                 arg1 = ts->info->stack_limit;
10670             }
10671         }
10672         return get_errno(target_mprotect(arg1, arg2, arg3));
10673 #ifdef TARGET_NR_mremap
10674     case TARGET_NR_mremap:
10675         arg1 = cpu_untagged_addr(cpu, arg1);
10676         /* mremap new_addr (arg5) is always untagged */
10677         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10678 #endif
10679         /* ??? msync/mlock/munlock are broken for softmmu.  */
10680 #ifdef TARGET_NR_msync
10681     case TARGET_NR_msync:
10682         return get_errno(msync(g2h(cpu, arg1), arg2,
10683                                target_to_host_msync_arg(arg3)));
10684 #endif
10685 #ifdef TARGET_NR_mlock
10686     case TARGET_NR_mlock:
10687         return get_errno(mlock(g2h(cpu, arg1), arg2));
10688 #endif
10689 #ifdef TARGET_NR_munlock
10690     case TARGET_NR_munlock:
10691         return get_errno(munlock(g2h(cpu, arg1), arg2));
10692 #endif
10693 #ifdef TARGET_NR_mlockall
10694     case TARGET_NR_mlockall:
10695         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10696 #endif
10697 #ifdef TARGET_NR_munlockall
10698     case TARGET_NR_munlockall:
10699         return get_errno(munlockall());
10700 #endif
10701 #ifdef TARGET_NR_truncate
10702     case TARGET_NR_truncate:
10703         if (!(p = lock_user_string(arg1)))
10704             return -TARGET_EFAULT;
10705         ret = get_errno(truncate(p, arg2));
10706         unlock_user(p, arg1, 0);
10707         return ret;
10708 #endif
10709 #ifdef TARGET_NR_ftruncate
10710     case TARGET_NR_ftruncate:
10711         return get_errno(ftruncate(arg1, arg2));
10712 #endif
10713     case TARGET_NR_fchmod:
10714         return get_errno(fchmod(arg1, arg2));
10715 #if defined(TARGET_NR_fchmodat)
10716     case TARGET_NR_fchmodat:
10717         if (!(p = lock_user_string(arg2)))
10718             return -TARGET_EFAULT;
10719         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10720         unlock_user(p, arg2, 0);
10721         return ret;
10722 #endif
10723 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
10724     case TARGET_NR_fchmodat2:
10725         if (!(p = lock_user_string(arg2))) {
10726             return -TARGET_EFAULT;
10727         }
10728         ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4));
10729         unlock_user(p, arg2, 0);
10730         return ret;
10731 #endif
10732     case TARGET_NR_getpriority:
10733         /* Note that negative values are valid for getpriority, so we must
10734            differentiate based on errno settings.  */
10735         errno = 0;
10736         ret = getpriority(arg1, arg2);
10737         if (ret == -1 && errno != 0) {
10738             return -host_to_target_errno(errno);
10739         }
10740 #ifdef TARGET_ALPHA
10741         /* Return value is the unbiased priority.  Signal no error.  */
10742         cpu_env->ir[IR_V0] = 0;
10743 #else
10744         /* Return value is a biased priority to avoid negative numbers.  */
10745         ret = 20 - ret;
10746 #endif
10747         return ret;
10748     case TARGET_NR_setpriority:
10749         return get_errno(setpriority(arg1, arg2, arg3));
10750 #ifdef TARGET_NR_statfs
10751     case TARGET_NR_statfs:
10752         if (!(p = lock_user_string(arg1))) {
10753             return -TARGET_EFAULT;
10754         }
10755         ret = get_errno(statfs(path(p), &stfs));
10756         unlock_user(p, arg1, 0);
10757     convert_statfs:
10758         if (!is_error(ret)) {
10759             struct target_statfs *target_stfs;
10760 
10761             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10762                 return -TARGET_EFAULT;
10763             __put_user(stfs.f_type, &target_stfs->f_type);
10764             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10765             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10766             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10767             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10768             __put_user(stfs.f_files, &target_stfs->f_files);
10769             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10770             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10771             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10772             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10773             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10774 #ifdef _STATFS_F_FLAGS
10775             __put_user(stfs.f_flags, &target_stfs->f_flags);
10776 #else
10777             __put_user(0, &target_stfs->f_flags);
10778 #endif
10779             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10780             unlock_user_struct(target_stfs, arg2, 1);
10781         }
10782         return ret;
10783 #endif
10784 #ifdef TARGET_NR_fstatfs
10785     case TARGET_NR_fstatfs:
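              /* Shares the host-to-target statfs conversion above via the
               * convert_statfs label. */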
10786         ret = get_errno(fstatfs(arg1, &stfs));
10787         goto convert_statfs;
10788 #endif
10789 #ifdef TARGET_NR_statfs64
10790     case TARGET_NR_statfs64:
10791         if (!(p = lock_user_string(arg1))) {
10792             return -TARGET_EFAULT;
10793         }
10794         ret = get_errno(statfs(path(p), &stfs));
10795         unlock_user(p, arg1, 0);
10796     convert_statfs64:
10797         if (!is_error(ret)) {
10798             struct target_statfs64 *target_stfs;
10799 
10800             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10801                 return -TARGET_EFAULT;
10802             __put_user(stfs.f_type, &target_stfs->f_type);
10803             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10804             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10805             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10806             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10807             __put_user(stfs.f_files, &target_stfs->f_files);
10808             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10809             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10810             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10811             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10812             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10813 #ifdef _STATFS_F_FLAGS
10814             __put_user(stfs.f_flags, &target_stfs->f_flags);
10815 #else
10816             __put_user(0, &target_stfs->f_flags);
10817 #endif
10818             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10819             unlock_user_struct(target_stfs, arg3, 1);
10820         }
10821         return ret;
10822     case TARGET_NR_fstatfs64:
10823         ret = get_errno(fstatfs(arg1, &stfs));
10824         goto convert_statfs64;
10825 #endif
10826 #ifdef TARGET_NR_socketcall
10827     case TARGET_NR_socketcall:
10828         return do_socketcall(arg1, arg2);
10829 #endif
10830 #ifdef TARGET_NR_accept
10831     case TARGET_NR_accept:
10832         return do_accept4(arg1, arg2, arg3, 0);
10833 #endif
10834 #ifdef TARGET_NR_accept4
10835     case TARGET_NR_accept4:
10836         return do_accept4(arg1, arg2, arg3, arg4);
10837 #endif
10838 #ifdef TARGET_NR_bind
10839     case TARGET_NR_bind:
10840         return do_bind(arg1, arg2, arg3);
10841 #endif
10842 #ifdef TARGET_NR_connect
10843     case TARGET_NR_connect:
10844         return do_connect(arg1, arg2, arg3);
10845 #endif
10846 #ifdef TARGET_NR_getpeername
10847     case TARGET_NR_getpeername:
10848         return do_getpeername(arg1, arg2, arg3);
10849 #endif
10850 #ifdef TARGET_NR_getsockname
10851     case TARGET_NR_getsockname:
10852         return do_getsockname(arg1, arg2, arg3);
10853 #endif
10854 #ifdef TARGET_NR_getsockopt
10855     case TARGET_NR_getsockopt:
10856         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10857 #endif
10858 #ifdef TARGET_NR_listen
10859     case TARGET_NR_listen:
10860         return get_errno(listen(arg1, arg2));
10861 #endif
10862 #ifdef TARGET_NR_recv
10863     case TARGET_NR_recv:
10864         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10865 #endif
10866 #ifdef TARGET_NR_recvfrom
10867     case TARGET_NR_recvfrom:
10868         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10869 #endif
10870 #ifdef TARGET_NR_recvmsg
10871     case TARGET_NR_recvmsg:
10872         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10873 #endif
10874 #ifdef TARGET_NR_send
10875     case TARGET_NR_send:
10876         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10877 #endif
10878 #ifdef TARGET_NR_sendmsg
10879     case TARGET_NR_sendmsg:
10880         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10881 #endif
10882 #ifdef TARGET_NR_sendmmsg
10883     case TARGET_NR_sendmmsg:
10884         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10885 #endif
10886 #ifdef TARGET_NR_recvmmsg
10887     case TARGET_NR_recvmmsg:
10888         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10889 #endif
10890 #ifdef TARGET_NR_sendto
10891     case TARGET_NR_sendto:
10892         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10893 #endif
10894 #ifdef TARGET_NR_shutdown
10895     case TARGET_NR_shutdown:
10896         return get_errno(shutdown(arg1, arg2));
10897 #endif
10898 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10899     case TARGET_NR_getrandom:
10900         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10901         if (!p) {
10902             return -TARGET_EFAULT;
10903         }
10904         ret = get_errno(getrandom(p, arg2, arg3));
10905         unlock_user(p, arg1, ret);
10906         return ret;
10907 #endif
10908 #ifdef TARGET_NR_socket
10909     case TARGET_NR_socket:
10910         return do_socket(arg1, arg2, arg3);
10911 #endif
10912 #ifdef TARGET_NR_socketpair
10913     case TARGET_NR_socketpair:
10914         return do_socketpair(arg1, arg2, arg3, arg4);
10915 #endif
10916 #ifdef TARGET_NR_setsockopt
10917     case TARGET_NR_setsockopt:
10918         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10919 #endif
10920 #if defined(TARGET_NR_syslog)
10921     case TARGET_NR_syslog:
10922         {
10923             int len = arg3;
10924 
10925             switch (arg1) {
10926             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10927             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10928             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10929             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10930             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10931             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10932             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10933             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10934                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10935             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10936             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10937             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10938                 {
10939                     if (len < 0) {
10940                         return -TARGET_EINVAL;
10941                     }
10942                     if (len == 0) {
10943                         return 0;
10944                     }
10945                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10946                     if (!p) {
10947                         return -TARGET_EFAULT;
10948                     }
10949                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10950                     unlock_user(p, arg2, arg3);
10951                 }
10952                 return ret;
10953             default:
10954                 return -TARGET_EINVAL;
10955             }
10956         }
10957         break;
10958 #endif
10959     case TARGET_NR_setitimer:
10960         {
10961             struct itimerval value, ovalue, *pvalue;
10962 
10963             if (arg2) {
10964                 pvalue = &value;
10965                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10966                     || copy_from_user_timeval(&pvalue->it_value,
10967                                               arg2 + sizeof(struct target_timeval)))
10968                     return -TARGET_EFAULT;
10969             } else {
10970                 pvalue = NULL;
10971             }
10972             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10973             if (!is_error(ret) && arg3) {
10974                 if (copy_to_user_timeval(arg3,
10975                                          &ovalue.it_interval)
10976                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10977                                             &ovalue.it_value))
10978                     return -TARGET_EFAULT;
10979             }
10980         }
10981         return ret;
10982     case TARGET_NR_getitimer:
10983         {
10984             struct itimerval value;
10985 
10986             ret = get_errno(getitimer(arg1, &value));
10987             if (!is_error(ret) && arg2) {
10988                 if (copy_to_user_timeval(arg2,
10989                                          &value.it_interval)
10990                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10991                                             &value.it_value))
10992                     return -TARGET_EFAULT;
10993             }
10994         }
10995         return ret;
10996 #ifdef TARGET_NR_stat
10997     case TARGET_NR_stat:
10998         if (!(p = lock_user_string(arg1))) {
10999             return -TARGET_EFAULT;
11000         }
11001         ret = get_errno(stat(path(p), &st));
11002         unlock_user(p, arg1, 0);
11003         goto do_stat;
11004 #endif
11005 #ifdef TARGET_NR_lstat
11006     case TARGET_NR_lstat:
11007         if (!(p = lock_user_string(arg1))) {
11008             return -TARGET_EFAULT;
11009         }
11010         ret = get_errno(lstat(path(p), &st));
11011         unlock_user(p, arg1, 0);
11012         goto do_stat;
11013 #endif
11014 #ifdef TARGET_NR_fstat
11015     case TARGET_NR_fstat:
11016         {
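                  /* TARGET_NR_stat and TARGET_NR_lstat jump to the do_stat
                   * label below so all three share this struct stat
                   * conversion. */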
11017             ret = get_errno(fstat(arg1, &st));
11018 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11019         do_stat:
11020 #endif
11021             if (!is_error(ret)) {
11022                 struct target_stat *target_st;
11023 
11024                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11025                     return -TARGET_EFAULT;
11026                 memset(target_st, 0, sizeof(*target_st));
11027                 __put_user(st.st_dev, &target_st->st_dev);
11028                 __put_user(st.st_ino, &target_st->st_ino);
11029                 __put_user(st.st_mode, &target_st->st_mode);
11030                 __put_user(st.st_uid, &target_st->st_uid);
11031                 __put_user(st.st_gid, &target_st->st_gid);
11032                 __put_user(st.st_nlink, &target_st->st_nlink);
11033                 __put_user(st.st_rdev, &target_st->st_rdev);
11034                 __put_user(st.st_size, &target_st->st_size);
11035                 __put_user(st.st_blksize, &target_st->st_blksize);
11036                 __put_user(st.st_blocks, &target_st->st_blocks);
11037                 __put_user(st.st_atime, &target_st->target_st_atime);
11038                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11039                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11040 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11041                 __put_user(st.st_atim.tv_nsec,
11042                            &target_st->target_st_atime_nsec);
11043                 __put_user(st.st_mtim.tv_nsec,
11044                            &target_st->target_st_mtime_nsec);
11045                 __put_user(st.st_ctim.tv_nsec,
11046                            &target_st->target_st_ctime_nsec);
11047 #endif
11048                 unlock_user_struct(target_st, arg2, 1);
11049             }
11050         }
11051         return ret;
11052 #endif
11053     case TARGET_NR_vhangup:
11054         return get_errno(vhangup());
11055 #ifdef TARGET_NR_syscall
11056     case TARGET_NR_syscall:
11057         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11058                           arg6, arg7, arg8, 0);
11059 #endif
11060 #if defined(TARGET_NR_wait4)
11061     case TARGET_NR_wait4:
11062         {
11063             int status;
11064             abi_long status_ptr = arg2;
11065             struct rusage rusage, *rusage_ptr;
11066             abi_ulong target_rusage = arg4;
11067             abi_long rusage_err;
11068             if (target_rusage)
11069                 rusage_ptr = &rusage;
11070             else
11071                 rusage_ptr = NULL;
11072             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11073             if (!is_error(ret)) {
11074                 if (status_ptr && ret) {
11075                     status = host_to_target_waitstatus(status);
11076                     if (put_user_s32(status, status_ptr))
11077                         return -TARGET_EFAULT;
11078                 }
11079                 if (target_rusage) {
11080                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11081                     if (rusage_err) {
11082                         ret = rusage_err;
11083                     }
11084                 }
11085             }
11086         }
11087         return ret;
11088 #endif
11089 #ifdef TARGET_NR_swapoff
11090     case TARGET_NR_swapoff:
11091         if (!(p = lock_user_string(arg1)))
11092             return -TARGET_EFAULT;
11093         ret = get_errno(swapoff(p));
11094         unlock_user(p, arg1, 0);
11095         return ret;
11096 #endif
11097     case TARGET_NR_sysinfo:
11098         {
11099             struct target_sysinfo *target_value;
11100             struct sysinfo value;
11101             ret = get_errno(sysinfo(&value));
11102             if (!is_error(ret) && arg1)
11103             {
11104                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11105                     return -TARGET_EFAULT;
11106                 __put_user(value.uptime, &target_value->uptime);
11107                 __put_user(value.loads[0], &target_value->loads[0]);
11108                 __put_user(value.loads[1], &target_value->loads[1]);
11109                 __put_user(value.loads[2], &target_value->loads[2]);
11110                 __put_user(value.totalram, &target_value->totalram);
11111                 __put_user(value.freeram, &target_value->freeram);
11112                 __put_user(value.sharedram, &target_value->sharedram);
11113                 __put_user(value.bufferram, &target_value->bufferram);
11114                 __put_user(value.totalswap, &target_value->totalswap);
11115                 __put_user(value.freeswap, &target_value->freeswap);
11116                 __put_user(value.procs, &target_value->procs);
11117                 __put_user(value.totalhigh, &target_value->totalhigh);
11118                 __put_user(value.freehigh, &target_value->freehigh);
11119                 __put_user(value.mem_unit, &target_value->mem_unit);
11120                 unlock_user_struct(target_value, arg1, 1);
11121             }
11122         }
11123         return ret;
11124 #ifdef TARGET_NR_ipc
11125     case TARGET_NR_ipc:
11126         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11127 #endif
11128 #ifdef TARGET_NR_semget
11129     case TARGET_NR_semget:
11130         return get_errno(semget(arg1, arg2, arg3));
11131 #endif
11132 #ifdef TARGET_NR_semop
11133     case TARGET_NR_semop:
11134         return do_semtimedop(arg1, arg2, arg3, 0, false);
11135 #endif
11136 #ifdef TARGET_NR_semtimedop
11137     case TARGET_NR_semtimedop:
11138         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11139 #endif
11140 #ifdef TARGET_NR_semtimedop_time64
11141     case TARGET_NR_semtimedop_time64:
11142         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11143 #endif
11144 #ifdef TARGET_NR_semctl
11145     case TARGET_NR_semctl:
11146         return do_semctl(arg1, arg2, arg3, arg4);
11147 #endif
11148 #ifdef TARGET_NR_msgctl
11149     case TARGET_NR_msgctl:
11150         return do_msgctl(arg1, arg2, arg3);
11151 #endif
11152 #ifdef TARGET_NR_msgget
11153     case TARGET_NR_msgget:
11154         return get_errno(msgget(arg1, arg2));
11155 #endif
11156 #ifdef TARGET_NR_msgrcv
11157     case TARGET_NR_msgrcv:
11158         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11159 #endif
11160 #ifdef TARGET_NR_msgsnd
11161     case TARGET_NR_msgsnd:
11162         return do_msgsnd(arg1, arg2, arg3, arg4);
11163 #endif
11164 #ifdef TARGET_NR_shmget
11165     case TARGET_NR_shmget:
11166         return get_errno(shmget(arg1, arg2, arg3));
11167 #endif
11168 #ifdef TARGET_NR_shmctl
11169     case TARGET_NR_shmctl:
11170         return do_shmctl(arg1, arg2, arg3);
11171 #endif
11172 #ifdef TARGET_NR_shmat
11173     case TARGET_NR_shmat:
11174         return target_shmat(cpu_env, arg1, arg2, arg3);
11175 #endif
11176 #ifdef TARGET_NR_shmdt
11177     case TARGET_NR_shmdt:
11178         return target_shmdt(arg1);
11179 #endif
11180     case TARGET_NR_fsync:
11181         return get_errno(fsync(arg1));
11182     case TARGET_NR_clone:
11183         /* Linux manages to have three different orderings for its
11184          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11185          * match the kernel's CONFIG_CLONE_* settings.
11186          * Microblaze is further special in that it uses a sixth
11187          * implicit argument to clone for the TLS pointer.
11188          */
11189 #if defined(TARGET_MICROBLAZE)
11190         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11191 #elif defined(TARGET_CLONE_BACKWARDS)
11192         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11193 #elif defined(TARGET_CLONE_BACKWARDS2)
11194         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11195 #else
11196         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11197 #endif
11198         return ret;
11199 #ifdef __NR_exit_group
11200         /* new thread calls */
11201     case TARGET_NR_exit_group:
11202         preexit_cleanup(cpu_env, arg1);
11203         return get_errno(exit_group(arg1));
11204 #endif
11205     case TARGET_NR_setdomainname:
11206         if (!(p = lock_user_string(arg1)))
11207             return -TARGET_EFAULT;
11208         ret = get_errno(setdomainname(p, arg2));
11209         unlock_user(p, arg1, 0);
11210         return ret;
11211     case TARGET_NR_uname:
11212         /* no need to transcode because we use the linux syscall */
11213         {
11214             struct new_utsname * buf;
11215 
11216             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11217                 return -TARGET_EFAULT;
11218             ret = get_errno(sys_uname(buf));
11219             if (!is_error(ret)) {
11220                 /* Overwrite the native machine name with whatever is being
11221                    emulated. */
11222                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11223                           sizeof(buf->machine));
11224                 /* Allow the user to override the reported release.  */
11225                 if (qemu_uname_release && *qemu_uname_release) {
11226                     g_strlcpy(buf->release, qemu_uname_release,
11227                               sizeof(buf->release));
11228                 }
11229             }
11230             unlock_user_struct(buf, arg1, 1);
11231         }
11232         return ret;
11233 #ifdef TARGET_I386
11234     case TARGET_NR_modify_ldt:
11235         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11236 #if !defined(TARGET_X86_64)
11237     case TARGET_NR_vm86:
11238         return do_vm86(cpu_env, arg1, arg2);
11239 #endif
11240 #endif
11241 #if defined(TARGET_NR_adjtimex)
11242     case TARGET_NR_adjtimex:
11243         {
11244             struct timex host_buf;
11245 
11246             if (target_to_host_timex(&host_buf, arg1) != 0) {
11247                 return -TARGET_EFAULT;
11248             }
11249             ret = get_errno(adjtimex(&host_buf));
11250             if (!is_error(ret)) {
11251                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11252                     return -TARGET_EFAULT;
11253                 }
11254             }
11255         }
11256         return ret;
11257 #endif
11258 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11259     case TARGET_NR_clock_adjtime:
11260         {
11261             struct timex htx;
11262 
11263             if (target_to_host_timex(&htx, arg2) != 0) {
11264                 return -TARGET_EFAULT;
11265             }
11266             ret = get_errno(clock_adjtime(arg1, &htx));
11267             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11268                 return -TARGET_EFAULT;
11269             }
11270         }
11271         return ret;
11272 #endif
11273 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11274     case TARGET_NR_clock_adjtime64:
11275         {
11276             struct timex htx;
11277 
11278             if (target_to_host_timex64(&htx, arg2) != 0) {
11279                 return -TARGET_EFAULT;
11280             }
11281             ret = get_errno(clock_adjtime(arg1, &htx));
11282             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11283                 return -TARGET_EFAULT;
11284             }
11285         }
11286         return ret;
11287 #endif
11288     case TARGET_NR_getpgid:
11289         return get_errno(getpgid(arg1));
11290     case TARGET_NR_fchdir:
11291         return get_errno(fchdir(arg1));
11292     case TARGET_NR_personality:
11293         return get_errno(personality(arg1));
11294 #ifdef TARGET_NR__llseek /* Not on alpha */
11295     case TARGET_NR__llseek:
11296         {
11297             int64_t res;
11298 #if !defined(__NR_llseek)
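                  /* Hosts without __NR_llseek (typically 64-bit hosts) can
                   * seek directly with lseek() on the combined 64-bit
                   * offset. */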
11299             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11300             if (res == -1) {
11301                 ret = get_errno(res);
11302             } else {
11303                 ret = 0;
11304             }
11305 #else
11306             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11307 #endif
11308             if ((ret == 0) && put_user_s64(res, arg4)) {
11309                 return -TARGET_EFAULT;
11310             }
11311         }
11312         return ret;
11313 #endif
11314 #ifdef TARGET_NR_getdents
11315     case TARGET_NR_getdents:
11316         return do_getdents(arg1, arg2, arg3);
11317 #endif /* TARGET_NR_getdents */
11318 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11319     case TARGET_NR_getdents64:
11320         return do_getdents64(arg1, arg2, arg3);
11321 #endif /* TARGET_NR_getdents64 */
11322 #if defined(TARGET_NR__newselect)
11323     case TARGET_NR__newselect:
11324         return do_select(arg1, arg2, arg3, arg4, arg5);
11325 #endif
11326 #ifdef TARGET_NR_poll
11327     case TARGET_NR_poll:
11328         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11329 #endif
11330 #ifdef TARGET_NR_ppoll
11331     case TARGET_NR_ppoll:
11332         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11333 #endif
11334 #ifdef TARGET_NR_ppoll_time64
11335     case TARGET_NR_ppoll_time64:
11336         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11337 #endif
11338     case TARGET_NR_flock:
11339         /* NOTE: the flock constants (LOCK_SH, LOCK_EX, ...) are the same
11340            on every Linux platform, so no translation is needed. */
11341         return get_errno(safe_flock(arg1, arg2));
11342     case TARGET_NR_readv:
11343         {
11344             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11345             if (vec != NULL) {
11346                 ret = get_errno(safe_readv(arg1, vec, arg3));
11347                 unlock_iovec(vec, arg2, arg3, 1);
11348             } else {
11349                 ret = -host_to_target_errno(errno);
11350             }
11351         }
11352         return ret;
11353     case TARGET_NR_writev:
11354         {
11355             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11356             if (vec != NULL) {
11357                 ret = get_errno(safe_writev(arg1, vec, arg3));
11358                 unlock_iovec(vec, arg2, arg3, 0);
11359             } else {
11360                 ret = -host_to_target_errno(errno);
11361             }
11362         }
11363         return ret;
11364 #if defined(TARGET_NR_preadv)
11365     case TARGET_NR_preadv:
11366         {
11367             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11368             if (vec != NULL) {
11369                 unsigned long low, high;
11370 
11371                 target_to_host_low_high(arg4, arg5, &low, &high);
11372                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11373                 unlock_iovec(vec, arg2, arg3, 1);
11374             } else {
11375                 ret = -host_to_target_errno(errno);
11376             }
11377         }
11378         return ret;
11379 #endif
11380 #if defined(TARGET_NR_pwritev)
11381     case TARGET_NR_pwritev:
11382         {
11383             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11384             if (vec != NULL) {
11385                 unsigned long low, high;
11386 
11387                 target_to_host_low_high(arg4, arg5, &low, &high);
11388                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11389                 unlock_iovec(vec, arg2, arg3, 0);
11390             } else {
11391                 ret = -host_to_target_errno(errno);
11392             }
11393         }
11394         return ret;
11395 #endif
11396     case TARGET_NR_getsid:
11397         return get_errno(getsid(arg1));
11398 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11399     case TARGET_NR_fdatasync:
11400         return get_errno(fdatasync(arg1));
11401 #endif
11402     case TARGET_NR_sched_getaffinity:
11403         {
11404             unsigned int mask_size;
11405             unsigned long *mask;
11406 
11407             /*
11408              * sched_getaffinity needs multiples of ulong, so need to take
11409              * care of mismatches between target ulong and host ulong sizes.
11410              */
11411             if (arg2 & (sizeof(abi_ulong) - 1)) {
11412                 return -TARGET_EINVAL;
11413             }
11414             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11415 
11416             mask = alloca(mask_size);
11417             memset(mask, 0, mask_size);
11418             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11419 
11420             if (!is_error(ret)) {
11421                 if (ret > arg2) {
11422                     /* More data returned than the caller's buffer will fit.
11423                      * This only happens if sizeof(abi_long) < sizeof(long)
11424                      * and the caller passed us a buffer holding an odd number
11425                      * of abi_longs. If the host kernel is actually using the
11426                      * extra 4 bytes then fail EINVAL; otherwise we can just
11427                      * ignore them and only copy the interesting part.
11428                      */
11429                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11430                     if (numcpus > arg2 * 8) {
11431                         return -TARGET_EINVAL;
11432                     }
11433                     ret = arg2;
11434                 }
11435 
11436                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11437                     return -TARGET_EFAULT;
11438                 }
11439             }
11440         }
11441         return ret;
11442     case TARGET_NR_sched_setaffinity:
11443         {
11444             unsigned int mask_size;
11445             unsigned long *mask;
11446 
11447             /*
11448              * sched_setaffinity needs multiples of ulong, so need to take
11449              * care of mismatches between target ulong and host ulong sizes.
11450              */
11451             if (arg2 & (sizeof(abi_ulong) - 1)) {
11452                 return -TARGET_EINVAL;
11453             }
11454             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11455             mask = alloca(mask_size);
11456 
11457             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11458             if (ret) {
11459                 return ret;
11460             }
11461 
11462             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11463         }
11464     case TARGET_NR_getcpu:
11465         {
11466             unsigned cpuid, node;
11467             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11468                                        arg2 ? &node : NULL,
11469                                        NULL));
11470             if (is_error(ret)) {
11471                 return ret;
11472             }
11473             if (arg1 && put_user_u32(cpuid, arg1)) {
11474                 return -TARGET_EFAULT;
11475             }
11476             if (arg2 && put_user_u32(node, arg2)) {
11477                 return -TARGET_EFAULT;
11478             }
11479         }
11480         return ret;
11481     case TARGET_NR_sched_setparam:
11482         {
11483             struct target_sched_param *target_schp;
11484             struct sched_param schp;
11485 
11486             if (arg2 == 0) {
11487                 return -TARGET_EINVAL;
11488             }
11489             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11490                 return -TARGET_EFAULT;
11491             }
11492             schp.sched_priority = tswap32(target_schp->sched_priority);
11493             unlock_user_struct(target_schp, arg2, 0);
11494             return get_errno(sys_sched_setparam(arg1, &schp));
11495         }
11496     case TARGET_NR_sched_getparam:
11497         {
11498             struct target_sched_param *target_schp;
11499             struct sched_param schp;
11500 
11501             if (arg2 == 0) {
11502                 return -TARGET_EINVAL;
11503             }
11504             ret = get_errno(sys_sched_getparam(arg1, &schp));
11505             if (!is_error(ret)) {
11506                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11507                     return -TARGET_EFAULT;
11508                 }
11509                 target_schp->sched_priority = tswap32(schp.sched_priority);
11510                 unlock_user_struct(target_schp, arg2, 1);
11511             }
11512         }
11513         return ret;
11514     case TARGET_NR_sched_setscheduler:
11515         {
11516             struct target_sched_param *target_schp;
11517             struct sched_param schp;
11518             if (arg3 == 0) {
11519                 return -TARGET_EINVAL;
11520             }
11521             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11522                 return -TARGET_EFAULT;
11523             }
11524             schp.sched_priority = tswap32(target_schp->sched_priority);
11525             unlock_user_struct(target_schp, arg3, 0);
11526             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11527         }
11528     case TARGET_NR_sched_getscheduler:
11529         return get_errno(sys_sched_getscheduler(arg1));
11530     case TARGET_NR_sched_getattr:
11531         {
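                  /* Clamp the guest's buffer size (arg3) to our sched_attr
                   * layout; the utilization-clamp fields are copied back only
                   * if the kernel's reported size includes them. */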
11532             struct target_sched_attr *target_scha;
11533             struct sched_attr scha;
11534             if (arg2 == 0) {
11535                 return -TARGET_EINVAL;
11536             }
11537             if (arg3 > sizeof(scha)) {
11538                 arg3 = sizeof(scha);
11539             }
11540             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11541             if (!is_error(ret)) {
11542                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11543                 if (!target_scha) {
11544                     return -TARGET_EFAULT;
11545                 }
11546                 target_scha->size = tswap32(scha.size);
11547                 target_scha->sched_policy = tswap32(scha.sched_policy);
11548                 target_scha->sched_flags = tswap64(scha.sched_flags);
11549                 target_scha->sched_nice = tswap32(scha.sched_nice);
11550                 target_scha->sched_priority = tswap32(scha.sched_priority);
11551                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11552                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11553                 target_scha->sched_period = tswap64(scha.sched_period);
11554                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11555                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11556                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11557                 }
11558                 unlock_user(target_scha, arg2, arg3);
11559             }
11560             return ret;
11561         }
11562     case TARGET_NR_sched_setattr:
11563         {
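                  /* Negotiate the struct size with the guest: read its
                   * sched_attr size, reject layouts we do not understand
                   * (too small, or with non-zero bytes beyond the fields we
                   * know) with E2BIG while reporting our own size, then clamp
                   * before converting the fields. */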
11564             struct target_sched_attr *target_scha;
11565             struct sched_attr scha;
11566             uint32_t size;
11567             int zeroed;
11568             if (arg2 == 0) {
11569                 return -TARGET_EINVAL;
11570             }
11571             if (get_user_u32(size, arg2)) {
11572                 return -TARGET_EFAULT;
11573             }
11574             if (!size) {
11575                 size = offsetof(struct target_sched_attr, sched_util_min);
11576             }
11577             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11578                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11579                     return -TARGET_EFAULT;
11580                 }
11581                 return -TARGET_E2BIG;
11582             }
11583 
11584             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11585             if (zeroed < 0) {
11586                 return zeroed;
11587             } else if (zeroed == 0) {
11588                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11589                     return -TARGET_EFAULT;
11590                 }
11591                 return -TARGET_E2BIG;
11592             }
11593             if (size > sizeof(struct target_sched_attr)) {
11594                 size = sizeof(struct target_sched_attr);
11595             }
11596 
11597             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11598             if (!target_scha) {
11599                 return -TARGET_EFAULT;
11600             }
11601             scha.size = size;
11602             scha.sched_policy = tswap32(target_scha->sched_policy);
11603             scha.sched_flags = tswap64(target_scha->sched_flags);
11604             scha.sched_nice = tswap32(target_scha->sched_nice);
11605             scha.sched_priority = tswap32(target_scha->sched_priority);
11606             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11607             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11608             scha.sched_period = tswap64(target_scha->sched_period);
11609             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11610                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11611                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11612             }
11613             unlock_user(target_scha, arg2, 0);
11614             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11615         }
11616     case TARGET_NR_sched_yield:
11617         return get_errno(sched_yield());
11618     case TARGET_NR_sched_get_priority_max:
11619         return get_errno(sched_get_priority_max(arg1));
11620     case TARGET_NR_sched_get_priority_min:
11621         return get_errno(sched_get_priority_min(arg1));
11622 #ifdef TARGET_NR_sched_rr_get_interval
11623     case TARGET_NR_sched_rr_get_interval:
11624         {
11625             struct timespec ts;
11626             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11627             if (!is_error(ret)) {
11628                 ret = host_to_target_timespec(arg2, &ts);
11629             }
11630         }
11631         return ret;
11632 #endif
11633 #ifdef TARGET_NR_sched_rr_get_interval_time64
11634     case TARGET_NR_sched_rr_get_interval_time64:
11635         {
11636             struct timespec ts;
11637             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11638             if (!is_error(ret)) {
11639                 ret = host_to_target_timespec64(arg2, &ts);
11640             }
11641         }
11642         return ret;
11643 #endif
11644 #if defined(TARGET_NR_nanosleep)
11645     case TARGET_NR_nanosleep:
11646         {
11647             struct timespec req, rem;
11648             if (target_to_host_timespec(&req, arg1)) {
11649                 return -TARGET_EFAULT;
11650             }
11651             ret = get_errno(safe_nanosleep(&req, &rem));
11652             if (is_error(ret) && arg2) {
11653                 if (host_to_target_timespec(arg2, &rem)) {
11654                     return -TARGET_EFAULT;
11655                 }
11656             }
11657         }
11658         return ret;
11659 #endif
11660     case TARGET_NR_prctl:
11661         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11663 #ifdef TARGET_NR_arch_prctl
11664     case TARGET_NR_arch_prctl:
11665         return do_arch_prctl(cpu_env, arg1, arg2);
11666 #endif
11667 #ifdef TARGET_NR_pread64
11668     case TARGET_NR_pread64:
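              /* The 64-bit file offset is split across two registers; targets
               * that align 64-bit register pairs start it one argument later,
               * hence the shift before reassembling it with
               * target_offset64(). */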
11669         if (regpairs_aligned(cpu_env, num)) {
11670             arg4 = arg5;
11671             arg5 = arg6;
11672         }
11673         if (arg2 == 0 && arg3 == 0) {
11674             /* Special-case NULL buffer and zero length, which should succeed */
11675             p = 0;
11676         } else {
11677             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11678             if (!p) {
11679                 return -TARGET_EFAULT;
11680             }
11681         }
11682         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11683         unlock_user(p, arg2, ret);
11684         return ret;
11685     case TARGET_NR_pwrite64:
11686         if (regpairs_aligned(cpu_env, num)) {
11687             arg4 = arg5;
11688             arg5 = arg6;
11689         }
11690         if (arg2 == 0 && arg3 == 0) {
11691             /* Special-case NULL buffer and zero length, which should succeed */
11692             p = 0;
11693         } else {
11694             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11695             if (!p) {
11696                 return -TARGET_EFAULT;
11697             }
11698         }
11699         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11700         unlock_user(p, arg2, 0);
11701         return ret;
11702 #endif
11703     case TARGET_NR_getcwd:
11704         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11705             return -TARGET_EFAULT;
11706         ret = get_errno(sys_getcwd1(p, arg2));
11707         unlock_user(p, arg1, ret);
11708         return ret;
11709     case TARGET_NR_capget:
11710     case TARGET_NR_capset:
11711     {
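              /* capget and capset share this implementation: the header is
               * converted in both directions (the kernel updates version even
               * on error), while the data array is copied in for capset and
               * copied back out for capget. */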
11712         struct target_user_cap_header *target_header;
11713         struct target_user_cap_data *target_data = NULL;
11714         struct __user_cap_header_struct header;
11715         struct __user_cap_data_struct data[2];
11716         struct __user_cap_data_struct *dataptr = NULL;
11717         int i, target_datalen;
11718         int data_items = 1;
11719 
11720         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11721             return -TARGET_EFAULT;
11722         }
11723         header.version = tswap32(target_header->version);
11724         header.pid = tswap32(target_header->pid);
11725 
11726         if (header.version != _LINUX_CAPABILITY_VERSION) {
11727             /* Version 2 and up takes pointer to two user_data structs */
11728             data_items = 2;
11729         }
11730 
11731         target_datalen = sizeof(*target_data) * data_items;
11732 
11733         if (arg2) {
11734             if (num == TARGET_NR_capget) {
11735                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11736             } else {
11737                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11738             }
11739             if (!target_data) {
11740                 unlock_user_struct(target_header, arg1, 0);
11741                 return -TARGET_EFAULT;
11742             }
11743 
11744             if (num == TARGET_NR_capset) {
11745                 for (i = 0; i < data_items; i++) {
11746                     data[i].effective = tswap32(target_data[i].effective);
11747                     data[i].permitted = tswap32(target_data[i].permitted);
11748                     data[i].inheritable = tswap32(target_data[i].inheritable);
11749                 }
11750             }
11751 
11752             dataptr = data;
11753         }
11754 
11755         if (num == TARGET_NR_capget) {
11756             ret = get_errno(capget(&header, dataptr));
11757         } else {
11758             ret = get_errno(capset(&header, dataptr));
11759         }
11760 
11761         /* The kernel always updates version for both capget and capset */
11762         target_header->version = tswap32(header.version);
11763         unlock_user_struct(target_header, arg1, 1);
11764 
11765         if (arg2) {
11766             if (num == TARGET_NR_capget) {
11767                 for (i = 0; i < data_items; i++) {
11768                     target_data[i].effective = tswap32(data[i].effective);
11769                     target_data[i].permitted = tswap32(data[i].permitted);
11770                     target_data[i].inheritable = tswap32(data[i].inheritable);
11771                 }
11772                 unlock_user(target_data, arg2, target_datalen);
11773             } else {
11774                 unlock_user(target_data, arg2, 0);
11775             }
11776         }
11777         return ret;
11778     }
11779     case TARGET_NR_sigaltstack:
11780         return do_sigaltstack(arg1, arg2, cpu_env);
11781 
11782 #ifdef CONFIG_SENDFILE
11783 #ifdef TARGET_NR_sendfile
11784     case TARGET_NR_sendfile:
11785     {
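              /*
               * Note added for clarity: if the guest passed an offset
               * pointer, copy the value in, let the host sendfile() update
               * it, and copy the result back so the guest sees the new
               * offset.
               */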
11786         off_t *offp = NULL;
11787         off_t off;
11788         if (arg3) {
11789             ret = get_user_sal(off, arg3);
11790             if (is_error(ret)) {
11791                 return ret;
11792             }
11793             offp = &off;
11794         }
11795         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11796         if (!is_error(ret) && arg3) {
11797             abi_long ret2 = put_user_sal(off, arg3);
11798             if (is_error(ret2)) {
11799                 ret = ret2;
11800             }
11801         }
11802         return ret;
11803     }
11804 #endif
11805 #ifdef TARGET_NR_sendfile64
11806     case TARGET_NR_sendfile64:
11807     {
11808         off_t *offp = NULL;
11809         off_t off;
11810         if (arg3) {
11811             ret = get_user_s64(off, arg3);
11812             if (is_error(ret)) {
11813                 return ret;
11814             }
11815             offp = &off;
11816         }
11817         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11818         if (!is_error(ret) && arg3) {
11819             abi_long ret2 = put_user_s64(off, arg3);
11820             if (is_error(ret2)) {
11821                 ret = ret2;
11822             }
11823         }
11824         return ret;
11825     }
11826 #endif
11827 #endif
11828 #ifdef TARGET_NR_vfork
11829     case TARGET_NR_vfork:
11830         return get_errno(do_fork(cpu_env,
11831                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11832                          0, 0, 0, 0));
11833 #endif
11834 #ifdef TARGET_NR_ugetrlimit
11835     case TARGET_NR_ugetrlimit:
11836     {
11837         struct rlimit rlim;
11838         int resource = target_to_host_resource(arg1);
11839         ret = get_errno(getrlimit(resource, &rlim));
11840         if (!is_error(ret)) {
11841             struct target_rlimit *target_rlim;
11842             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11843                 return -TARGET_EFAULT;
11844             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11845             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11846             unlock_user_struct(target_rlim, arg2, 1);
11847         }
11848         return ret;
11849     }
11850 #endif
11851 #ifdef TARGET_NR_truncate64
11852     case TARGET_NR_truncate64:
11853         if (!(p = lock_user_string(arg1)))
11854             return -TARGET_EFAULT;
11855         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11856         unlock_user(p, arg1, 0);
11857         return ret;
11858 #endif
11859 #ifdef TARGET_NR_ftruncate64
11860     case TARGET_NR_ftruncate64:
11861         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11862 #endif
11863 #ifdef TARGET_NR_stat64
11864     case TARGET_NR_stat64:
11865         if (!(p = lock_user_string(arg1))) {
11866             return -TARGET_EFAULT;
11867         }
11868         ret = get_errno(stat(path(p), &st));
11869         unlock_user(p, arg1, 0);
11870         if (!is_error(ret))
11871             ret = host_to_target_stat64(cpu_env, arg2, &st);
11872         return ret;
11873 #endif
11874 #ifdef TARGET_NR_lstat64
11875     case TARGET_NR_lstat64:
11876         if (!(p = lock_user_string(arg1))) {
11877             return -TARGET_EFAULT;
11878         }
11879         ret = get_errno(lstat(path(p), &st));
11880         unlock_user(p, arg1, 0);
11881         if (!is_error(ret))
11882             ret = host_to_target_stat64(cpu_env, arg2, &st);
11883         return ret;
11884 #endif
11885 #ifdef TARGET_NR_fstat64
11886     case TARGET_NR_fstat64:
11887         ret = get_errno(fstat(arg1, &st));
11888         if (!is_error(ret))
11889             ret = host_to_target_stat64(cpu_env, arg2, &st);
11890         return ret;
11891 #endif
11892 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11893 #ifdef TARGET_NR_fstatat64
11894     case TARGET_NR_fstatat64:
11895 #endif
11896 #ifdef TARGET_NR_newfstatat
11897     case TARGET_NR_newfstatat:
11898 #endif
11899         if (!(p = lock_user_string(arg2))) {
11900             return -TARGET_EFAULT;
11901         }
11902         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11903         unlock_user(p, arg2, 0);
11904         if (!is_error(ret))
11905             ret = host_to_target_stat64(cpu_env, arg3, &st);
11906         return ret;
11907 #endif
11908 #if defined(TARGET_NR_statx)
11909     case TARGET_NR_statx:
11910         {
11911             struct target_statx *target_stx;
11912             int dirfd = arg1;
11913             int flags = arg3;
11914 
11915             p = lock_user_string(arg2);
11916             if (p == NULL) {
11917                 return -TARGET_EFAULT;
11918             }
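                  /*
                   * Explanatory note: prefer the host statx() syscall when
                   * available (struct statx is assumed to have the same
                   * layout on host and target, as the comment below says).
                   * If the host kernel reports ENOSYS, or statx is not
                   * compiled in at all, fall back to fstatat() and
                   * synthesize a target_statx from the struct stat result,
                   * leaving fields that stat cannot provide as zero.
                   */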
11919 #if defined(__NR_statx)
11920             {
11921                 /*
11922                  * It is assumed that struct statx is architecture independent.
11923                  */
11924                 struct target_statx host_stx;
11925                 int mask = arg4;
11926 
11927                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11928                 if (!is_error(ret)) {
11929                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11930                         unlock_user(p, arg2, 0);
11931                         return -TARGET_EFAULT;
11932                     }
11933                 }
11934 
11935                 if (ret != -TARGET_ENOSYS) {
11936                     unlock_user(p, arg2, 0);
11937                     return ret;
11938                 }
11939             }
11940 #endif
11941             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11942             unlock_user(p, arg2, 0);
11943 
11944             if (!is_error(ret)) {
11945                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11946                     return -TARGET_EFAULT;
11947                 }
11948                 memset(target_stx, 0, sizeof(*target_stx));
11949                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11950                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11951                 __put_user(st.st_ino, &target_stx->stx_ino);
11952                 __put_user(st.st_mode, &target_stx->stx_mode);
11953                 __put_user(st.st_uid, &target_stx->stx_uid);
11954                 __put_user(st.st_gid, &target_stx->stx_gid);
11955                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11956                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11957                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11958                 __put_user(st.st_size, &target_stx->stx_size);
11959                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11960                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11961                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11962                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11963                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11964                 unlock_user_struct(target_stx, arg5, 1);
11965             }
11966         }
11967         return ret;
11968 #endif
11969 #ifdef TARGET_NR_lchown
11970     case TARGET_NR_lchown:
11971         if (!(p = lock_user_string(arg1)))
11972             return -TARGET_EFAULT;
11973         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11974         unlock_user(p, arg1, 0);
11975         return ret;
11976 #endif
11977 #ifdef TARGET_NR_getuid
11978     case TARGET_NR_getuid:
11979         return get_errno(high2lowuid(getuid()));
11980 #endif
11981 #ifdef TARGET_NR_getgid
11982     case TARGET_NR_getgid:
11983         return get_errno(high2lowgid(getgid()));
11984 #endif
11985 #ifdef TARGET_NR_geteuid
11986     case TARGET_NR_geteuid:
11987         return get_errno(high2lowuid(geteuid()));
11988 #endif
11989 #ifdef TARGET_NR_getegid
11990     case TARGET_NR_getegid:
11991         return get_errno(high2lowgid(getegid()));
11992 #endif
11993     case TARGET_NR_setreuid:
11994         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11995     case TARGET_NR_setregid:
11996         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11997     case TARGET_NR_getgroups:
11998         { /* the same code as for TARGET_NR_getgroups32 */
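                  /*
                   * Note added for clarity: host gid_t values are narrowed
                   * with high2lowgid() and byte-swapped into the target's
                   * target_id layout.  A gidsetsize of 0 is the usual
                   * "how many groups?" query, so no buffer is allocated or
                   * written in that case.
                   */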
11999             int gidsetsize = arg1;
12000             target_id *target_grouplist;
12001             g_autofree gid_t *grouplist = NULL;
12002             int i;
12003 
12004             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12005                 return -TARGET_EINVAL;
12006             }
12007             if (gidsetsize > 0) {
12008                 grouplist = g_try_new(gid_t, gidsetsize);
12009                 if (!grouplist) {
12010                     return -TARGET_ENOMEM;
12011                 }
12012             }
12013             ret = get_errno(getgroups(gidsetsize, grouplist));
12014             if (!is_error(ret) && gidsetsize > 0) {
12015                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12016                                              gidsetsize * sizeof(target_id), 0);
12017                 if (!target_grouplist) {
12018                     return -TARGET_EFAULT;
12019                 }
12020                 for (i = 0; i < ret; i++) {
12021                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12022                 }
12023                 unlock_user(target_grouplist, arg2,
12024                             gidsetsize * sizeof(target_id));
12025             }
12026             return ret;
12027         }
12028     case TARGET_NR_setgroups:
12029         { /* the same code as for TARGET_NR_setgroups32 */
12030             int gidsetsize = arg1;
12031             target_id *target_grouplist;
12032             g_autofree gid_t *grouplist = NULL;
12033             int i;
12034 
12035             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12036                 return -TARGET_EINVAL;
12037             }
12038             if (gidsetsize > 0) {
12039                 grouplist = g_try_new(gid_t, gidsetsize);
12040                 if (!grouplist) {
12041                     return -TARGET_ENOMEM;
12042                 }
12043                 target_grouplist = lock_user(VERIFY_READ, arg2,
12044                                              gidsetsize * sizeof(target_id), 1);
12045                 if (!target_grouplist) {
12046                     return -TARGET_EFAULT;
12047                 }
12048                 for (i = 0; i < gidsetsize; i++) {
12049                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12050                 }
12051                 unlock_user(target_grouplist, arg2,
12052                             gidsetsize * sizeof(target_id));
12053             }
12054             return get_errno(sys_setgroups(gidsetsize, grouplist));
12055         }
12056     case TARGET_NR_fchown:
12057         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12058 #if defined(TARGET_NR_fchownat)
12059     case TARGET_NR_fchownat:
12060         if (!(p = lock_user_string(arg2)))
12061             return -TARGET_EFAULT;
12062         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12063                                  low2highgid(arg4), arg5));
12064         unlock_user(p, arg2, 0);
12065         return ret;
12066 #endif
12067 #ifdef TARGET_NR_setresuid
12068     case TARGET_NR_setresuid:
12069         return get_errno(sys_setresuid(low2highuid(arg1),
12070                                        low2highuid(arg2),
12071                                        low2highuid(arg3)));
12072 #endif
12073 #ifdef TARGET_NR_getresuid
12074     case TARGET_NR_getresuid:
12075         {
12076             uid_t ruid, euid, suid;
12077             ret = get_errno(getresuid(&ruid, &euid, &suid));
12078             if (!is_error(ret)) {
12079                 if (put_user_id(high2lowuid(ruid), arg1)
12080                     || put_user_id(high2lowuid(euid), arg2)
12081                     || put_user_id(high2lowuid(suid), arg3))
12082                     return -TARGET_EFAULT;
12083             }
12084         }
12085         return ret;
12086 #endif
12087 #ifdef TARGET_NR_setresgid
12088     case TARGET_NR_setresgid:
12089         return get_errno(sys_setresgid(low2highgid(arg1),
12090                                        low2highgid(arg2),
12091                                        low2highgid(arg3)));
12092 #endif
12093 #ifdef TARGET_NR_getresgid
12094     case TARGET_NR_getresgid:
12095         {
12096             gid_t rgid, egid, sgid;
12097             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12098             if (!is_error(ret)) {
12099                 if (put_user_id(high2lowgid(rgid), arg1)
12100                     || put_user_id(high2lowgid(egid), arg2)
12101                     || put_user_id(high2lowgid(sgid), arg3))
12102                     return -TARGET_EFAULT;
12103             }
12104         }
12105         return ret;
12106 #endif
12107 #ifdef TARGET_NR_chown
12108     case TARGET_NR_chown:
12109         if (!(p = lock_user_string(arg1)))
12110             return -TARGET_EFAULT;
12111         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12112         unlock_user(p, arg1, 0);
12113         return ret;
12114 #endif
12115     case TARGET_NR_setuid:
12116         return get_errno(sys_setuid(low2highuid(arg1)));
12117     case TARGET_NR_setgid:
12118         return get_errno(sys_setgid(low2highgid(arg1)));
12119     case TARGET_NR_setfsuid:
12120         return get_errno(setfsuid(arg1));
12121     case TARGET_NR_setfsgid:
12122         return get_errno(setfsgid(arg1));
12123 
12124 #ifdef TARGET_NR_lchown32
12125     case TARGET_NR_lchown32:
12126         if (!(p = lock_user_string(arg1)))
12127             return -TARGET_EFAULT;
12128         ret = get_errno(lchown(p, arg2, arg3));
12129         unlock_user(p, arg1, 0);
12130         return ret;
12131 #endif
12132 #ifdef TARGET_NR_getuid32
12133     case TARGET_NR_getuid32:
12134         return get_errno(getuid());
12135 #endif
12136 
12137 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12138     /* Alpha specific */
12139     case TARGET_NR_getxuid:
12140         {
12141             uid_t euid;
12142             euid = geteuid();
12143             cpu_env->ir[IR_A4] = euid;
12144         }
12145         return get_errno(getuid());
12146 #endif
12147 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12148     /* Alpha specific */
12149     case TARGET_NR_getxgid:
12150         {
12151             gid_t egid;
12152             egid = getegid();
12153             cpu_env->ir[IR_A4] = egid;
12154         }
12155         return get_errno(getgid());
12156 #endif
12157 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12158     /* Alpha specific */
12159     case TARGET_NR_osf_getsysinfo:
12160         ret = -TARGET_EOPNOTSUPP;
12161         switch (arg1) {
12162           case TARGET_GSI_IEEE_FP_CONTROL:
12163             {
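                      /*
                       * Note added for clarity: report the software
                       * completion control word.  The sticky status bits
                       * live in the hardware FPCR, so refresh them from
                       * there before handing the SWCR value back to the
                       * guest.
                       */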
12164                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12165                 uint64_t swcr = cpu_env->swcr;
12166 
12167                 swcr &= ~SWCR_STATUS_MASK;
12168                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12169 
12170                 if (put_user_u64(swcr, arg2))
12171                     return -TARGET_EFAULT;
12172                 ret = 0;
12173             }
12174             break;
12175 
12176           /* case GSI_IEEE_STATE_AT_SIGNAL:
12177              -- Not implemented in linux kernel.
12178              case GSI_UACPROC:
12179              -- Retrieves current unaligned access state; not much used.
12180              case GSI_PROC_TYPE:
12181              -- Retrieves implver information; surely not used.
12182              case GSI_GET_HWRPB:
12183              -- Grabs a copy of the HWRPB; surely not used.
12184           */
12185         }
12186         return ret;
12187 #endif
12188 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12189     /* Alpha specific */
12190     case TARGET_NR_osf_setsysinfo:
12191         ret = -TARGET_EOPNOTSUPP;
12192         switch (arg1) {
12193           case TARGET_SSI_IEEE_FP_CONTROL:
12194             {
12195                 uint64_t swcr, fpcr;
12196 
12197                 if (get_user_u64(swcr, arg2)) {
12198                     return -TARGET_EFAULT;
12199                 }
12200 
12201                 /*
12202                  * The kernel calls swcr_update_status to update the
12203                  * status bits from the fpcr at every point that it
12204                  * could be queried.  Therefore, we store the status
12205                  * bits only in FPCR.
12206                  */
12207                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12208 
12209                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12210                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12211                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12212                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12213                 ret = 0;
12214             }
12215             break;
12216 
12217           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12218             {
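                      /*
                       * Explanatory note: fold the requested exception bits
                       * into the FPCR, then deliver SIGFPE for any exception
                       * that was not already signaled and has its trap
                       * enable bit set in the SWCR; si_code is chosen from
                       * the enabled trap bits, defaulting to
                       * TARGET_FPE_FLTUNK.
                       */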
12219                 uint64_t exc, fpcr, fex;
12220 
12221                 if (get_user_u64(exc, arg2)) {
12222                     return -TARGET_EFAULT;
12223                 }
12224                 exc &= SWCR_STATUS_MASK;
12225                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12226 
12227                 /* Old exceptions are not signaled.  */
12228                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12229                 fex = exc & ~fex;
12230                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12231                 fex &= (cpu_env)->swcr;
12232 
12233                 /* Update the hardware fpcr.  */
12234                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12235                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12236 
12237                 if (fex) {
12238                     int si_code = TARGET_FPE_FLTUNK;
12239                     target_siginfo_t info;
12240 
12241                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12242                         si_code = TARGET_FPE_FLTUND;
12243                     }
12244                     if (fex & SWCR_TRAP_ENABLE_INE) {
12245                         si_code = TARGET_FPE_FLTRES;
12246                     }
12247                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12248                         si_code = TARGET_FPE_FLTUND;
12249                     }
12250                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12251                         si_code = TARGET_FPE_FLTOVF;
12252                     }
12253                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12254                         si_code = TARGET_FPE_FLTDIV;
12255                     }
12256                     if (fex & SWCR_TRAP_ENABLE_INV) {
12257                         si_code = TARGET_FPE_FLTINV;
12258                     }
12259 
12260                     info.si_signo = SIGFPE;
12261                     info.si_errno = 0;
12262                     info.si_code = si_code;
12263                     info._sifields._sigfault._addr = (cpu_env)->pc;
12264                     queue_signal(cpu_env, info.si_signo,
12265                                  QEMU_SI_FAULT, &info);
12266                 }
12267                 ret = 0;
12268             }
12269             break;
12270 
12271           /* case SSI_NVPAIRS:
12272              -- Used with SSIN_UACPROC to enable unaligned accesses.
12273              case SSI_IEEE_STATE_AT_SIGNAL:
12274              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12275              -- Not implemented in linux kernel
12276           */
12277         }
12278         return ret;
12279 #endif
12280 #ifdef TARGET_NR_osf_sigprocmask
12281     /* Alpha specific.  */
12282     case TARGET_NR_osf_sigprocmask:
12283         {
12284             abi_ulong mask;
12285             int how;
12286             sigset_t set, oldset;
12287 
12288             switch(arg1) {
12289             case TARGET_SIG_BLOCK:
12290                 how = SIG_BLOCK;
12291                 break;
12292             case TARGET_SIG_UNBLOCK:
12293                 how = SIG_UNBLOCK;
12294                 break;
12295             case TARGET_SIG_SETMASK:
12296                 how = SIG_SETMASK;
12297                 break;
12298             default:
12299                 return -TARGET_EINVAL;
12300             }
12301             mask = arg2;
12302             target_to_host_old_sigset(&set, &mask);
12303             ret = do_sigprocmask(how, &set, &oldset);
12304             if (!ret) {
12305                 host_to_target_old_sigset(&mask, &oldset);
12306                 ret = mask;
12307             }
12308         }
12309         return ret;
12310 #endif
12311 
12312 #ifdef TARGET_NR_getgid32
12313     case TARGET_NR_getgid32:
12314         return get_errno(getgid());
12315 #endif
12316 #ifdef TARGET_NR_geteuid32
12317     case TARGET_NR_geteuid32:
12318         return get_errno(geteuid());
12319 #endif
12320 #ifdef TARGET_NR_getegid32
12321     case TARGET_NR_getegid32:
12322         return get_errno(getegid());
12323 #endif
12324 #ifdef TARGET_NR_setreuid32
12325     case TARGET_NR_setreuid32:
12326         return get_errno(sys_setreuid(arg1, arg2));
12327 #endif
12328 #ifdef TARGET_NR_setregid32
12329     case TARGET_NR_setregid32:
12330         return get_errno(sys_setregid(arg1, arg2));
12331 #endif
12332 #ifdef TARGET_NR_getgroups32
12333     case TARGET_NR_getgroups32:
12334         { /* the same code as for TARGET_NR_getgroups */
12335             int gidsetsize = arg1;
12336             uint32_t *target_grouplist;
12337             g_autofree gid_t *grouplist = NULL;
12338             int i;
12339 
12340             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12341                 return -TARGET_EINVAL;
12342             }
12343             if (gidsetsize > 0) {
12344                 grouplist = g_try_new(gid_t, gidsetsize);
12345                 if (!grouplist) {
12346                     return -TARGET_ENOMEM;
12347                 }
12348             }
12349             ret = get_errno(getgroups(gidsetsize, grouplist));
12350             if (!is_error(ret) && gidsetsize > 0) {
12351                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12352                                              gidsetsize * 4, 0);
12353                 if (!target_grouplist) {
12354                     return -TARGET_EFAULT;
12355                 }
12356                 for (i = 0; i < ret; i++) {
12357                     target_grouplist[i] = tswap32(grouplist[i]);
12358                 }
12359                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12360             }
12361             return ret;
12362         }
12363 #endif
12364 #ifdef TARGET_NR_setgroups32
12365     case TARGET_NR_setgroups32:
12366         { /* the same code as for TARGET_NR_setgroups */
12367             int gidsetsize = arg1;
12368             uint32_t *target_grouplist;
12369             g_autofree gid_t *grouplist = NULL;
12370             int i;
12371 
12372             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12373                 return -TARGET_EINVAL;
12374             }
12375             if (gidsetsize > 0) {
12376                 grouplist = g_try_new(gid_t, gidsetsize);
12377                 if (!grouplist) {
12378                     return -TARGET_ENOMEM;
12379                 }
12380                 target_grouplist = lock_user(VERIFY_READ, arg2,
12381                                              gidsetsize * 4, 1);
12382                 if (!target_grouplist) {
12383                     return -TARGET_EFAULT;
12384                 }
12385                 for (i = 0; i < gidsetsize; i++) {
12386                     grouplist[i] = tswap32(target_grouplist[i]);
12387                 }
12388                 unlock_user(target_grouplist, arg2, 0);
12389             }
12390             return get_errno(sys_setgroups(gidsetsize, grouplist));
12391         }
12392 #endif
12393 #ifdef TARGET_NR_fchown32
12394     case TARGET_NR_fchown32:
12395         return get_errno(fchown(arg1, arg2, arg3));
12396 #endif
12397 #ifdef TARGET_NR_setresuid32
12398     case TARGET_NR_setresuid32:
12399         return get_errno(sys_setresuid(arg1, arg2, arg3));
12400 #endif
12401 #ifdef TARGET_NR_getresuid32
12402     case TARGET_NR_getresuid32:
12403         {
12404             uid_t ruid, euid, suid;
12405             ret = get_errno(getresuid(&ruid, &euid, &suid));
12406             if (!is_error(ret)) {
12407                 if (put_user_u32(ruid, arg1)
12408                     || put_user_u32(euid, arg2)
12409                     || put_user_u32(suid, arg3))
12410                     return -TARGET_EFAULT;
12411             }
12412         }
12413         return ret;
12414 #endif
12415 #ifdef TARGET_NR_setresgid32
12416     case TARGET_NR_setresgid32:
12417         return get_errno(sys_setresgid(arg1, arg2, arg3));
12418 #endif
12419 #ifdef TARGET_NR_getresgid32
12420     case TARGET_NR_getresgid32:
12421         {
12422             gid_t rgid, egid, sgid;
12423             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12424             if (!is_error(ret)) {
12425                 if (put_user_u32(rgid, arg1)
12426                     || put_user_u32(egid, arg2)
12427                     || put_user_u32(sgid, arg3))
12428                     return -TARGET_EFAULT;
12429             }
12430         }
12431         return ret;
12432 #endif
12433 #ifdef TARGET_NR_chown32
12434     case TARGET_NR_chown32:
12435         if (!(p = lock_user_string(arg1)))
12436             return -TARGET_EFAULT;
12437         ret = get_errno(chown(p, arg2, arg3));
12438         unlock_user(p, arg1, 0);
12439         return ret;
12440 #endif
12441 #ifdef TARGET_NR_setuid32
12442     case TARGET_NR_setuid32:
12443         return get_errno(sys_setuid(arg1));
12444 #endif
12445 #ifdef TARGET_NR_setgid32
12446     case TARGET_NR_setgid32:
12447         return get_errno(sys_setgid(arg1));
12448 #endif
12449 #ifdef TARGET_NR_setfsuid32
12450     case TARGET_NR_setfsuid32:
12451         return get_errno(setfsuid(arg1));
12452 #endif
12453 #ifdef TARGET_NR_setfsgid32
12454     case TARGET_NR_setfsgid32:
12455         return get_errno(setfsgid(arg1));
12456 #endif
12457 #ifdef TARGET_NR_mincore
12458     case TARGET_NR_mincore:
12459         {
12460             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12461             if (!a) {
12462                 return -TARGET_ENOMEM;
12463             }
12464             p = lock_user_string(arg3);
12465             if (!p) {
12466                 ret = -TARGET_EFAULT;
12467             } else {
12468                 ret = get_errno(mincore(a, arg2, p));
12469                 unlock_user(p, arg3, ret);
12470             }
12471             unlock_user(a, arg1, 0);
12472         }
12473         return ret;
12474 #endif
12475 #ifdef TARGET_NR_arm_fadvise64_64
12476     case TARGET_NR_arm_fadvise64_64:
12477         /* arm_fadvise64_64 looks like fadvise64_64 but
12478          * with different argument order: fd, advice, offset, len
12479          * rather than the usual fd, offset, len, advice.
12480          * Note that offset and len are both 64-bit so appear as
12481          * pairs of 32-bit registers.
12482          */
12483         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12484                             target_offset64(arg5, arg6), arg2);
12485         return -host_to_target_errno(ret);
12486 #endif
12487 
12488 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12489 
12490 #ifdef TARGET_NR_fadvise64_64
12491     case TARGET_NR_fadvise64_64:
12492 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12493         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12494         ret = arg2;
12495         arg2 = arg3;
12496         arg3 = arg4;
12497         arg4 = arg5;
12498         arg5 = arg6;
12499         arg6 = ret;
12500 #else
12501         /* 6 args: fd, offset (high, low), len (high, low), advice */
12502         if (regpairs_aligned(cpu_env, num)) {
12503             /* offset is in (3,4), len in (5,6) and advice in 7 */
12504             arg2 = arg3;
12505             arg3 = arg4;
12506             arg4 = arg5;
12507             arg5 = arg6;
12508             arg6 = arg7;
12509         }
12510 #endif
12511         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12512                             target_offset64(arg4, arg5), arg6);
12513         return -host_to_target_errno(ret);
12514 #endif
12515 
12516 #ifdef TARGET_NR_fadvise64
12517     case TARGET_NR_fadvise64:
12518         /* 5 args: fd, offset (high, low), len, advice */
12519         if (regpairs_aligned(cpu_env, num)) {
12520             /* offset is in (3,4), len in 5 and advice in 6 */
12521             arg2 = arg3;
12522             arg3 = arg4;
12523             arg4 = arg5;
12524             arg5 = arg6;
12525         }
12526         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12527         return -host_to_target_errno(ret);
12528 #endif
12529 
12530 #else /* not a 32-bit ABI */
12531 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12532 #ifdef TARGET_NR_fadvise64_64
12533     case TARGET_NR_fadvise64_64:
12534 #endif
12535 #ifdef TARGET_NR_fadvise64
12536     case TARGET_NR_fadvise64:
12537 #endif
12538 #ifdef TARGET_S390X
12539         switch (arg4) {
12540         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12541         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12542         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12543         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12544         default: break;
12545         }
12546 #endif
12547         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12548 #endif
12549 #endif /* end of 64-bit ABI fadvise handling */
12550 
12551 #ifdef TARGET_NR_madvise
12552     case TARGET_NR_madvise:
12553         return target_madvise(arg1, arg2, arg3);
12554 #endif
12555 #ifdef TARGET_NR_fcntl64
12556     case TARGET_NR_fcntl64:
12557     {
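              /*
               * Note added for clarity: the F_*LK64 commands take a target
               * struct flock64, which is converted to the host's native
               * struct flock around the host fcntl() call.  ARM OABI lays
               * out flock64 differently from EABI, hence the alternate
               * copy helpers.
               */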
12558         int cmd;
12559         struct flock fl;
12560         from_flock64_fn *copyfrom = copy_from_user_flock64;
12561         to_flock64_fn *copyto = copy_to_user_flock64;
12562 
12563 #ifdef TARGET_ARM
12564         if (!cpu_env->eabi) {
12565             copyfrom = copy_from_user_oabi_flock64;
12566             copyto = copy_to_user_oabi_flock64;
12567         }
12568 #endif
12569 
12570         cmd = target_to_host_fcntl_cmd(arg2);
12571         if (cmd == -TARGET_EINVAL) {
12572             return cmd;
12573         }
12574 
12575         switch(arg2) {
12576         case TARGET_F_GETLK64:
12577             ret = copyfrom(&fl, arg3);
12578             if (ret) {
12579                 break;
12580             }
12581             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12582             if (ret == 0) {
12583                 ret = copyto(arg3, &fl);
12584             }
12585             break;
12586 
12587         case TARGET_F_SETLK64:
12588         case TARGET_F_SETLKW64:
12589             ret = copyfrom(&fl, arg3);
12590             if (ret) {
12591                 break;
12592             }
12593             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12594             break;
12595         default:
12596             ret = do_fcntl(arg1, arg2, arg3);
12597             break;
12598         }
12599         return ret;
12600     }
12601 #endif
12602 #ifdef TARGET_NR_cacheflush
12603     case TARGET_NR_cacheflush:
12604         /* self-modifying code is handled automatically, so nothing needed */
12605         return 0;
12606 #endif
12607 #ifdef TARGET_NR_getpagesize
12608     case TARGET_NR_getpagesize:
12609         return TARGET_PAGE_SIZE;
12610 #endif
12611     case TARGET_NR_gettid:
12612         return get_errno(sys_gettid());
12613 #ifdef TARGET_NR_readahead
12614     case TARGET_NR_readahead:
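              /*
               * Note added for clarity: on 32-bit ABIs the 64-bit offset
               * arrives as a register pair; ABIs that require such pairs to
               * be aligned insert a padding argument first, which
               * regpairs_aligned() detects so the real arguments can be
               * shifted back down.
               */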
12615 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12616         if (regpairs_aligned(cpu_env, num)) {
12617             arg2 = arg3;
12618             arg3 = arg4;
12619             arg4 = arg5;
12620         }
12621         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12622 #else
12623         ret = get_errno(readahead(arg1, arg2, arg3));
12624 #endif
12625         return ret;
12626 #endif
12627 #ifdef CONFIG_ATTR
12628 #ifdef TARGET_NR_setxattr
12629     case TARGET_NR_listxattr:
12630     case TARGET_NR_llistxattr:
12631     {
12632         void *b = 0;
12633         if (arg2) {
12634             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12635             if (!b) {
12636                 return -TARGET_EFAULT;
12637             }
12638         }
12639         p = lock_user_string(arg1);
12640         if (p) {
12641             if (num == TARGET_NR_listxattr) {
12642                 ret = get_errno(listxattr(p, b, arg3));
12643             } else {
12644                 ret = get_errno(llistxattr(p, b, arg3));
12645             }
12646         } else {
12647             ret = -TARGET_EFAULT;
12648         }
12649         unlock_user(p, arg1, 0);
12650         unlock_user(b, arg2, arg3);
12651         return ret;
12652     }
12653     case TARGET_NR_flistxattr:
12654     {
12655         void *b = 0;
12656         if (arg2) {
12657             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12658             if (!b) {
12659                 return -TARGET_EFAULT;
12660             }
12661         }
12662         ret = get_errno(flistxattr(arg1, b, arg3));
12663         unlock_user(b, arg2, arg3);
12664         return ret;
12665     }
12666     case TARGET_NR_setxattr:
12667     case TARGET_NR_lsetxattr:
12668         {
12669             void *n, *v = 0;
12670             if (arg3) {
12671                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12672                 if (!v) {
12673                     return -TARGET_EFAULT;
12674                 }
12675             }
12676             p = lock_user_string(arg1);
12677             n = lock_user_string(arg2);
12678             if (p && n) {
12679                 if (num == TARGET_NR_setxattr) {
12680                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12681                 } else {
12682                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12683                 }
12684             } else {
12685                 ret = -TARGET_EFAULT;
12686             }
12687             unlock_user(p, arg1, 0);
12688             unlock_user(n, arg2, 0);
12689             unlock_user(v, arg3, 0);
12690         }
12691         return ret;
12692     case TARGET_NR_fsetxattr:
12693         {
12694             void *n, *v = 0;
12695             if (arg3) {
12696                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12697                 if (!v) {
12698                     return -TARGET_EFAULT;
12699                 }
12700             }
12701             n = lock_user_string(arg2);
12702             if (n) {
12703                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12704             } else {
12705                 ret = -TARGET_EFAULT;
12706             }
12707             unlock_user(n, arg2, 0);
12708             unlock_user(v, arg3, 0);
12709         }
12710         return ret;
12711     case TARGET_NR_getxattr:
12712     case TARGET_NR_lgetxattr:
12713         {
12714             void *n, *v = 0;
12715             if (arg3) {
12716                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12717                 if (!v) {
12718                     return -TARGET_EFAULT;
12719                 }
12720             }
12721             p = lock_user_string(arg1);
12722             n = lock_user_string(arg2);
12723             if (p && n) {
12724                 if (num == TARGET_NR_getxattr) {
12725                     ret = get_errno(getxattr(p, n, v, arg4));
12726                 } else {
12727                     ret = get_errno(lgetxattr(p, n, v, arg4));
12728                 }
12729             } else {
12730                 ret = -TARGET_EFAULT;
12731             }
12732             unlock_user(p, arg1, 0);
12733             unlock_user(n, arg2, 0);
12734             unlock_user(v, arg3, arg4);
12735         }
12736         return ret;
12737     case TARGET_NR_fgetxattr:
12738         {
12739             void *n, *v = 0;
12740             if (arg3) {
12741                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12742                 if (!v) {
12743                     return -TARGET_EFAULT;
12744                 }
12745             }
12746             n = lock_user_string(arg2);
12747             if (n) {
12748                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12749             } else {
12750                 ret = -TARGET_EFAULT;
12751             }
12752             unlock_user(n, arg2, 0);
12753             unlock_user(v, arg3, arg4);
12754         }
12755         return ret;
12756     case TARGET_NR_removexattr:
12757     case TARGET_NR_lremovexattr:
12758         {
12759             void *n;
12760             p = lock_user_string(arg1);
12761             n = lock_user_string(arg2);
12762             if (p && n) {
12763                 if (num == TARGET_NR_removexattr) {
12764                     ret = get_errno(removexattr(p, n));
12765                 } else {
12766                     ret = get_errno(lremovexattr(p, n));
12767                 }
12768             } else {
12769                 ret = -TARGET_EFAULT;
12770             }
12771             unlock_user(p, arg1, 0);
12772             unlock_user(n, arg2, 0);
12773         }
12774         return ret;
12775     case TARGET_NR_fremovexattr:
12776         {
12777             void *n;
12778             n = lock_user_string(arg2);
12779             if (n) {
12780                 ret = get_errno(fremovexattr(arg1, n));
12781             } else {
12782                 ret = -TARGET_EFAULT;
12783             }
12784             unlock_user(n, arg2, 0);
12785         }
12786         return ret;
12787 #endif
12788 #endif /* CONFIG_ATTR */
12789 #ifdef TARGET_NR_set_thread_area
12790     case TARGET_NR_set_thread_area:
12791 #if defined(TARGET_MIPS)
12792         cpu_env->active_tc.CP0_UserLocal = arg1;
12793         return 0;
12794 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12795         return do_set_thread_area(cpu_env, arg1);
12796 #elif defined(TARGET_M68K)
12797         {
12798             TaskState *ts = get_task_state(cpu);
12799             ts->tp_value = arg1;
12800             return 0;
12801         }
12802 #else
12803         return -TARGET_ENOSYS;
12804 #endif
12805 #endif
12806 #ifdef TARGET_NR_get_thread_area
12807     case TARGET_NR_get_thread_area:
12808 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12809         return do_get_thread_area(cpu_env, arg1);
12810 #elif defined(TARGET_M68K)
12811         {
12812             TaskState *ts = get_task_state(cpu);
12813             return ts->tp_value;
12814         }
12815 #else
12816         return -TARGET_ENOSYS;
12817 #endif
12818 #endif
12819 #ifdef TARGET_NR_getdomainname
12820     case TARGET_NR_getdomainname:
12821         return -TARGET_ENOSYS;
12822 #endif
12823 
12824 #ifdef TARGET_NR_clock_settime
12825     case TARGET_NR_clock_settime:
12826     {
12827         struct timespec ts;
12828 
12829         ret = target_to_host_timespec(&ts, arg2);
12830         if (!is_error(ret)) {
12831             ret = get_errno(clock_settime(arg1, &ts));
12832         }
12833         return ret;
12834     }
12835 #endif
12836 #ifdef TARGET_NR_clock_settime64
12837     case TARGET_NR_clock_settime64:
12838     {
12839         struct timespec ts;
12840 
12841         ret = target_to_host_timespec64(&ts, arg2);
12842         if (!is_error(ret)) {
12843             ret = get_errno(clock_settime(arg1, &ts));
12844         }
12845         return ret;
12846     }
12847 #endif
12848 #ifdef TARGET_NR_clock_gettime
12849     case TARGET_NR_clock_gettime:
12850     {
12851         struct timespec ts;
12852         ret = get_errno(clock_gettime(arg1, &ts));
12853         if (!is_error(ret)) {
12854             ret = host_to_target_timespec(arg2, &ts);
12855         }
12856         return ret;
12857     }
12858 #endif
12859 #ifdef TARGET_NR_clock_gettime64
12860     case TARGET_NR_clock_gettime64:
12861     {
12862         struct timespec ts;
12863         ret = get_errno(clock_gettime(arg1, &ts));
12864         if (!is_error(ret)) {
12865             ret = host_to_target_timespec64(arg2, &ts);
12866         }
12867         return ret;
12868     }
12869 #endif
12870 #ifdef TARGET_NR_clock_getres
12871     case TARGET_NR_clock_getres:
12872     {
12873         struct timespec ts;
12874         ret = get_errno(clock_getres(arg1, &ts));
12875         if (!is_error(ret)) {
12876             host_to_target_timespec(arg2, &ts);
12877         }
12878         return ret;
12879     }
12880 #endif
12881 #ifdef TARGET_NR_clock_getres_time64
12882     case TARGET_NR_clock_getres_time64:
12883     {
12884         struct timespec ts;
12885         ret = get_errno(clock_getres(arg1, &ts));
12886         if (!is_error(ret)) {
12887             host_to_target_timespec64(arg2, &ts);
12888         }
12889         return ret;
12890     }
12891 #endif
12892 #ifdef TARGET_NR_clock_nanosleep
12893     case TARGET_NR_clock_nanosleep:
12894     {
12895         struct timespec ts;
12896         if (target_to_host_timespec(&ts, arg3)) {
12897             return -TARGET_EFAULT;
12898         }
12899         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12900                                              &ts, arg4 ? &ts : NULL));
12901         /*
12902          * If the call is interrupted by a signal handler, it fails with
12903          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is
12904          * not TIMER_ABSTIME, the remaining unslept time is written to arg4.
12905          */
12906         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12907             host_to_target_timespec(arg4, &ts)) {
12908               return -TARGET_EFAULT;
12909         }
12910 
12911         return ret;
12912     }
12913 #endif
12914 #ifdef TARGET_NR_clock_nanosleep_time64
12915     case TARGET_NR_clock_nanosleep_time64:
12916     {
12917         struct timespec ts;
12918 
12919         if (target_to_host_timespec64(&ts, arg3)) {
12920             return -TARGET_EFAULT;
12921         }
12922 
12923         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12924                                              &ts, arg4 ? &ts : NULL));
12925 
12926         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12927             host_to_target_timespec64(arg4, &ts)) {
12928             return -TARGET_EFAULT;
12929         }
12930         return ret;
12931     }
12932 #endif
12933 
12934 #if defined(TARGET_NR_set_tid_address)
12935     case TARGET_NR_set_tid_address:
12936     {
12937         TaskState *ts = get_task_state(cpu);
12938         ts->child_tidptr = arg1;
12939         /* do not call the host set_tid_address() syscall; just return the tid */
12940         return get_errno(sys_gettid());
12941     }
12942 #endif
12943 
12944     case TARGET_NR_tkill:
12945         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12946 
12947     case TARGET_NR_tgkill:
12948         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12949                          target_to_host_signal(arg3)));
12950 
12951 #ifdef TARGET_NR_set_robust_list
12952     case TARGET_NR_set_robust_list:
12953     case TARGET_NR_get_robust_list:
12954         /* The ABI for supporting robust futexes has userspace pass
12955          * the kernel a pointer to a linked list which is updated by
12956          * userspace after the syscall; the list is walked by the kernel
12957          * when the thread exits. Since the linked list in QEMU guest
12958          * memory isn't a valid linked list for the host and we have
12959          * no way to reliably intercept the thread-death event, we can't
12960          * support these. Silently return ENOSYS so that guest userspace
12961          * falls back to a non-robust futex implementation (which should
12962          * be OK except in the corner case of the guest crashing while
12963          * holding a mutex that is shared with another process via
12964          * shared memory).
12965          */
12966         return -TARGET_ENOSYS;
12967 #endif
12968 
12969 #if defined(TARGET_NR_utimensat)
12970     case TARGET_NR_utimensat:
12971         {
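                  /*
                   * Note added for clarity: arg3, when non-zero, points at
                   * an array of two target timespecs (atime then mtime); a
                   * zero arg3 becomes a NULL times pointer, i.e. "set both
                   * to the current time".  A zero path pointer is likewise
                   * passed as NULL so the dirfd itself is the object
                   * operated on.
                   */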
12972             struct timespec *tsp, ts[2];
12973             if (!arg3) {
12974                 tsp = NULL;
12975             } else {
12976                 if (target_to_host_timespec(ts, arg3)) {
12977                     return -TARGET_EFAULT;
12978                 }
12979                 if (target_to_host_timespec(ts + 1, arg3 +
12980                                             sizeof(struct target_timespec))) {
12981                     return -TARGET_EFAULT;
12982                 }
12983                 tsp = ts;
12984             }
12985             if (!arg2)
12986                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12987             else {
12988                 if (!(p = lock_user_string(arg2))) {
12989                     return -TARGET_EFAULT;
12990                 }
12991                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12992                 unlock_user(p, arg2, 0);
12993             }
12994         }
12995         return ret;
12996 #endif
12997 #ifdef TARGET_NR_utimensat_time64
12998     case TARGET_NR_utimensat_time64:
12999         {
13000             struct timespec *tsp, ts[2];
13001             if (!arg3) {
13002                 tsp = NULL;
13003             } else {
13004                 if (target_to_host_timespec64(ts, arg3)) {
13005                     return -TARGET_EFAULT;
13006                 }
13007                 if (target_to_host_timespec64(ts + 1, arg3 +
13008                                      sizeof(struct target__kernel_timespec))) {
13009                     return -TARGET_EFAULT;
13010                 }
13011                 tsp = ts;
13012             }
13013             if (!arg2)
13014                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13015             else {
13016                 p = lock_user_string(arg2);
13017                 if (!p) {
13018                     return -TARGET_EFAULT;
13019                 }
13020                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13021                 unlock_user(p, arg2, 0);
13022             }
13023         }
13024         return ret;
13025 #endif
13026 #ifdef TARGET_NR_futex
13027     case TARGET_NR_futex:
13028         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13029 #endif
13030 #ifdef TARGET_NR_futex_time64
13031     case TARGET_NR_futex_time64:
13032         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13033 #endif
13034 #ifdef CONFIG_INOTIFY
13035 #if defined(TARGET_NR_inotify_init)
13036     case TARGET_NR_inotify_init:
13037         ret = get_errno(inotify_init());
13038         if (ret >= 0) {
13039             fd_trans_register(ret, &target_inotify_trans);
13040         }
13041         return ret;
13042 #endif
13043 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13044     case TARGET_NR_inotify_init1:
13045         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13046                                           fcntl_flags_tbl)));
13047         if (ret >= 0) {
13048             fd_trans_register(ret, &target_inotify_trans);
13049         }
13050         return ret;
13051 #endif
13052 #if defined(TARGET_NR_inotify_add_watch)
13053     case TARGET_NR_inotify_add_watch:
13054         p = lock_user_string(arg2);
13055         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13056         unlock_user(p, arg2, 0);
13057         return ret;
13058 #endif
13059 #if defined(TARGET_NR_inotify_rm_watch)
13060     case TARGET_NR_inotify_rm_watch:
13061         return get_errno(inotify_rm_watch(arg1, arg2));
13062 #endif
13063 #endif
13064 
13065 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13066     case TARGET_NR_mq_open:
13067         {
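                  /*
                   * Note added for clarity: translate the target open flags
                   * to host flags and, when a fourth argument is supplied,
                   * convert the target mq_attr before calling the host
                   * mq_open().
                   */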
13068             struct mq_attr posix_mq_attr;
13069             struct mq_attr *pposix_mq_attr;
13070             int host_flags;
13071 
13072             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13073             pposix_mq_attr = NULL;
13074             if (arg4) {
13075                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13076                     return -TARGET_EFAULT;
13077                 }
13078                 pposix_mq_attr = &posix_mq_attr;
13079             }
13080             p = lock_user_string(arg1 - 1);
13081             if (!p) {
13082                 return -TARGET_EFAULT;
13083             }
13084             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13085             unlock_user (p, arg1, 0);
13086         }
13087         return ret;
13088 
13089     case TARGET_NR_mq_unlink:
13090         p = lock_user_string(arg1 - 1);
13091         if (!p) {
13092             return -TARGET_EFAULT;
13093         }
13094         ret = get_errno(mq_unlink(p));
13095         unlock_user (p, arg1, 0);
13096         return ret;
13097 
13098 #ifdef TARGET_NR_mq_timedsend
13099     case TARGET_NR_mq_timedsend:
13100         {
13101             struct timespec ts;
13102 
13103             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13104             if (arg5 != 0) {
13105                 if (target_to_host_timespec(&ts, arg5)) {
13106                     return -TARGET_EFAULT;
13107                 }
13108                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13109                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13110                     return -TARGET_EFAULT;
13111                 }
13112             } else {
13113                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13114             }
13115             unlock_user (p, arg2, arg3);
13116         }
13117         return ret;
13118 #endif
13119 #ifdef TARGET_NR_mq_timedsend_time64
13120     case TARGET_NR_mq_timedsend_time64:
13121         {
13122             struct timespec ts;
13123 
13124             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13125             if (arg5 != 0) {
13126                 if (target_to_host_timespec64(&ts, arg5)) {
13127                     return -TARGET_EFAULT;
13128                 }
13129                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13130                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13131                     return -TARGET_EFAULT;
13132                 }
13133             } else {
13134                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13135             }
13136             unlock_user(p, arg2, arg3);
13137         }
13138         return ret;
13139 #endif
13140 
13141 #ifdef TARGET_NR_mq_timedreceive
13142     case TARGET_NR_mq_timedreceive:
13143         {
13144             struct timespec ts;
13145             unsigned int prio;
13146 
13147             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13148             if (arg5 != 0) {
13149                 if (target_to_host_timespec(&ts, arg5)) {
13150                     return -TARGET_EFAULT;
13151                 }
13152                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13153                                                      &prio, &ts));
13154                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13155                     return -TARGET_EFAULT;
13156                 }
13157             } else {
13158                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13159                                                      &prio, NULL));
13160             }
13161             unlock_user (p, arg2, arg3);
13162             if (arg4 != 0)
13163                 put_user_u32(prio, arg4);
13164         }
13165         return ret;
13166 #endif
13167 #ifdef TARGET_NR_mq_timedreceive_time64
13168     case TARGET_NR_mq_timedreceive_time64:
13169         {
13170             struct timespec ts;
13171             unsigned int prio;
13172 
13173             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13174             if (arg5 != 0) {
13175                 if (target_to_host_timespec64(&ts, arg5)) {
13176                     return -TARGET_EFAULT;
13177                 }
13178                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13179                                                      &prio, &ts));
13180                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13181                     return -TARGET_EFAULT;
13182                 }
13183             } else {
13184                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13185                                                      &prio, NULL));
13186             }
13187             unlock_user(p, arg2, arg3);
13188             if (arg4 != 0) {
13189                 put_user_u32(prio, arg4);
13190             }
13191         }
13192         return ret;
13193 #endif
13194 
13195     /* Not implemented for now... */
13196 /*     case TARGET_NR_mq_notify: */
13197 /*         break; */
13198 
13199     case TARGET_NR_mq_getsetattr:
13200         {
13201             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13202             ret = 0;
13203             if (arg2 != 0) {
13204                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13205                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13206                                            &posix_mq_attr_out));
13207             } else if (arg3 != 0) {
13208                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13209             }
13210             if (ret == 0 && arg3 != 0) {
13211                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13212             }
13213         }
13214         return ret;
13215 #endif
13216 
13217 #ifdef CONFIG_SPLICE
13218 #ifdef TARGET_NR_tee
13219     case TARGET_NR_tee:
13220         {
13221             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13222         }
13223         return ret;
13224 #endif
13225 #ifdef TARGET_NR_splice
13226     case TARGET_NR_splice:
13227         {
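                  /*
                   * Note added for clarity: the optional in/out offsets are
                   * read from guest memory into local loff_t values, passed
                   * by pointer to the host splice(), and written back
                   * afterwards so the guest observes any updated positions.
                   */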
13228             loff_t loff_in, loff_out;
13229             loff_t *ploff_in = NULL, *ploff_out = NULL;
13230             if (arg2) {
13231                 if (get_user_u64(loff_in, arg2)) {
13232                     return -TARGET_EFAULT;
13233                 }
13234                 ploff_in = &loff_in;
13235             }
13236             if (arg4) {
13237                 if (get_user_u64(loff_out, arg4)) {
13238                     return -TARGET_EFAULT;
13239                 }
13240                 ploff_out = &loff_out;
13241             }
13242             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13243             if (arg2) {
13244                 if (put_user_u64(loff_in, arg2)) {
13245                     return -TARGET_EFAULT;
13246                 }
13247             }
13248             if (arg4) {
13249                 if (put_user_u64(loff_out, arg4)) {
13250                     return -TARGET_EFAULT;
13251                 }
13252             }
13253         }
13254         return ret;
13255 #endif
13256 #ifdef TARGET_NR_vmsplice
13257     case TARGET_NR_vmsplice:
13258         {
13259             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13260             if (vec != NULL) {
13261                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13262                 unlock_iovec(vec, arg2, arg3, 0);
13263             } else {
13264                 ret = -host_to_target_errno(errno);
13265             }
13266         }
13267         return ret;
13268 #endif
13269 #endif /* CONFIG_SPLICE */
13270 #ifdef CONFIG_EVENTFD
13271 #if defined(TARGET_NR_eventfd)
13272     case TARGET_NR_eventfd:
13273         ret = get_errno(eventfd(arg1, 0));
13274         if (ret >= 0) {
13275             fd_trans_register(ret, &target_eventfd_trans);
13276         }
13277         return ret;
13278 #endif
13279 #if defined(TARGET_NR_eventfd2)
13280     case TARGET_NR_eventfd2:
13281     {
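        /*
         * Translate the target O_NONBLOCK/O_CLOEXEC bits into the host
         * values, which on Linux match EFD_NONBLOCK and EFD_CLOEXEC.
         */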
13282         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13283         if (arg2 & TARGET_O_NONBLOCK) {
13284             host_flags |= O_NONBLOCK;
13285         }
13286         if (arg2 & TARGET_O_CLOEXEC) {
13287             host_flags |= O_CLOEXEC;
13288         }
13289         ret = get_errno(eventfd(arg1, host_flags));
13290         if (ret >= 0) {
13291             fd_trans_register(ret, &target_eventfd_trans);
13292         }
13293         return ret;
13294     }
13295 #endif
13296 #endif /* CONFIG_EVENTFD  */
13297 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13298     case TARGET_NR_fallocate:
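        /*
         * On 32-bit ABIs (other than MIPS N32) the 64-bit offset and length
         * arrive split across register pairs and are reassembled with
         * target_offset64().
         */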
13299 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13300         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13301                                   target_offset64(arg5, arg6)));
13302 #else
13303         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13304 #endif
13305         return ret;
13306 #endif
13307 #if defined(CONFIG_SYNC_FILE_RANGE)
13308 #if defined(TARGET_NR_sync_file_range)
13309     case TARGET_NR_sync_file_range:
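        /*
         * 32-bit ABIs pass the 64-bit offsets as register pairs; on MIPS
         * the o32 convention additionally pads 64-bit arguments to even
         * register pairs, so the offsets start at arg3 and the flags land
         * in arg7.
         */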
13310 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13311 #if defined(TARGET_MIPS)
13312         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13313                                         target_offset64(arg5, arg6), arg7));
13314 #else
13315         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13316                                         target_offset64(arg4, arg5), arg6));
13317 #endif /* !TARGET_MIPS */
13318 #else
13319         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13320 #endif
13321         return ret;
13322 #endif
13323 #if defined(TARGET_NR_sync_file_range2) || \
13324     defined(TARGET_NR_arm_sync_file_range)
13325 #if defined(TARGET_NR_sync_file_range2)
13326     case TARGET_NR_sync_file_range2:
13327 #endif
13328 #if defined(TARGET_NR_arm_sync_file_range)
13329     case TARGET_NR_arm_sync_file_range:
13330 #endif
13331         /* This is like sync_file_range but the arguments are reordered */
13332 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13333         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13334                                         target_offset64(arg5, arg6), arg2));
13335 #else
13336         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13337 #endif
13338         return ret;
13339 #endif
13340 #endif
13341 #if defined(TARGET_NR_signalfd4)
13342     case TARGET_NR_signalfd4:
13343         return do_signalfd4(arg1, arg2, arg4);
13344 #endif
13345 #if defined(TARGET_NR_signalfd)
13346     case TARGET_NR_signalfd:
13347         return do_signalfd4(arg1, arg2, 0);
13348 #endif
13349 #if defined(CONFIG_EPOLL)
13350 #if defined(TARGET_NR_epoll_create)
13351     case TARGET_NR_epoll_create:
13352         return get_errno(epoll_create(arg1));
13353 #endif
13354 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13355     case TARGET_NR_epoll_create1:
13356         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13357 #endif
13358 #if defined(TARGET_NR_epoll_ctl)
13359     case TARGET_NR_epoll_ctl:
13360     {
13361         struct epoll_event ep;
13362         struct epoll_event *epp = NULL;
13363         if (arg4) {
13364             if (arg2 != EPOLL_CTL_DEL) {
13365                 struct target_epoll_event *target_ep;
13366                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13367                     return -TARGET_EFAULT;
13368                 }
13369                 ep.events = tswap32(target_ep->events);
13370                 /*
13371                  * The epoll_data_t union is just opaque data to the kernel,
13372                  * so we transfer all 64 bits across and need not worry what
13373                  * actual data type it is.
13374                  */
13375                 ep.data.u64 = tswap64(target_ep->data.u64);
13376                 unlock_user_struct(target_ep, arg4, 0);
13377             }
13378             /*
13379              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13380              * non-null pointer, even though this argument is ignored.
13381              *
13382              */
13383             epp = &ep;
13384         }
13385         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13386     }
13387 #endif
13388 
13389 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13390 #if defined(TARGET_NR_epoll_wait)
13391     case TARGET_NR_epoll_wait:
13392 #endif
13393 #if defined(TARGET_NR_epoll_pwait)
13394     case TARGET_NR_epoll_pwait:
13395 #endif
13396     {
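        /*
         * Bounce the results through a host epoll_event array and then
         * byte-swap them into the guest's target_epoll_event buffer.
         */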
13397         struct target_epoll_event *target_ep;
13398         struct epoll_event *ep;
13399         int epfd = arg1;
13400         int maxevents = arg3;
13401         int timeout = arg4;
13402 
13403         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13404             return -TARGET_EINVAL;
13405         }
13406 
13407         target_ep = lock_user(VERIFY_WRITE, arg2,
13408                               maxevents * sizeof(struct target_epoll_event), 1);
13409         if (!target_ep) {
13410             return -TARGET_EFAULT;
13411         }
13412 
13413         ep = g_try_new(struct epoll_event, maxevents);
13414         if (!ep) {
13415             unlock_user(target_ep, arg2, 0);
13416             return -TARGET_ENOMEM;
13417         }
13418 
13419         switch (num) {
13420 #if defined(TARGET_NR_epoll_pwait)
13421         case TARGET_NR_epoll_pwait:
13422         {
13423             sigset_t *set = NULL;
13424 
13425             if (arg5) {
13426                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13427                 if (ret != 0) {
13428                     break;
13429                 }
13430             }
13431 
13432             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13433                                              set, SIGSET_T_SIZE));
13434 
13435             if (set) {
13436                 finish_sigsuspend_mask(ret);
13437             }
13438             break;
13439         }
13440 #endif
13441 #if defined(TARGET_NR_epoll_wait)
13442         case TARGET_NR_epoll_wait:
13443             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13444                                              NULL, 0));
13445             break;
13446 #endif
13447         default:
13448             ret = -TARGET_ENOSYS;
13449         }
13450         if (!is_error(ret)) {
13451             int i;
13452             for (i = 0; i < ret; i++) {
13453                 target_ep[i].events = tswap32(ep[i].events);
13454                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13455             }
13456             unlock_user(target_ep, arg2,
13457                         ret * sizeof(struct target_epoll_event));
13458         } else {
13459             unlock_user(target_ep, arg2, 0);
13460         }
13461         g_free(ep);
13462         return ret;
13463     }
13464 #endif
13465 #endif
13466 #ifdef TARGET_NR_prlimit64
13467     case TARGET_NR_prlimit64:
13468     {
13469         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13470         struct target_rlimit64 *target_rnew, *target_rold;
13471         struct host_rlimit64 rnew, rold, *rnewp = NULL;
13472         int resource = target_to_host_resource(arg2);
13473 
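        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded: sys_prlimit64() acts on the host process, so they
         * would constrain QEMU itself rather than just the guest.
         */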
13474         if (arg3 && (resource != RLIMIT_AS &&
13475                      resource != RLIMIT_DATA &&
13476                      resource != RLIMIT_STACK)) {
13477             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13478                 return -TARGET_EFAULT;
13479             }
13480             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13481             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13482             unlock_user_struct(target_rnew, arg3, 0);
13483             rnewp = &rnew;
13484         }
13485 
13486         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13487         if (!is_error(ret) && arg4) {
13488             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13489                 return -TARGET_EFAULT;
13490             }
13491             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13492             __put_user(rold.rlim_max, &target_rold->rlim_max);
13493             unlock_user_struct(target_rold, arg4, 1);
13494         }
13495         return ret;
13496     }
13497 #endif
13498 #ifdef TARGET_NR_gethostname
13499     case TARGET_NR_gethostname:
13500     {
13501         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13502         if (name) {
13503             ret = get_errno(gethostname(name, arg2));
13504             unlock_user(name, arg1, arg2);
13505         } else {
13506             ret = -TARGET_EFAULT;
13507         }
13508         return ret;
13509     }
13510 #endif
13511 #ifdef TARGET_NR_atomic_cmpxchg_32
13512     case TARGET_NR_atomic_cmpxchg_32:
13513     {
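        /*
         * Compare the 32-bit word at guest address arg6 with arg2 and, if
         * they match, store arg1 there; the previous memory value is
         * returned either way.  As the comment below notes, the sequence
         * is not atomic with respect to other guest threads.
         */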
13514         /* should use start_exclusive from main.c */
13515         abi_ulong mem_value;
13516         if (get_user_u32(mem_value, arg6)) {
13517             target_siginfo_t info;
13518             info.si_signo = SIGSEGV;
13519             info.si_errno = 0;
13520             info.si_code = TARGET_SEGV_MAPERR;
13521             info._sifields._sigfault._addr = arg6;
13522             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13523             ret = 0xdeadbeef;
13524 
13525         }
13526         if (mem_value == arg2) {
13527             put_user_u32(arg1, arg6);
        }
13528         return mem_value;
13529     }
13530 #endif
13531 #ifdef TARGET_NR_atomic_barrier
13532     case TARGET_NR_atomic_barrier:
13533         /* Like the kernel implementation and the
13534            qemu arm barrier, no-op this? */
13535         return 0;
13536 #endif
13537 
13538 #ifdef TARGET_NR_timer_create
13539     case TARGET_NR_timer_create:
13540     {
13541         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13542 
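        /*
         * Host timers live in the g_posix_timers[] slot array; the guest
         * receives TIMER_MAGIC ORed with the slot index as its timer id,
         * which get_timer_id() decodes again in the other timer_* cases.
         */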
13543         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13544 
13545         int clkid = arg1;
13546         int timer_index = next_free_host_timer();
13547 
13548         if (timer_index < 0) {
13549             ret = -TARGET_EAGAIN;
13550         } else {
13551             timer_t *phtimer = g_posix_timers + timer_index;
13552 
13553             if (arg2) {
13554                 phost_sevp = &host_sevp;
13555                 ret = target_to_host_sigevent(phost_sevp, arg2);
13556                 if (ret != 0) {
13557                     free_host_timer_slot(timer_index);
13558                     return ret;
13559                 }
13560             }
13561 
13562             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13563             if (ret) {
13564                 free_host_timer_slot(timer_index);
13565             } else {
13566                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13567                     timer_delete(*phtimer);
13568                     free_host_timer_slot(timer_index);
13569                     return -TARGET_EFAULT;
13570                 }
13571             }
13572         }
13573         return ret;
13574     }
13575 #endif
13576 
13577 #ifdef TARGET_NR_timer_settime
13578     case TARGET_NR_timer_settime:
13579     {
13580         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13581          * struct itimerspec * old_value */
13582         target_timer_t timerid = get_timer_id(arg1);
13583 
13584         if (timerid < 0) {
13585             ret = timerid;
13586         } else if (arg3 == 0) {
13587             ret = -TARGET_EINVAL;
13588         } else {
13589             timer_t htimer = g_posix_timers[timerid];
13590             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13591 
13592             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13593                 return -TARGET_EFAULT;
13594             }
13595             ret = get_errno(
13596                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13597             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13598                 return -TARGET_EFAULT;
13599             }
13600         }
13601         return ret;
13602     }
13603 #endif
13604 
13605 #ifdef TARGET_NR_timer_settime64
13606     case TARGET_NR_timer_settime64:
13607     {
13608         target_timer_t timerid = get_timer_id(arg1);
13609 
13610         if (timerid < 0) {
13611             ret = timerid;
13612         } else if (arg3 == 0) {
13613             ret = -TARGET_EINVAL;
13614         } else {
13615             timer_t htimer = g_posix_timers[timerid];
13616             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13617 
13618             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13619                 return -TARGET_EFAULT;
13620             }
13621             ret = get_errno(
13622                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13623             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13624                 return -TARGET_EFAULT;
13625             }
13626         }
13627         return ret;
13628     }
13629 #endif
13630 
13631 #ifdef TARGET_NR_timer_gettime
13632     case TARGET_NR_timer_gettime:
13633     {
13634         /* args: timer_t timerid, struct itimerspec *curr_value */
13635         target_timer_t timerid = get_timer_id(arg1);
13636 
13637         if (timerid < 0) {
13638             ret = timerid;
13639         } else if (!arg2) {
13640             ret = -TARGET_EFAULT;
13641         } else {
13642             timer_t htimer = g_posix_timers[timerid];
13643             struct itimerspec hspec;
13644             ret = get_errno(timer_gettime(htimer, &hspec));
13645 
13646             if (host_to_target_itimerspec(arg2, &hspec)) {
13647                 ret = -TARGET_EFAULT;
13648             }
13649         }
13650         return ret;
13651     }
13652 #endif
13653 
13654 #ifdef TARGET_NR_timer_gettime64
13655     case TARGET_NR_timer_gettime64:
13656     {
13657         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13658         target_timer_t timerid = get_timer_id(arg1);
13659 
13660         if (timerid < 0) {
13661             ret = timerid;
13662         } else if (!arg2) {
13663             ret = -TARGET_EFAULT;
13664         } else {
13665             timer_t htimer = g_posix_timers[timerid];
13666             struct itimerspec hspec;
13667             ret = get_errno(timer_gettime(htimer, &hspec));
13668 
13669             if (host_to_target_itimerspec64(arg2, &hspec)) {
13670                 ret = -TARGET_EFAULT;
13671             }
13672         }
13673         return ret;
13674     }
13675 #endif
13676 
13677 #ifdef TARGET_NR_timer_getoverrun
13678     case TARGET_NR_timer_getoverrun:
13679     {
13680         /* args: timer_t timerid */
13681         target_timer_t timerid = get_timer_id(arg1);
13682 
13683         if (timerid < 0) {
13684             ret = timerid;
13685         } else {
13686             timer_t htimer = g_posix_timers[timerid];
13687             ret = get_errno(timer_getoverrun(htimer));
13688         }
13689         return ret;
13690     }
13691 #endif
13692 
13693 #ifdef TARGET_NR_timer_delete
13694     case TARGET_NR_timer_delete:
13695     {
13696         /* args: timer_t timerid */
13697         target_timer_t timerid = get_timer_id(arg1);
13698 
13699         if (timerid < 0) {
13700             ret = timerid;
13701         } else {
13702             timer_t htimer = g_posix_timers[timerid];
13703             ret = get_errno(timer_delete(htimer));
13704             free_host_timer_slot(timerid);
13705         }
13706         return ret;
13707     }
13708 #endif
13709 
13710 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13711     case TARGET_NR_timerfd_create:
13712         ret = get_errno(timerfd_create(arg1,
13713                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13714         if (ret >= 0) {
13715             fd_trans_register(ret, &target_timerfd_trans);
13716         }
13717         return ret;
13718 #endif
13719 
13720 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13721     case TARGET_NR_timerfd_gettime:
13722         {
13723             struct itimerspec its_curr;
13724 
13725             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13726 
13727             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13728                 return -TARGET_EFAULT;
13729             }
13730         }
13731         return ret;
13732 #endif
13733 
13734 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13735     case TARGET_NR_timerfd_gettime64:
13736         {
13737             struct itimerspec its_curr;
13738 
13739             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13740 
13741             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13742                 return -TARGET_EFAULT;
13743             }
13744         }
13745         return ret;
13746 #endif
13747 
13748 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13749     case TARGET_NR_timerfd_settime:
13750         {
13751             struct itimerspec its_new, its_old, *p_new;
13752 
13753             if (arg3) {
13754                 if (target_to_host_itimerspec(&its_new, arg3)) {
13755                     return -TARGET_EFAULT;
13756                 }
13757                 p_new = &its_new;
13758             } else {
13759                 p_new = NULL;
13760             }
13761 
13762             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13763 
13764             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13765                 return -TARGET_EFAULT;
13766             }
13767         }
13768         return ret;
13769 #endif
13770 
13771 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13772     case TARGET_NR_timerfd_settime64:
13773         {
13774             struct itimerspec its_new, its_old, *p_new;
13775 
13776             if (arg3) {
13777                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13778                     return -TARGET_EFAULT;
13779                 }
13780                 p_new = &its_new;
13781             } else {
13782                 p_new = NULL;
13783             }
13784 
13785             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13786 
13787             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13788                 return -TARGET_EFAULT;
13789             }
13790         }
13791         return ret;
13792 #endif
13793 
13794 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13795     case TARGET_NR_ioprio_get:
13796         return get_errno(ioprio_get(arg1, arg2));
13797 #endif
13798 
13799 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13800     case TARGET_NR_ioprio_set:
13801         return get_errno(ioprio_set(arg1, arg2, arg3));
13802 #endif
13803 
13804 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13805     case TARGET_NR_setns:
13806         return get_errno(setns(arg1, arg2));
13807 #endif
13808 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13809     case TARGET_NR_unshare:
13810         return get_errno(unshare(arg1));
13811 #endif
13812 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13813     case TARGET_NR_kcmp:
13814         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13815 #endif
13816 #ifdef TARGET_NR_swapcontext
13817     case TARGET_NR_swapcontext:
13818         /* PowerPC specific.  */
13819         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13820 #endif
13821 #ifdef TARGET_NR_memfd_create
13822     case TARGET_NR_memfd_create:
13823         p = lock_user_string(arg1);
13824         if (!p) {
13825             return -TARGET_EFAULT;
13826         }
13827         ret = get_errno(memfd_create(p, arg2));
13828         fd_trans_unregister(ret);
13829         unlock_user(p, arg1, 0);
13830         return ret;
13831 #endif
13832 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13833     case TARGET_NR_membarrier:
13834         return get_errno(membarrier(arg1, arg2));
13835 #endif
13836 
13837 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13838     case TARGET_NR_copy_file_range:
13839         {
13840             loff_t inoff, outoff;
13841             loff_t *pinoff = NULL, *poutoff = NULL;
13842 
13843             if (arg2) {
13844                 if (get_user_u64(inoff, arg2)) {
13845                     return -TARGET_EFAULT;
13846                 }
13847                 pinoff = &inoff;
13848             }
13849             if (arg4) {
13850                 if (get_user_u64(outoff, arg4)) {
13851                     return -TARGET_EFAULT;
13852                 }
13853                 poutoff = &outoff;
13854             }
13855             /* Do not sign-extend the count parameter. */
13856             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13857                                                  (abi_ulong)arg5, arg6));
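            /*
             * Only write the updated offsets back to the guest if the
             * call succeeded and actually copied some bytes.
             */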
13858             if (!is_error(ret) && ret > 0) {
13859                 if (arg2) {
13860                     if (put_user_u64(inoff, arg2)) {
13861                         return -TARGET_EFAULT;
13862                     }
13863                 }
13864                 if (arg4) {
13865                     if (put_user_u64(outoff, arg4)) {
13866                         return -TARGET_EFAULT;
13867                     }
13868                 }
13869             }
13870         }
13871         return ret;
13872 #endif
13873 
13874 #if defined(TARGET_NR_pivot_root)
13875     case TARGET_NR_pivot_root:
13876         {
13877             void *p2;
13878             p = lock_user_string(arg1); /* new_root */
13879             p2 = lock_user_string(arg2); /* put_old */
13880             if (!p || !p2) {
13881                 ret = -TARGET_EFAULT;
13882             } else {
13883                 ret = get_errno(pivot_root(p, p2));
13884             }
13885             unlock_user(p2, arg2, 0);
13886             unlock_user(p, arg1, 0);
13887         }
13888         return ret;
13889 #endif
13890 
13891 #if defined(TARGET_NR_riscv_hwprobe)
13892     case TARGET_NR_riscv_hwprobe:
13893         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13894 #endif
13895 
13896     default:
13897         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13898         return -TARGET_ENOSYS;
13899     }
13900     return ret;
13901 }
13902 
13903 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13904                     abi_long arg2, abi_long arg3, abi_long arg4,
13905                     abi_long arg5, abi_long arg6, abi_long arg7,
13906                     abi_long arg8)
13907 {
13908     CPUState *cpu = env_cpu(cpu_env);
13909     abi_long ret;
13910 
13911 #ifdef DEBUG_ERESTARTSYS
13912     /* Debug-only code for exercising the syscall-restart code paths
13913      * in the per-architecture cpu main loops: restart every syscall
13914      * the guest makes once before letting it through.
13915      */
13916     {
13917         static bool flag;
13918         flag = !flag;
13919         if (flag) {
13920             return -QEMU_ERESTARTSYS;
13921         }
13922     }
13923 #endif
13924 
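    /*
     * Plugin trace hooks and optional strace-style logging bracket the
     * call to do_syscall1(), which holds the per-syscall implementation.
     */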
13925     record_syscall_start(cpu, num, arg1,
13926                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13927 
13928     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13929         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13930     }
13931 
13932     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13933                       arg5, arg6, arg7, arg8);
13934 
13935     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13936         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13937                           arg3, arg4, arg5, arg6);
13938     }
13939 
13940     record_syscall_return(cpu, num, ret);
13941     return ret;
13942 }
13943