1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/tb-flush.h"
30 #include "exec/translation-block.h"
31 #include <elf.h>
32 #include <endian.h>
33 #include <grp.h>
34 #include <sys/ipc.h>
35 #include <sys/msg.h>
36 #include <sys/wait.h>
37 #include <sys/mount.h>
38 #include <sys/file.h>
39 #include <sys/fsuid.h>
40 #include <sys/personality.h>
41 #include <sys/prctl.h>
42 #include <sys/resource.h>
43 #include <sys/swap.h>
44 #include <linux/capability.h>
45 #include <sched.h>
46 #include <sys/timex.h>
47 #include <sys/socket.h>
48 #include <linux/sockios.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/signalfd.h>
59 #include <netinet/in.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 #include <linux/wireless.h>
64 #include <linux/icmp.h>
65 #include <linux/icmpv6.h>
66 #include <linux/if_tun.h>
67 #include <linux/in6.h>
68 #include <linux/errqueue.h>
69 #include <linux/random.h>
70 #ifdef CONFIG_TIMERFD
71 #include <sys/timerfd.h>
72 #endif
73 #ifdef CONFIG_EVENTFD
74 #include <sys/eventfd.h>
75 #endif
76 #ifdef CONFIG_EPOLL
77 #include <sys/epoll.h>
78 #endif
79 #ifdef CONFIG_ATTR
80 #include "qemu/xattr.h"
81 #endif
82 #ifdef CONFIG_SENDFILE
83 #include <sys/sendfile.h>
84 #endif
85 #ifdef HAVE_SYS_KCOV_H
86 #include <sys/kcov.h>
87 #endif
88 
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
95 
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #include <linux/fd.h>
105 #if defined(CONFIG_FIEMAP)
106 #include <linux/fiemap.h>
107 #endif
108 #include <linux/fb.h>
109 #if defined(CONFIG_USBFS)
110 #include <linux/usbdevice_fs.h>
111 #include <linux/usb/ch9.h>
112 #endif
113 #include <linux/vt.h>
114 #include <linux/dm-ioctl.h>
115 #include <linux/reboot.h>
116 #include <linux/route.h>
117 #include <linux/filter.h>
118 #include <linux/blkpg.h>
119 #include <netpacket/packet.h>
120 #include <linux/netlink.h>
121 #include <linux/if_alg.h>
122 #include <linux/rtc.h>
123 #include <sound/asound.h>
124 #ifdef HAVE_BTRFS_H
125 #include <linux/btrfs.h>
126 #endif
127 #ifdef HAVE_DRM_H
128 #include <libdrm/drm.h>
129 #include <libdrm/i915_drm.h>
130 #endif
131 #include "linux_loop.h"
132 #include "uname.h"
133 
134 #include "qemu.h"
135 #include "user-internals.h"
136 #include "strace.h"
137 #include "signal-common.h"
138 #include "loader.h"
139 #include "user-mmap.h"
140 #include "user/page-protection.h"
141 #include "user/safe-syscall.h"
142 #include "user/signal.h"
143 #include "qemu/guest-random.h"
144 #include "qemu/selfmap.h"
145 #include "user/syscall-trace.h"
146 #include "special-errno.h"
147 #include "qapi/error.h"
148 #include "fd-trans.h"
149 #include "user/cpu_loop.h"
150 
151 #ifndef CLONE_IO
152 #define CLONE_IO                0x80000000      /* Clone io context */
153 #endif
154 
155 /* We can't directly call the host clone syscall, because this will
156  * badly confuse libc (breaking mutexes, for example). So we must
157  * divide clone flags into:
158  *  * flag combinations that look like pthread_create()
159  *  * flag combinations that look like fork()
160  *  * flags we can implement within QEMU itself
161  *  * flags we can't support and will return an error for
162  */
163 /* For thread creation, all these flags must be present; for
164  * fork, none must be present.
165  */
166 #define CLONE_THREAD_FLAGS                              \
167     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
168      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
169 
170 /* These flags are ignored:
171  * CLONE_DETACHED is now ignored by the kernel;
172  * CLONE_IO is just an optimisation hint to the I/O scheduler
173  */
174 #define CLONE_IGNORED_FLAGS                     \
175     (CLONE_DETACHED | CLONE_IO)
176 
177 #ifndef CLONE_PIDFD
178 # define CLONE_PIDFD 0x00001000
179 #endif
180 
181 /* Flags for fork which we can implement within QEMU itself */
182 #define CLONE_OPTIONAL_FORK_FLAGS               \
183     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
184      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
185 
186 /* Flags for thread creation which we can implement within QEMU itself */
187 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
188     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
189      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
190 
191 #define CLONE_INVALID_FORK_FLAGS                                        \
192     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
193 
194 #define CLONE_INVALID_THREAD_FLAGS                                      \
195     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
196        CLONE_IGNORED_FLAGS))
197 
198 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
199  * have almost all been allocated. We cannot support any of
200  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
201  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
202  * The checks against the invalid thread masks above will catch these.
203  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
204  */
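/*
 * Illustrative sketch only (the real classification is done later in this
 * file by do_fork(), not by this hypothetical helper): the masks above are
 * intended to be used roughly like this to decide whether a guest's clone
 * flag combination can be mapped onto pthread_create().
 */
static inline bool clone_flags_are_thread_like(unsigned int flags)
{
    /* All of the pthread_create()-style flags must be present together... */
    if ((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) {
        return false;
    }
    /* ...and nothing outside the supported or ignored thread flags. */
    return (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}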
205 
206 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
207  * once. This exercises the codepaths for restart.
208  */
209 //#define DEBUG_ERESTARTSYS
210 
211 //#include <linux/msdos_fs.h>
212 #define VFAT_IOCTL_READDIR_BOTH \
213     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
214 #define VFAT_IOCTL_READDIR_SHORT \
215     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
216 
217 #undef _syscall0
218 #undef _syscall1
219 #undef _syscall2
220 #undef _syscall3
221 #undef _syscall4
222 #undef _syscall5
223 #undef _syscall6
224 
225 #define _syscall0(type,name)		\
226 static type name (void)			\
227 {					\
228 	return syscall(__NR_##name);	\
229 }
230 
231 #define _syscall1(type,name,type1,arg1)		\
232 static type name (type1 arg1)			\
233 {						\
234 	return syscall(__NR_##name, arg1);	\
235 }
236 
237 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
238 static type name (type1 arg1,type2 arg2)		\
239 {							\
240 	return syscall(__NR_##name, arg1, arg2);	\
241 }
242 
243 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
244 static type name (type1 arg1,type2 arg2,type3 arg3)		\
245 {								\
246 	return syscall(__NR_##name, arg1, arg2, arg3);		\
247 }
248 
249 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
250 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
251 {										\
252 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
253 }
254 
255 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
256 		  type5,arg5)							\
257 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
258 {										\
259 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
260 }
261 
262 
263 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
264 		  type5,arg5,type6,arg6)					\
265 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
266                   type6 arg6)							\
267 {										\
268 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
269 }
270 
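/*
 * For example (illustrative expansion), "_syscall0(int, sys_gettid)" further
 * down in this file expands to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * i.e. a thin static wrapper that issues the raw host syscall directly,
 * bypassing any libc wrapper for it.
 */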
271 
272 #define __NR_sys_uname __NR_uname
273 #define __NR_sys_getcwd1 __NR_getcwd
274 #define __NR_sys_getdents __NR_getdents
275 #define __NR_sys_getdents64 __NR_getdents64
276 #define __NR_sys_getpriority __NR_getpriority
277 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
278 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
279 #define __NR_sys_syslog __NR_syslog
280 #if defined(__NR_futex)
281 # define __NR_sys_futex __NR_futex
282 #endif
283 #if defined(__NR_futex_time64)
284 # define __NR_sys_futex_time64 __NR_futex_time64
285 #endif
286 #define __NR_sys_statx __NR_statx
287 
288 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
289 #define __NR__llseek __NR_lseek
290 #endif
291 
292 /* Newer kernel ports have llseek() instead of _llseek() */
293 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
294 #define TARGET_NR__llseek TARGET_NR_llseek
295 #endif
296 
297 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
298 #ifndef TARGET_O_NONBLOCK_MASK
299 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
300 #endif
301 
302 #define __NR_sys_gettid __NR_gettid
303 _syscall0(int, sys_gettid)
304 
305 /* For the 64-bit guest on 32-bit host case we must emulate
306  * getdents using getdents64, because otherwise the host
307  * might hand us back more dirent records than we can fit
308  * into the guest buffer after structure format conversion.
309  * Otherwise we emulate getdents with getdents if the host has it.
310  */
311 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
312 #define EMULATE_GETDENTS_WITH_GETDENTS
313 #endif
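/*
 * Example of the problem described above: a 64-bit guest's struct
 * linux_dirent begins with an 8-byte d_ino and an 8-byte d_off, whereas a
 * 32-bit host's uses 4 bytes for each, so every record grows during
 * conversion and a buffer the host filled completely would no longer fit
 * back into the same-sized guest buffer.
 */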
314 
315 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
316 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
317 #endif
318 #if (defined(TARGET_NR_getdents) && \
319       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
320     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
321 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
322 #endif
323 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
324 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
325           loff_t *, res, unsigned int, wh);
326 #endif
327 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
328 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
329           siginfo_t *, uinfo)
330 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
331 #ifdef __NR_exit_group
332 _syscall1(int,exit_group,int,error_code)
333 #endif
334 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
335 #define __NR_sys_close_range __NR_close_range
336 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
337 #ifndef CLOSE_RANGE_CLOEXEC
338 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
339 #endif
340 #endif
341 #if defined(__NR_futex)
342 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_futex_time64)
346 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
347           const struct timespec *,timeout,int *,uaddr2,int,val3)
348 #endif
349 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
350 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
353 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
354                              unsigned int, flags);
355 #endif
356 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
357 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
358 #endif
359 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
360 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
361           unsigned long *, user_mask_ptr);
362 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
363 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
364           unsigned long *, user_mask_ptr);
365 /* sched_attr is not defined in glibc < 2.41 */
366 #ifndef SCHED_ATTR_SIZE_VER0
367 struct sched_attr {
368     uint32_t size;
369     uint32_t sched_policy;
370     uint64_t sched_flags;
371     int32_t sched_nice;
372     uint32_t sched_priority;
373     uint64_t sched_runtime;
374     uint64_t sched_deadline;
375     uint64_t sched_period;
376     uint32_t sched_util_min;
377     uint32_t sched_util_max;
378 };
379 #endif
380 #define __NR_sys_sched_getattr __NR_sched_getattr
381 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
382           unsigned int, size, unsigned int, flags);
383 #define __NR_sys_sched_setattr __NR_sched_setattr
384 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
385           unsigned int, flags);
386 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
387 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
388 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
389 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
390           const struct sched_param *, param);
391 #define __NR_sys_sched_getparam __NR_sched_getparam
392 _syscall2(int, sys_sched_getparam, pid_t, pid,
393           struct sched_param *, param);
394 #define __NR_sys_sched_setparam __NR_sched_setparam
395 _syscall2(int, sys_sched_setparam, pid_t, pid,
396           const struct sched_param *, param);
397 #define __NR_sys_getcpu __NR_getcpu
398 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
399 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
400           void *, arg);
401 _syscall2(int, capget, struct __user_cap_header_struct *, header,
402           struct __user_cap_data_struct *, data);
403 _syscall2(int, capset, struct __user_cap_header_struct *, header,
404           struct __user_cap_data_struct *, data);
405 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
406 _syscall2(int, ioprio_get, int, which, int, who)
407 #endif
408 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
409 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
410 #endif
411 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
412 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
413 #endif
414 
415 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
416 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
417           unsigned long, idx1, unsigned long, idx2)
418 #endif
419 
420 /*
421  * It is assumed that struct statx is architecture independent.
422  */
423 #if defined(TARGET_NR_statx) && defined(__NR_statx)
424 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
425           unsigned int, mask, struct target_statx *, statxbuf)
426 #endif
427 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
428 _syscall2(int, membarrier, int, cmd, int, flags)
429 #endif
430 
431 static const bitmask_transtbl fcntl_flags_tbl[] = {
432   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
433   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
434   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
435   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
436   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
437   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
438   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
439   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
440   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
441   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
442   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
443   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
444   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
445 #if defined(O_DIRECT)
446   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
447 #endif
448 #if defined(O_NOATIME)
449   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
450 #endif
451 #if defined(O_CLOEXEC)
452   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
453 #endif
454 #if defined(O_PATH)
455   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
456 #endif
457 #if defined(O_TMPFILE)
458   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
459 #endif
460   /* Don't terminate the list prematurely on 64-bit host+guest.  */
461 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
462   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
463 #endif
464 };
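/*
 * Usage sketch: open()/fcntl() flag arguments coming from the guest are
 * translated through this table with the generic bitmask helpers, roughly
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * and flags reported back to the guest go through host_to_target_bitmask()
 * with the same table.
 */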
465 
466 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
467 
468 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
469 #if defined(__NR_utimensat)
470 #define __NR_sys_utimensat __NR_utimensat
471 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
472           const struct timespec *,tsp,int,flags)
473 #else
474 static int sys_utimensat(int dirfd, const char *pathname,
475                          const struct timespec times[2], int flags)
476 {
477     errno = ENOSYS;
478     return -1;
479 }
480 #endif
481 #endif /* TARGET_NR_utimensat */
482 
483 #ifdef TARGET_NR_renameat2
484 #if defined(__NR_renameat2)
485 #define __NR_sys_renameat2 __NR_renameat2
486 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
487           const char *, new, unsigned int, flags)
488 #else
489 static int sys_renameat2(int oldfd, const char *old,
490                          int newfd, const char *new, int flags)
491 {
492     if (flags == 0) {
493         return renameat(oldfd, old, newfd, new);
494     }
495     errno = ENOSYS;
496     return -1;
497 }
498 #endif
499 #endif /* TARGET_NR_renameat2 */
500 
501 #ifdef CONFIG_INOTIFY
502 #include <sys/inotify.h>
503 #else
504 /* Userspace can usually survive runtime without inotify */
505 #undef TARGET_NR_inotify_init
506 #undef TARGET_NR_inotify_init1
507 #undef TARGET_NR_inotify_add_watch
508 #undef TARGET_NR_inotify_rm_watch
509 #endif /* CONFIG_INOTIFY  */
510 
511 #if defined(TARGET_NR_prlimit64)
512 #ifndef __NR_prlimit64
513 # define __NR_prlimit64 -1
514 #endif
515 #define __NR_sys_prlimit64 __NR_prlimit64
516 /* The glibc rlimit structure may not be that used by the underlying syscall */
517 struct host_rlimit64 {
518     uint64_t rlim_cur;
519     uint64_t rlim_max;
520 };
521 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
522           const struct host_rlimit64 *, new_limit,
523           struct host_rlimit64 *, old_limit)
524 #endif
525 
526 
527 #if defined(TARGET_NR_timer_create)
528 /* Maximum of 32 active POSIX timers allowed at any one time. */
529 #define GUEST_TIMER_MAX 32
530 static timer_t g_posix_timers[GUEST_TIMER_MAX];
531 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
532 
533 static inline int next_free_host_timer(void)
534 {
535     int k;
536     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
537         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
538             return k;
539         }
540     }
541     return -1;
542 }
543 
544 static inline void free_host_timer_slot(int id)
545 {
546     qatomic_store_release(g_posix_timer_allocated + id, 0);
547 }
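/*
 * Usage sketch (hypothetical caller, error handling omitted): the guest's
 * timer_create() claims a slot and timer_delete() releases it, e.g.
 *
 *     int slot = next_free_host_timer();
 *     if (slot < 0) {
 *         return -TARGET_EAGAIN;
 *     }
 *     ... create the host timer into g_posix_timers[slot] ...
 *     ... later: free_host_timer_slot(slot); ...
 *
 * The qatomic_xchg() claim paired with the qatomic_store_release() free keeps
 * concurrent guest threads from being handed the same slot twice.
 */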
548 #endif
549 
550 static inline int host_to_target_errno(int host_errno)
551 {
552     switch (host_errno) {
553 #define E(X)  case X: return TARGET_##X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return host_errno;
558     }
559 }
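/*
 * "errnos.c.inc" is simply a list of E(...) invocations, one per errno name
 * (lines of the form "E(ENOSYS)" and so on), so each function's local
 * definition of E() turns that list into the matching switch cases.
 */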
560 
561 static inline int target_to_host_errno(int target_errno)
562 {
563     switch (target_errno) {
564 #define E(X)  case TARGET_##X: return X;
565 #include "errnos.c.inc"
566 #undef E
567     default:
568         return target_errno;
569     }
570 }
571 
572 abi_long get_errno(abi_long ret)
573 {
574     if (ret == -1)
575         return -host_to_target_errno(errno);
576     else
577         return ret;
578 }
579 
580 const char *target_strerror(int err)
581 {
582     if (err == QEMU_ERESTARTSYS) {
583         return "To be restarted";
584     }
585     if (err == QEMU_ESIGRETURN) {
586         return "Successful exit from sigreturn";
587     }
588 
589     return strerror(target_to_host_errno(err));
590 }
591 
592 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
593 {
594     int i;
595     uint8_t b;
596     if (usize <= ksize) {
597         return 1;
598     }
599     for (i = ksize; i < usize; i++) {
600         if (get_user_u8(b, addr + i)) {
601             return -TARGET_EFAULT;
602         }
603         if (b != 0) {
604             return 0;
605         }
606     }
607     return 1;
608 }
609 
610 /*
611  * Copies a target struct to a host struct, in a way that guarantees
612  * backwards-compatibility for struct syscall arguments.
613  *
614  * Similar to the kernel's uaccess.h:copy_struct_from_user()
615  */
616 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
617 {
618     size_t size = MIN(ksize, usize);
619     size_t rest = MAX(ksize, usize) - size;
620 
621     /* Deal with trailing bytes. */
622     if (usize < ksize) {
623         memset(dst + size, 0, rest);
624     } else if (usize > ksize) {
625         int ret = check_zeroed_user(src, ksize, usize);
626         if (ret <= 0) {
627             return ret ?: -TARGET_E2BIG;
628         }
629     }
630     /* Copy the interoperable parts of the struct. */
631     if (copy_from_user(dst, src, size)) {
632         return -TARGET_EFAULT;
633     }
634     return 0;
635 }
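/*
 * Usage sketch (hypothetical caller): for an extensible struct such as the
 * sched_attr defined above, a syscall implementation typically does
 *
 *     struct sched_attr attr;
 *     abi_long ret = copy_struct_from_user(&attr, sizeof(attr), arg2, usize);
 *     if (ret) {
 *         return ret;
 *     }
 *
 * which zero-fills the tail of "attr" when the guest passed a smaller/older
 * struct, and fails with -TARGET_E2BIG when the guest passed a larger struct
 * whose extra bytes are not all zero.
 */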
636 
637 #define safe_syscall0(type, name) \
638 static type safe_##name(void) \
639 { \
640     return safe_syscall(__NR_##name); \
641 }
642 
643 #define safe_syscall1(type, name, type1, arg1) \
644 static type safe_##name(type1 arg1) \
645 { \
646     return safe_syscall(__NR_##name, arg1); \
647 }
648 
649 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
650 static type safe_##name(type1 arg1, type2 arg2) \
651 { \
652     return safe_syscall(__NR_##name, arg1, arg2); \
653 }
654 
655 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
656 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
657 { \
658     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
659 }
660 
661 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
662     type4, arg4) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
664 { \
665     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
666 }
667 
668 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
669     type4, arg4, type5, arg5) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
671     type5 arg5) \
672 { \
673     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
674 }
675 
676 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
677     type4, arg4, type5, arg5, type6, arg6) \
678 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
679     type5 arg5, type6 arg6) \
680 { \
681     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
682 }
683 
684 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
685 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
686 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
687               int, flags, mode_t, mode)
688 
689 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
690               const struct open_how_ver0 *, how, size_t, size)
691 
692 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694               struct rusage *, rusage)
695 #endif
696 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
697               int, options, struct rusage *, rusage)
698 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
699 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
700               char **, argv, char **, envp, int, flags)
701 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
702     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
703 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
704               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
705 #endif
706 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
707 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
708               struct timespec *, tsp, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #endif
711 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
712               int, maxevents, int, timeout, const sigset_t *, sigmask,
713               size_t, sigsetsize)
714 #if defined(__NR_futex)
715 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 #if defined(__NR_futex_time64)
719 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
720               const struct timespec *,timeout,int *,uaddr2,int,val3)
721 #endif
722 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
723 safe_syscall2(int, kill, pid_t, pid, int, sig)
724 safe_syscall2(int, tkill, int, tid, int, sig)
725 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
726 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
727 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
729               unsigned long, pos_l, unsigned long, pos_h)
730 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
731               unsigned long, pos_l, unsigned long, pos_h)
732 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
733               socklen_t, addrlen)
734 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
735               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
736 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
737               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
738 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
739 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
740 safe_syscall2(int, flock, int, fd, int, operation)
741 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
742 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
743               const struct timespec *, uts, size_t, sigsetsize)
744 #endif
745 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
746               int, flags)
747 #if defined(TARGET_NR_nanosleep)
748 safe_syscall2(int, nanosleep, const struct timespec *, req,
749               struct timespec *, rem)
750 #endif
751 #if defined(TARGET_NR_clock_nanosleep) || \
752     defined(TARGET_NR_clock_nanosleep_time64)
753 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
754               const struct timespec *, req, struct timespec *, rem)
755 #endif
756 #ifdef __NR_ipc
757 #ifdef __s390x__
758 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
759               void *, ptr)
760 #else
761 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
762               void *, ptr, long, fifth)
763 #endif
764 #endif
765 #ifdef __NR_msgsnd
766 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
767               int, flags)
768 #endif
769 #ifdef __NR_msgrcv
770 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
771               long, msgtype, int, flags)
772 #endif
773 #ifdef __NR_semtimedop
774 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
775               unsigned, nsops, const struct timespec *, timeout)
776 #endif
777 #if defined(TARGET_NR_mq_timedsend) || \
778     defined(TARGET_NR_mq_timedsend_time64)
779 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
780               size_t, len, unsigned, prio, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_mq_timedreceive) || \
783     defined(TARGET_NR_mq_timedreceive_time64)
784 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
785               size_t, len, unsigned *, prio, const struct timespec *, timeout)
786 #endif
787 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
788 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
789               int, outfd, loff_t *, poutoff, size_t, length,
790               unsigned int, flags)
791 #endif
792 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
793 safe_syscall4(int, fchmodat2, int, dfd, const char *, filename,
794               unsigned short, mode, unsigned int, flags)
795 #endif
796 
797 /* We do ioctl like this rather than via safe_syscall3 to preserve the
798  * "third argument might be integer or pointer or not present" behaviour of
799  * the libc function.
800  */
801 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
802 /* Similarly for fcntl. Since we always build with LFS enabled,
803  * we should be using the 64-bit structures automatically.
804  */
805 #ifdef __NR_fcntl64
806 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
807 #else
808 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
809 #endif
810 
811 static inline int host_to_target_sock_type(int host_type)
812 {
813     int target_type;
814 
815     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
816     case SOCK_DGRAM:
817         target_type = TARGET_SOCK_DGRAM;
818         break;
819     case SOCK_STREAM:
820         target_type = TARGET_SOCK_STREAM;
821         break;
822     default:
823         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
824         break;
825     }
826 
827 #if defined(SOCK_CLOEXEC)
828     if (host_type & SOCK_CLOEXEC) {
829         target_type |= TARGET_SOCK_CLOEXEC;
830     }
831 #endif
832 
833 #if defined(SOCK_NONBLOCK)
834     if (host_type & SOCK_NONBLOCK) {
835         target_type |= TARGET_SOCK_NONBLOCK;
836     }
837 #endif
838 
839     return target_type;
840 }
841 
842 static abi_ulong target_brk, initial_target_brk;
843 
844 void target_set_brk(abi_ulong new_brk)
845 {
846     target_brk = TARGET_PAGE_ALIGN(new_brk);
847     initial_target_brk = target_brk;
848 }
849 
850 /* do_brk() must return target values and target errnos. */
851 abi_long do_brk(abi_ulong brk_val)
852 {
853     abi_long mapped_addr;
854     abi_ulong new_brk;
855     abi_ulong old_brk;
856 
857     /* brk pointers are always untagged */
858 
859     /* do not allow to shrink below initial brk value */
860     if (brk_val < initial_target_brk) {
861         return target_brk;
862     }
863 
864     new_brk = TARGET_PAGE_ALIGN(brk_val);
865     old_brk = TARGET_PAGE_ALIGN(target_brk);
866 
867     /* new and old target_brk might be on the same page */
868     if (new_brk == old_brk) {
869         target_brk = brk_val;
870         return target_brk;
871     }
872 
873     /* Release heap if necessary */
874     if (new_brk < old_brk) {
875         target_munmap(new_brk, old_brk - new_brk);
876 
877         target_brk = brk_val;
878         return target_brk;
879     }
880 
881     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
882                               PROT_READ | PROT_WRITE,
883                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
884                               -1, 0);
885 
886     if (mapped_addr == old_brk) {
887         target_brk = brk_val;
888         return target_brk;
889     }
890 
891 #if defined(TARGET_ALPHA)
892     /* We (partially) emulate OSF/1 on Alpha, which requires we
893        return a proper errno, not an unchanged brk value.  */
894     return -TARGET_ENOMEM;
895 #endif
896     /* For everything else, return the previous break. */
897     return target_brk;
898 }
899 
900 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
901     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
902 static inline abi_long copy_from_user_fdset(fd_set *fds,
903                                             abi_ulong target_fds_addr,
904                                             int n)
905 {
906     int i, nw, j, k;
907     abi_ulong b, *target_fds;
908 
909     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
910     if (!(target_fds = lock_user(VERIFY_READ,
911                                  target_fds_addr,
912                                  sizeof(abi_ulong) * nw,
913                                  1)))
914         return -TARGET_EFAULT;
915 
916     FD_ZERO(fds);
917     k = 0;
918     for (i = 0; i < nw; i++) {
919         /* grab the abi_ulong */
920         __get_user(b, &target_fds[i]);
921         for (j = 0; j < TARGET_ABI_BITS; j++) {
922             /* check the bit inside the abi_ulong */
923             if ((b >> j) & 1)
924                 FD_SET(k, fds);
925             k++;
926         }
927     }
928 
929     unlock_user(target_fds, target_fds_addr, 0);
930 
931     return 0;
932 }
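/*
 * Worked example: with TARGET_ABI_BITS == 32 and n == 70 descriptors the
 * loop above reads nw = DIV_ROUND_UP(70, 32) = 3 guest words, and bit j of
 * guest word i corresponds to host descriptor k = i * 32 + j.
 */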
933 
934 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
935                                                  abi_ulong target_fds_addr,
936                                                  int n)
937 {
938     if (target_fds_addr) {
939         if (copy_from_user_fdset(fds, target_fds_addr, n))
940             return -TARGET_EFAULT;
941         *fds_ptr = fds;
942     } else {
943         *fds_ptr = NULL;
944     }
945     return 0;
946 }
947 
948 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
949                                           const fd_set *fds,
950                                           int n)
951 {
952     int i, nw, j, k;
953     abi_long v;
954     abi_ulong *target_fds;
955 
956     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
957     if (!(target_fds = lock_user(VERIFY_WRITE,
958                                  target_fds_addr,
959                                  sizeof(abi_ulong) * nw,
960                                  0)))
961         return -TARGET_EFAULT;
962 
963     k = 0;
964     for (i = 0; i < nw; i++) {
965         v = 0;
966         for (j = 0; j < TARGET_ABI_BITS; j++) {
967             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
968             k++;
969         }
970         __put_user(v, &target_fds[i]);
971     }
972 
973     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
974 
975     return 0;
976 }
977 #endif
978 
979 #if defined(__alpha__)
980 #define HOST_HZ 1024
981 #else
982 #define HOST_HZ 100
983 #endif
984 
985 static inline abi_long host_to_target_clock_t(long ticks)
986 {
987 #if HOST_HZ == TARGET_HZ
988     return ticks;
989 #else
990     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
991 #endif
992 }
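/*
 * e.g. an Alpha host (HOST_HZ == 1024) reporting 2048 ticks to a target with
 * TARGET_HZ == 100 yields 2048 * 100 / 1024 = 200 target clock ticks.
 */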
993 
994 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
995                                              const struct rusage *rusage)
996 {
997     struct target_rusage *target_rusage;
998 
999     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1000         return -TARGET_EFAULT;
1001     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1002     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1003     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1004     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1005     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1006     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1007     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1008     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1009     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1010     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1011     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1012     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1013     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1014     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1015     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1016     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1017     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1018     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1019     unlock_user_struct(target_rusage, target_addr, 1);
1020 
1021     return 0;
1022 }
1023 
1024 #ifdef TARGET_NR_setrlimit
1025 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1026 {
1027     abi_ulong target_rlim_swap;
1028     rlim_t result;
1029 
1030     target_rlim_swap = tswapal(target_rlim);
1031     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1032         return RLIM_INFINITY;
1033 
1034     result = target_rlim_swap;
1035     if (target_rlim_swap != (rlim_t)result)
1036         return RLIM_INFINITY;
1037 
1038     return result;
1039 }
1040 #endif
1041 
1042 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1043 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1044 {
1045     abi_ulong target_rlim_swap;
1046     abi_ulong result;
1047 
1048     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1049         target_rlim_swap = TARGET_RLIM_INFINITY;
1050     else
1051         target_rlim_swap = rlim;
1052     result = tswapal(target_rlim_swap);
1053 
1054     return result;
1055 }
1056 #endif
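/*
 * Example of the saturation above: with a 32-bit abi_long, a host limit of
 * 8 GiB does not fit in the target type, so host_to_target_rlim() reports it
 * to the guest as TARGET_RLIM_INFINITY rather than as a truncated value.
 */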
1057 
1058 static inline int target_to_host_resource(int code)
1059 {
1060     switch (code) {
1061     case TARGET_RLIMIT_AS:
1062         return RLIMIT_AS;
1063     case TARGET_RLIMIT_CORE:
1064         return RLIMIT_CORE;
1065     case TARGET_RLIMIT_CPU:
1066         return RLIMIT_CPU;
1067     case TARGET_RLIMIT_DATA:
1068         return RLIMIT_DATA;
1069     case TARGET_RLIMIT_FSIZE:
1070         return RLIMIT_FSIZE;
1071     case TARGET_RLIMIT_LOCKS:
1072         return RLIMIT_LOCKS;
1073     case TARGET_RLIMIT_MEMLOCK:
1074         return RLIMIT_MEMLOCK;
1075     case TARGET_RLIMIT_MSGQUEUE:
1076         return RLIMIT_MSGQUEUE;
1077     case TARGET_RLIMIT_NICE:
1078         return RLIMIT_NICE;
1079     case TARGET_RLIMIT_NOFILE:
1080         return RLIMIT_NOFILE;
1081     case TARGET_RLIMIT_NPROC:
1082         return RLIMIT_NPROC;
1083     case TARGET_RLIMIT_RSS:
1084         return RLIMIT_RSS;
1085     case TARGET_RLIMIT_RTPRIO:
1086         return RLIMIT_RTPRIO;
1087 #ifdef RLIMIT_RTTIME
1088     case TARGET_RLIMIT_RTTIME:
1089         return RLIMIT_RTTIME;
1090 #endif
1091     case TARGET_RLIMIT_SIGPENDING:
1092         return RLIMIT_SIGPENDING;
1093     case TARGET_RLIMIT_STACK:
1094         return RLIMIT_STACK;
1095     default:
1096         return code;
1097     }
1098 }
1099 
1100 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1101                                               abi_ulong target_tv_addr)
1102 {
1103     struct target_timeval *target_tv;
1104 
1105     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1106         return -TARGET_EFAULT;
1107     }
1108 
1109     __get_user(tv->tv_sec, &target_tv->tv_sec);
1110     __get_user(tv->tv_usec, &target_tv->tv_usec);
1111 
1112     unlock_user_struct(target_tv, target_tv_addr, 0);
1113 
1114     return 0;
1115 }
1116 
1117 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1118                                             const struct timeval *tv)
1119 {
1120     struct target_timeval *target_tv;
1121 
1122     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1123         return -TARGET_EFAULT;
1124     }
1125 
1126     __put_user(tv->tv_sec, &target_tv->tv_sec);
1127     __put_user(tv->tv_usec, &target_tv->tv_usec);
1128 
1129     unlock_user_struct(target_tv, target_tv_addr, 1);
1130 
1131     return 0;
1132 }
1133 
1134 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1135 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1136                                                 abi_ulong target_tv_addr)
1137 {
1138     struct target__kernel_sock_timeval *target_tv;
1139 
1140     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1141         return -TARGET_EFAULT;
1142     }
1143 
1144     __get_user(tv->tv_sec, &target_tv->tv_sec);
1145     __get_user(tv->tv_usec, &target_tv->tv_usec);
1146 
1147     unlock_user_struct(target_tv, target_tv_addr, 0);
1148 
1149     return 0;
1150 }
1151 #endif
1152 
1153 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1154                                               const struct timeval *tv)
1155 {
1156     struct target__kernel_sock_timeval *target_tv;
1157 
1158     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1159         return -TARGET_EFAULT;
1160     }
1161 
1162     __put_user(tv->tv_sec, &target_tv->tv_sec);
1163     __put_user(tv->tv_usec, &target_tv->tv_usec);
1164 
1165     unlock_user_struct(target_tv, target_tv_addr, 1);
1166 
1167     return 0;
1168 }
1169 
1170 #if defined(TARGET_NR_futex) || \
1171     defined(TARGET_NR_rt_sigtimedwait) || \
1172     defined(TARGET_NR_pselect6) || \
1173     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1174     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1175     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1176     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1177     defined(TARGET_NR_timer_settime) || \
1178     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1179 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1180                                                abi_ulong target_addr)
1181 {
1182     struct target_timespec *target_ts;
1183 
1184     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1185         return -TARGET_EFAULT;
1186     }
1187     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1188     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189     unlock_user_struct(target_ts, target_addr, 0);
1190     return 0;
1191 }
1192 #endif
1193 
1194 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1195     defined(TARGET_NR_timer_settime64) || \
1196     defined(TARGET_NR_mq_timedsend_time64) || \
1197     defined(TARGET_NR_mq_timedreceive_time64) || \
1198     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1199     defined(TARGET_NR_clock_nanosleep_time64) || \
1200     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1201     defined(TARGET_NR_utimensat) || \
1202     defined(TARGET_NR_utimensat_time64) || \
1203     defined(TARGET_NR_semtimedop_time64) || \
1204     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1205 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1206                                                  abi_ulong target_addr)
1207 {
1208     struct target__kernel_timespec *target_ts;
1209 
1210     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1211         return -TARGET_EFAULT;
1212     }
1213     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1214     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1215     /* in 32bit mode, this drops the padding */
1216     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1217     unlock_user_struct(target_ts, target_addr, 0);
1218     return 0;
1219 }
1220 #endif
1221 
1222 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1223                                                struct timespec *host_ts)
1224 {
1225     struct target_timespec *target_ts;
1226 
1227     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1228         return -TARGET_EFAULT;
1229     }
1230     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1231     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1232     unlock_user_struct(target_ts, target_addr, 1);
1233     return 0;
1234 }
1235 
1236 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1237                                                  struct timespec *host_ts)
1238 {
1239     struct target__kernel_timespec *target_ts;
1240 
1241     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1242         return -TARGET_EFAULT;
1243     }
1244     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1245     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1246     unlock_user_struct(target_ts, target_addr, 1);
1247     return 0;
1248 }
1249 
1250 #if defined(TARGET_NR_gettimeofday)
1251 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1252                                              struct timezone *tz)
1253 {
1254     struct target_timezone *target_tz;
1255 
1256     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1257         return -TARGET_EFAULT;
1258     }
1259 
1260     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1261     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1262 
1263     unlock_user_struct(target_tz, target_tz_addr, 1);
1264 
1265     return 0;
1266 }
1267 #endif
1268 
1269 #if defined(TARGET_NR_settimeofday)
1270 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1271                                                abi_ulong target_tz_addr)
1272 {
1273     struct target_timezone *target_tz;
1274 
1275     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1276         return -TARGET_EFAULT;
1277     }
1278 
1279     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1280     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1281 
1282     unlock_user_struct(target_tz, target_tz_addr, 0);
1283 
1284     return 0;
1285 }
1286 #endif
1287 
1288 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1289 #include <mqueue.h>
1290 
1291 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1292                                               abi_ulong target_mq_attr_addr)
1293 {
1294     struct target_mq_attr *target_mq_attr;
1295 
1296     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1297                           target_mq_attr_addr, 1))
1298         return -TARGET_EFAULT;
1299 
1300     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1301     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1302     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1303     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1304 
1305     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1306 
1307     return 0;
1308 }
1309 
1310 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1311                                             const struct mq_attr *attr)
1312 {
1313     struct target_mq_attr *target_mq_attr;
1314 
1315     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1316                           target_mq_attr_addr, 0))
1317         return -TARGET_EFAULT;
1318 
1319     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1320     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1321     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1322     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1323 
1324     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1325 
1326     return 0;
1327 }
1328 #endif
1329 
1330 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1331 /* do_select() must return target values and target errnos. */
1332 static abi_long do_select(int n,
1333                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1334                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1335 {
1336     fd_set rfds, wfds, efds;
1337     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1338     struct timeval tv;
1339     struct timespec ts, *ts_ptr;
1340     abi_long ret;
1341 
1342     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1347     if (ret) {
1348         return ret;
1349     }
1350     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1351     if (ret) {
1352         return ret;
1353     }
1354 
1355     if (target_tv_addr) {
1356         if (copy_from_user_timeval(&tv, target_tv_addr))
1357             return -TARGET_EFAULT;
1358         ts.tv_sec = tv.tv_sec;
1359         ts.tv_nsec = tv.tv_usec * 1000;
1360         ts_ptr = &ts;
1361     } else {
1362         ts_ptr = NULL;
1363     }
1364 
1365     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1366                                   ts_ptr, NULL));
1367 
1368     if (!is_error(ret)) {
1369         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1370             return -TARGET_EFAULT;
1371         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1372             return -TARGET_EFAULT;
1373         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1374             return -TARGET_EFAULT;
1375 
1376         if (target_tv_addr) {
1377             tv.tv_sec = ts.tv_sec;
1378             tv.tv_usec = ts.tv_nsec / 1000;
1379             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1380                 return -TARGET_EFAULT;
1381             }
1382         }
1383     }
1384 
1385     return ret;
1386 }
1387 
1388 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1389 static abi_long do_old_select(abi_ulong arg1)
1390 {
1391     struct target_sel_arg_struct *sel;
1392     abi_ulong inp, outp, exp, tvp;
1393     long nsel;
1394 
1395     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1396         return -TARGET_EFAULT;
1397     }
1398 
1399     nsel = tswapal(sel->n);
1400     inp = tswapal(sel->inp);
1401     outp = tswapal(sel->outp);
1402     exp = tswapal(sel->exp);
1403     tvp = tswapal(sel->tvp);
1404 
1405     unlock_user_struct(sel, arg1, 0);
1406 
1407     return do_select(nsel, inp, outp, exp, tvp);
1408 }
1409 #endif
1410 #endif
1411 
1412 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1413 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1414                             abi_long arg4, abi_long arg5, abi_long arg6,
1415                             bool time64)
1416 {
1417     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1418     fd_set rfds, wfds, efds;
1419     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1420     struct timespec ts, *ts_ptr;
1421     abi_long ret;
1422 
1423     /*
1424      * The 6th arg is actually two args smashed together,
1425      * so we cannot use the C library.
1426      */
1427     struct {
1428         sigset_t *set;
1429         size_t size;
1430     } sig, *sig_ptr;
1431 
1432     abi_ulong arg_sigset, arg_sigsize, *arg7;
1433 
1434     n = arg1;
1435     rfd_addr = arg2;
1436     wfd_addr = arg3;
1437     efd_addr = arg4;
1438     ts_addr = arg5;
1439 
1440     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1445     if (ret) {
1446         return ret;
1447     }
1448     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1449     if (ret) {
1450         return ret;
1451     }
1452 
1453     /*
1454      * This takes a timespec, and not a timeval, so we cannot
1455      * use the do_select() helper ...
1456      */
1457     if (ts_addr) {
1458         if (time64) {
1459             if (target_to_host_timespec64(&ts, ts_addr)) {
1460                 return -TARGET_EFAULT;
1461             }
1462         } else {
1463             if (target_to_host_timespec(&ts, ts_addr)) {
1464                 return -TARGET_EFAULT;
1465             }
1466         }
1467         ts_ptr = &ts;
1468     } else {
1469         ts_ptr = NULL;
1470     }
1471 
1472     /* Extract the two packed args for the sigset */
1473     sig_ptr = NULL;
1474     if (arg6) {
1475         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1476         if (!arg7) {
1477             return -TARGET_EFAULT;
1478         }
1479         arg_sigset = tswapal(arg7[0]);
1480         arg_sigsize = tswapal(arg7[1]);
1481         unlock_user(arg7, arg6, 0);
1482 
1483         if (arg_sigset) {
1484             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1485             if (ret != 0) {
1486                 return ret;
1487             }
1488             sig_ptr = &sig;
1489             sig.size = SIGSET_T_SIZE;
1490         }
1491     }
1492 
1493     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1494                                   ts_ptr, sig_ptr));
1495 
1496     if (sig_ptr) {
1497         finish_sigsuspend_mask(ret);
1498     }
1499 
1500     if (!is_error(ret)) {
1501         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1502             return -TARGET_EFAULT;
1503         }
1504         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1505             return -TARGET_EFAULT;
1506         }
1507         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1508             return -TARGET_EFAULT;
1509         }
1510         if (time64) {
1511             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1512                 return -TARGET_EFAULT;
1513             }
1514         } else {
1515             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1516                 return -TARGET_EFAULT;
1517             }
1518         }
1519     }
1520     return ret;
1521 }
1522 #endif
1523 
1524 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1525     defined(TARGET_NR_ppoll_time64)
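/*
 * Helper shared by poll, ppoll and ppoll_time64.  Plain poll() passes a
 * timeout in milliseconds, while ppoll() passes a struct timespec; both
 * are normalised to a timespec so a single safe_ppoll() call can serve
 * either flavour (for example, a 1500 ms poll timeout becomes
 * { .tv_sec = 1, .tv_nsec = 500000000 }).
 */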
1526 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1527                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1528 {
1529     struct target_pollfd *target_pfd;
1530     unsigned int nfds = arg2;
1531     struct pollfd *pfd;
1532     unsigned int i;
1533     abi_long ret;
1534 
1535     pfd = NULL;
1536     target_pfd = NULL;
1537     if (nfds) {
1538         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1539             return -TARGET_EINVAL;
1540         }
1541         target_pfd = lock_user(VERIFY_WRITE, arg1,
1542                                sizeof(struct target_pollfd) * nfds, 1);
1543         if (!target_pfd) {
1544             return -TARGET_EFAULT;
1545         }
1546 
1547         pfd = alloca(sizeof(struct pollfd) * nfds);
1548         for (i = 0; i < nfds; i++) {
1549             pfd[i].fd = tswap32(target_pfd[i].fd);
1550             pfd[i].events = tswap16(target_pfd[i].events);
1551         }
1552     }
1553     if (ppoll) {
1554         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1555         sigset_t *set = NULL;
1556 
1557         if (arg3) {
1558             if (time64) {
1559                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1560                     unlock_user(target_pfd, arg1, 0);
1561                     return -TARGET_EFAULT;
1562                 }
1563             } else {
1564                 if (target_to_host_timespec(timeout_ts, arg3)) {
1565                     unlock_user(target_pfd, arg1, 0);
1566                     return -TARGET_EFAULT;
1567                 }
1568             }
1569         } else {
1570             timeout_ts = NULL;
1571         }
1572 
1573         if (arg4) {
1574             ret = process_sigsuspend_mask(&set, arg4, arg5);
1575             if (ret != 0) {
1576                 unlock_user(target_pfd, arg1, 0);
1577                 return ret;
1578             }
1579         }
1580 
1581         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1582                                    set, SIGSET_T_SIZE));
1583 
1584         if (set) {
1585             finish_sigsuspend_mask(ret);
1586         }
1587         if (!is_error(ret) && arg3) {
1588             if (time64) {
1589                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1590                     return -TARGET_EFAULT;
1591                 }
1592             } else {
1593                 if (host_to_target_timespec(arg3, timeout_ts)) {
1594                     return -TARGET_EFAULT;
1595                 }
1596             }
1597         }
1598     } else {
1599           struct timespec ts, *pts;
1600 
1601           if (arg3 >= 0) {
1602               /* Convert ms to secs, ns */
1603               ts.tv_sec = arg3 / 1000;
1604               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1605               pts = &ts;
1606           } else {
1607               /* -ve poll() timeout means "infinite" */
1608               pts = NULL;
1609           }
1610           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1611     }
1612 
1613     if (!is_error(ret)) {
1614         for (i = 0; i < nfds; i++) {
1615             target_pfd[i].revents = tswap16(pfd[i].revents);
1616         }
1617     }
1618     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1619     return ret;
1620 }
1621 #endif
1622 
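/*
 * pipe/pipe2 emulation.  For the legacy pipe() syscall some targets
 * (Alpha, MIPS, SH4, SPARC) return the two descriptors in registers
 * instead of through the user buffer, so the second fd is written into
 * cpu_env and the first one is returned directly; all other cases store
 * both descriptors at pipedes as two consecutive abi_int values.
 */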
1623 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1624                         int flags, int is_pipe2)
1625 {
1626     int host_pipe[2];
1627     abi_long ret;
1628     ret = pipe2(host_pipe, flags);
1629 
1630     if (is_error(ret))
1631         return get_errno(ret);
1632 
1633     /* Several targets have special calling conventions for the original
1634        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1635     if (!is_pipe2) {
1636 #if defined(TARGET_ALPHA)
1637         cpu_env->ir[IR_A4] = host_pipe[1];
1638         return host_pipe[0];
1639 #elif defined(TARGET_MIPS)
1640         cpu_env->active_tc.gpr[3] = host_pipe[1];
1641         return host_pipe[0];
1642 #elif defined(TARGET_SH4)
1643         cpu_env->gregs[1] = host_pipe[1];
1644         return host_pipe[0];
1645 #elif defined(TARGET_SPARC)
1646         cpu_env->regwptr[1] = host_pipe[1];
1647         return host_pipe[0];
1648 #endif
1649     }
1650 
1651     if (put_user_s32(host_pipe[0], pipedes)
1652         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1653         return -TARGET_EFAULT;
1654     return get_errno(ret);
1655 }
1656 
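/*
 * Copy a guest sockaddr into host form.  For AF_UNIX a guest commonly
 * passes the address length as offsetof(sockaddr_un, sun_path) +
 * strlen(sun_path), omitting the trailing NUL; the length is extended by
 * one byte in that case (and clamped to sizeof(struct sockaddr_un)), as
 * the comment inside the function explains.
 */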
1657 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1658                                                abi_ulong target_addr,
1659                                                socklen_t len)
1660 {
1661     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1662     sa_family_t sa_family;
1663     struct target_sockaddr *target_saddr;
1664 
1665     if (fd_trans_target_to_host_addr(fd)) {
1666         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1667     }
1668 
1669     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1670     if (!target_saddr)
1671         return -TARGET_EFAULT;
1672 
1673     sa_family = tswap16(target_saddr->sa_family);
1674 
1675     /* Oops. The caller might send an incomplete sun_path; sun_path
1676      * must be terminated by \0 (see the manual page), but
1677      * unfortunately it is quite common to specify sockaddr_un
1678      * length as "strlen(x->sun_path)" while it should be
1679      * "strlen(...) + 1". We'll fix that here if needed.
1680      * The Linux kernel has a similar feature.
1681      */
1682 
1683     if (sa_family == AF_UNIX) {
1684         if (len < unix_maxlen && len > 0) {
1685             char *cp = (char*)target_saddr;
1686 
1687             if ( cp[len-1] && !cp[len] )
1688                 len++;
1689         }
1690         if (len > unix_maxlen)
1691             len = unix_maxlen;
1692     }
1693 
1694     memcpy(addr, target_saddr, len);
1695     addr->sa_family = sa_family;
1696     if (sa_family == AF_NETLINK) {
1697         struct sockaddr_nl *nladdr;
1698 
1699         nladdr = (struct sockaddr_nl *)addr;
1700         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1701         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1702     } else if (sa_family == AF_PACKET) {
1703 	struct target_sockaddr_ll *lladdr;
1704 
1705 	lladdr = (struct target_sockaddr_ll *)addr;
1706 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1707 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1708     } else if (sa_family == AF_INET6) {
1709         struct sockaddr_in6 *in6addr;
1710 
1711         in6addr = (struct sockaddr_in6 *)addr;
1712         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1713     }
1714     unlock_user(target_saddr, target_addr, 0);
1715 
1716     return 0;
1717 }
1718 
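/*
 * Copy a host sockaddr back to the guest, byte-swapping sa_family and the
 * multi-byte per-family fields (netlink pid/groups, packet ifindex/hatype,
 * IPv6 scope id) when host and target endianness differ.  Addresses such
 * as sin_addr/sin6_addr stay in network byte order and are copied as-is.
 */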
1719 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1720                                                struct sockaddr *addr,
1721                                                socklen_t len)
1722 {
1723     struct target_sockaddr *target_saddr;
1724 
1725     if (len == 0) {
1726         return 0;
1727     }
1728     assert(addr);
1729 
1730     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1731     if (!target_saddr)
1732         return -TARGET_EFAULT;
1733     memcpy(target_saddr, addr, len);
1734     if (len >= offsetof(struct target_sockaddr, sa_family) +
1735         sizeof(target_saddr->sa_family)) {
1736         target_saddr->sa_family = tswap16(addr->sa_family);
1737     }
1738     if (addr->sa_family == AF_NETLINK &&
1739         len >= sizeof(struct target_sockaddr_nl)) {
1740         struct target_sockaddr_nl *target_nl =
1741                (struct target_sockaddr_nl *)target_saddr;
1742         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1743         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1744     } else if (addr->sa_family == AF_PACKET) {
1745         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1746         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1747         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1748     } else if (addr->sa_family == AF_INET6 &&
1749                len >= sizeof(struct target_sockaddr_in6)) {
1750         struct target_sockaddr_in6 *target_in6 =
1751                (struct target_sockaddr_in6 *)target_saddr;
1752         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1753     }
1754     unlock_user(target_saddr, target_addr, len);
1755 
1756     return 0;
1757 }
1758 
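/*
 * Convert guest control messages (SCM_RIGHTS fd arrays, SCM_CREDENTIALS,
 * SOL_ALG) into host cmsg records for sendmsg().  For SCM_RIGHTS each
 * 32-bit guest fd is read with __get_user(), so, for instance, a guest
 * passing three fds yields a host cmsg whose payload is three native ints.
 */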
1759 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1760                                            struct target_msghdr *target_msgh)
1761 {
1762     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1763     abi_long msg_controllen;
1764     abi_ulong target_cmsg_addr;
1765     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1766     socklen_t space = 0;
1767 
1768     msg_controllen = tswapal(target_msgh->msg_controllen);
1769     if (msg_controllen < sizeof (struct target_cmsghdr))
1770         goto the_end;
1771     target_cmsg_addr = tswapal(target_msgh->msg_control);
1772     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1773     target_cmsg_start = target_cmsg;
1774     if (!target_cmsg)
1775         return -TARGET_EFAULT;
1776 
1777     while (cmsg && target_cmsg) {
1778         void *data = CMSG_DATA(cmsg);
1779         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1780 
1781         int len = tswapal(target_cmsg->cmsg_len)
1782             - sizeof(struct target_cmsghdr);
1783 
1784         space += CMSG_SPACE(len);
1785         if (space > msgh->msg_controllen) {
1786             space -= CMSG_SPACE(len);
1787             /* This is a QEMU bug, since we allocated the payload
1788              * area ourselves (unlike overflow in host-to-target
1789              * conversion, which is just the guest giving us a buffer
1790              * that's too small). It can't happen for the payload types
1791              * we currently support; if it becomes an issue in future
1792              * we would need to improve our allocation strategy to
1793              * something more intelligent than "twice the size of the
1794              * target buffer we're reading from".
1795              */
1796             qemu_log_mask(LOG_UNIMP,
1797                           ("Unsupported ancillary data %d/%d: "
1798                            "unhandled msg size\n"),
1799                           tswap32(target_cmsg->cmsg_level),
1800                           tswap32(target_cmsg->cmsg_type));
1801             break;
1802         }
1803 
1804         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1805             cmsg->cmsg_level = SOL_SOCKET;
1806         } else {
1807             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1808         }
1809         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1810         cmsg->cmsg_len = CMSG_LEN(len);
1811 
1812         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1813             int *fd = (int *)data;
1814             int *target_fd = (int *)target_data;
1815             int i, numfds = len / sizeof(int);
1816 
1817             for (i = 0; i < numfds; i++) {
1818                 __get_user(fd[i], target_fd + i);
1819             }
1820         } else if (cmsg->cmsg_level == SOL_SOCKET
1821                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1822             struct ucred *cred = (struct ucred *)data;
1823             struct target_ucred *target_cred =
1824                 (struct target_ucred *)target_data;
1825 
1826             __get_user(cred->pid, &target_cred->pid);
1827             __get_user(cred->uid, &target_cred->uid);
1828             __get_user(cred->gid, &target_cred->gid);
1829         } else if (cmsg->cmsg_level == SOL_ALG) {
1830             uint32_t *dst = (uint32_t *)data;
1831 
1832             memcpy(dst, target_data, len);
1833             /* fix endianness of first 32-bit word */
1834             if (len >= sizeof(uint32_t)) {
1835                 *dst = tswap32(*dst);
1836             }
1837         } else {
1838             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1839                           cmsg->cmsg_level, cmsg->cmsg_type);
1840             memcpy(data, target_data, len);
1841         }
1842 
1843         cmsg = CMSG_NXTHDR(msgh, cmsg);
1844         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1845                                          target_cmsg_start);
1846     }
1847     unlock_user(target_cmsg, target_cmsg_addr, 0);
1848  the_end:
1849     msgh->msg_controllen = space;
1850     return 0;
1851 }
1852 
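/*
 * Convert host control messages received from recvmsg() back into guest
 * form.  Payload sizes may differ between host and target (e.g.
 * SO_TIMESTAMP carries a struct timeval), and when the guest buffer is
 * too small the data is truncated and MSG_CTRUNC is reported, matching
 * the kernel's put_cmsg() behaviour described in the comments below.
 */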
1853 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1854                                            struct msghdr *msgh)
1855 {
1856     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1857     abi_long msg_controllen;
1858     abi_ulong target_cmsg_addr;
1859     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1860     socklen_t space = 0;
1861 
1862     msg_controllen = tswapal(target_msgh->msg_controllen);
1863     if (msg_controllen < sizeof (struct target_cmsghdr))
1864         goto the_end;
1865     target_cmsg_addr = tswapal(target_msgh->msg_control);
1866     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1867     target_cmsg_start = target_cmsg;
1868     if (!target_cmsg)
1869         return -TARGET_EFAULT;
1870 
1871     while (cmsg && target_cmsg) {
1872         void *data = CMSG_DATA(cmsg);
1873         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1874 
1875         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1876         int tgt_len, tgt_space;
1877 
1878         /* We never copy a half-header but may copy half-data;
1879          * this is Linux's behaviour in put_cmsg(). Note that
1880          * truncation here is a guest problem (which we report
1881          * to the guest via the CTRUNC bit), unlike truncation
1882          * in target_to_host_cmsg, which is a QEMU bug.
1883          */
1884         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1885             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1886             break;
1887         }
1888 
1889         if (cmsg->cmsg_level == SOL_SOCKET) {
1890             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1891         } else {
1892             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1893         }
1894         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1895 
1896         /* Payload types which need a different size of payload on
1897          * the target must adjust tgt_len here.
1898          */
1899         tgt_len = len;
1900         switch (cmsg->cmsg_level) {
1901         case SOL_SOCKET:
1902             switch (cmsg->cmsg_type) {
1903             case SO_TIMESTAMP:
1904                 tgt_len = sizeof(struct target_timeval);
1905                 break;
1906             default:
1907                 break;
1908             }
1909             break;
1910         default:
1911             break;
1912         }
1913 
1914         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1915             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1916             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1917         }
1918 
1919         /* We must now copy-and-convert len bytes of payload
1920          * into tgt_len bytes of destination space. Bear in mind
1921          * that in both source and destination we may be dealing
1922          * with a truncated value!
1923          */
1924         switch (cmsg->cmsg_level) {
1925         case SOL_SOCKET:
1926             switch (cmsg->cmsg_type) {
1927             case SCM_RIGHTS:
1928             {
1929                 int *fd = (int *)data;
1930                 int *target_fd = (int *)target_data;
1931                 int i, numfds = tgt_len / sizeof(int);
1932 
1933                 for (i = 0; i < numfds; i++) {
1934                     __put_user(fd[i], target_fd + i);
1935                 }
1936                 break;
1937             }
1938             case SO_TIMESTAMP:
1939             {
1940                 struct timeval *tv = (struct timeval *)data;
1941                 struct target_timeval *target_tv =
1942                     (struct target_timeval *)target_data;
1943 
1944                 if (len != sizeof(struct timeval) ||
1945                     tgt_len != sizeof(struct target_timeval)) {
1946                     goto unimplemented;
1947                 }
1948 
1949                 /* copy struct timeval to target */
1950                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1951                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1952                 break;
1953             }
1954             case SCM_CREDENTIALS:
1955             {
1956                 struct ucred *cred = (struct ucred *)data;
1957                 struct target_ucred *target_cred =
1958                     (struct target_ucred *)target_data;
1959 
1960                 __put_user(cred->pid, &target_cred->pid);
1961                 __put_user(cred->uid, &target_cred->uid);
1962                 __put_user(cred->gid, &target_cred->gid);
1963                 break;
1964             }
1965             default:
1966                 goto unimplemented;
1967             }
1968             break;
1969 
1970         case SOL_IP:
1971             switch (cmsg->cmsg_type) {
1972             case IP_TTL:
1973             {
1974                 uint32_t *v = (uint32_t *)data;
1975                 uint32_t *t_int = (uint32_t *)target_data;
1976 
1977                 if (len != sizeof(uint32_t) ||
1978                     tgt_len != sizeof(uint32_t)) {
1979                     goto unimplemented;
1980                 }
1981                 __put_user(*v, t_int);
1982                 break;
1983             }
1984             case IP_RECVERR:
1985             {
1986                 struct errhdr_t {
1987                    struct sock_extended_err ee;
1988                    struct sockaddr_in offender;
1989                 };
1990                 struct errhdr_t *errh = (struct errhdr_t *)data;
1991                 struct errhdr_t *target_errh =
1992                     (struct errhdr_t *)target_data;
1993 
1994                 if (len != sizeof(struct errhdr_t) ||
1995                     tgt_len != sizeof(struct errhdr_t)) {
1996                     goto unimplemented;
1997                 }
1998                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1999                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2000                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2001                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2002                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2003                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2004                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2005                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2006                     (void *) &errh->offender, sizeof(errh->offender));
2007                 break;
2008             }
2009             case IP_PKTINFO:
2010             {
2011                 struct in_pktinfo *pkti = data;
2012                 struct target_in_pktinfo *target_pi = target_data;
2013 
2014                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2015                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2016                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2017                 break;
2018             }
2019             default:
2020                 goto unimplemented;
2021             }
2022             break;
2023 
2024         case SOL_IPV6:
2025             switch (cmsg->cmsg_type) {
2026             case IPV6_HOPLIMIT:
2027             {
2028                 uint32_t *v = (uint32_t *)data;
2029                 uint32_t *t_int = (uint32_t *)target_data;
2030 
2031                 if (len != sizeof(uint32_t) ||
2032                     tgt_len != sizeof(uint32_t)) {
2033                     goto unimplemented;
2034                 }
2035                 __put_user(*v, t_int);
2036                 break;
2037             }
2038             case IPV6_RECVERR:
2039             {
2040                 struct errhdr6_t {
2041                    struct sock_extended_err ee;
2042                    struct sockaddr_in6 offender;
2043                 };
2044                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2045                 struct errhdr6_t *target_errh =
2046                     (struct errhdr6_t *)target_data;
2047 
2048                 if (len != sizeof(struct errhdr6_t) ||
2049                     tgt_len != sizeof(struct errhdr6_t)) {
2050                     goto unimplemented;
2051                 }
2052                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2053                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2054                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2055                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2056                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2057                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2058                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2059                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2060                     (void *) &errh->offender, sizeof(errh->offender));
2061                 break;
2062             }
2063             default:
2064                 goto unimplemented;
2065             }
2066             break;
2067 
2068         default:
2069         unimplemented:
2070             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2071                           cmsg->cmsg_level, cmsg->cmsg_type);
2072             memcpy(target_data, data, MIN(len, tgt_len));
2073             if (tgt_len > len) {
2074                 memset(target_data + len, 0, tgt_len - len);
2075             }
2076         }
2077 
2078         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2079         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2080         if (msg_controllen < tgt_space) {
2081             tgt_space = msg_controllen;
2082         }
2083         msg_controllen -= tgt_space;
2084         space += tgt_space;
2085         cmsg = CMSG_NXTHDR(msgh, cmsg);
2086         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2087                                          target_cmsg_start);
2088     }
2089     unlock_user(target_cmsg, target_cmsg_addr, space);
2090  the_end:
2091     target_msgh->msg_controllen = tswapal(space);
2092     return 0;
2093 }
2094 
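/*
 * Option values whose layout differs between guest and host (timevals,
 * BPF filter programs, multicast requests, linger settings, ...) are
 * converted field by field before calling the host setsockopt(); plain
 * integer options are fetched with get_user_u32() and passed through.
 */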
2095 /* do_setsockopt() Must return target values and target errnos. */
2096 static abi_long do_setsockopt(int sockfd, int level, int optname,
2097                               abi_ulong optval_addr, socklen_t optlen)
2098 {
2099     abi_long ret;
2100     int val;
2101 
2102     switch(level) {
2103     case SOL_TCP:
2104     case SOL_UDP:
2105         /* TCP and UDP options all take an 'int' value.  */
2106         if (optlen < sizeof(uint32_t))
2107             return -TARGET_EINVAL;
2108 
2109         if (get_user_u32(val, optval_addr))
2110             return -TARGET_EFAULT;
2111         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112         break;
2113     case SOL_IP:
2114         switch(optname) {
2115         case IP_TOS:
2116         case IP_TTL:
2117         case IP_HDRINCL:
2118         case IP_ROUTER_ALERT:
2119         case IP_RECVOPTS:
2120         case IP_RETOPTS:
2121         case IP_PKTINFO:
2122         case IP_MTU_DISCOVER:
2123         case IP_RECVERR:
2124         case IP_RECVTTL:
2125         case IP_RECVTOS:
2126 #ifdef IP_FREEBIND
2127         case IP_FREEBIND:
2128 #endif
2129         case IP_MULTICAST_TTL:
2130         case IP_MULTICAST_LOOP:
2131             val = 0;
2132             if (optlen >= sizeof(uint32_t)) {
2133                 if (get_user_u32(val, optval_addr))
2134                     return -TARGET_EFAULT;
2135             } else if (optlen >= 1) {
2136                 if (get_user_u8(val, optval_addr))
2137                     return -TARGET_EFAULT;
2138             }
2139             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2140             break;
2141         case IP_MULTICAST_IF:
2142         case IP_ADD_MEMBERSHIP:
2143         case IP_DROP_MEMBERSHIP:
2144         {
2145             struct ip_mreqn ip_mreq;
2146             struct target_ip_mreqn *target_smreqn;
2147             int min_size;
2148 
2149             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2150                               sizeof(struct target_ip_mreq));
2151 
2152             if (optname == IP_MULTICAST_IF) {
2153                 min_size = sizeof(struct in_addr);
2154             } else {
2155                 min_size = sizeof(struct target_ip_mreq);
2156             }
2157             if (optlen < min_size ||
2158                 optlen > sizeof (struct target_ip_mreqn)) {
2159                 return -TARGET_EINVAL;
2160             }
2161 
2162             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2163             if (!target_smreqn) {
2164                 return -TARGET_EFAULT;
2165             }
2166             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2167             if (optlen >= sizeof(struct target_ip_mreq)) {
2168                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2169                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2170                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2171                     optlen = sizeof(struct ip_mreqn);
2172                 }
2173             }
2174             unlock_user(target_smreqn, optval_addr, 0);
2175             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2176             break;
2177         }
2178         case IP_BLOCK_SOURCE:
2179         case IP_UNBLOCK_SOURCE:
2180         case IP_ADD_SOURCE_MEMBERSHIP:
2181         case IP_DROP_SOURCE_MEMBERSHIP:
2182         {
2183             struct ip_mreq_source *ip_mreq_source;
2184 
2185             if (optlen != sizeof (struct target_ip_mreq_source))
2186                 return -TARGET_EINVAL;
2187 
2188             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2189             if (!ip_mreq_source) {
2190                 return -TARGET_EFAULT;
2191             }
2192             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2193             unlock_user (ip_mreq_source, optval_addr, 0);
2194             break;
2195         }
2196         default:
2197             goto unimplemented;
2198         }
2199         break;
2200     case SOL_IPV6:
2201         switch (optname) {
2202         case IPV6_MTU_DISCOVER:
2203         case IPV6_MTU:
2204         case IPV6_V6ONLY:
2205         case IPV6_RECVPKTINFO:
2206         case IPV6_UNICAST_HOPS:
2207         case IPV6_MULTICAST_HOPS:
2208         case IPV6_MULTICAST_LOOP:
2209         case IPV6_RECVERR:
2210         case IPV6_RECVHOPLIMIT:
2211         case IPV6_2292HOPLIMIT:
2212         case IPV6_CHECKSUM:
2213         case IPV6_ADDRFORM:
2214         case IPV6_2292PKTINFO:
2215         case IPV6_RECVTCLASS:
2216         case IPV6_RECVRTHDR:
2217         case IPV6_2292RTHDR:
2218         case IPV6_RECVHOPOPTS:
2219         case IPV6_2292HOPOPTS:
2220         case IPV6_RECVDSTOPTS:
2221         case IPV6_2292DSTOPTS:
2222         case IPV6_TCLASS:
2223         case IPV6_ADDR_PREFERENCES:
2224 #ifdef IPV6_RECVPATHMTU
2225         case IPV6_RECVPATHMTU:
2226 #endif
2227 #ifdef IPV6_TRANSPARENT
2228         case IPV6_TRANSPARENT:
2229 #endif
2230 #ifdef IPV6_FREEBIND
2231         case IPV6_FREEBIND:
2232 #endif
2233 #ifdef IPV6_RECVORIGDSTADDR
2234         case IPV6_RECVORIGDSTADDR:
2235 #endif
2236             val = 0;
2237             if (optlen < sizeof(uint32_t)) {
2238                 return -TARGET_EINVAL;
2239             }
2240             if (get_user_u32(val, optval_addr)) {
2241                 return -TARGET_EFAULT;
2242             }
2243             ret = get_errno(setsockopt(sockfd, level, optname,
2244                                        &val, sizeof(val)));
2245             break;
2246         case IPV6_PKTINFO:
2247         {
2248             struct in6_pktinfo pki;
2249 
2250             if (optlen < sizeof(pki)) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2255                 return -TARGET_EFAULT;
2256             }
2257 
2258             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2259 
2260             ret = get_errno(setsockopt(sockfd, level, optname,
2261                                        &pki, sizeof(pki)));
2262             break;
2263         }
2264         case IPV6_ADD_MEMBERSHIP:
2265         case IPV6_DROP_MEMBERSHIP:
2266         {
2267             struct ipv6_mreq ipv6mreq;
2268 
2269             if (optlen < sizeof(ipv6mreq)) {
2270                 return -TARGET_EINVAL;
2271             }
2272 
2273             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2274                 return -TARGET_EFAULT;
2275             }
2276 
2277             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2278 
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        &ipv6mreq, sizeof(ipv6mreq)));
2281             break;
2282         }
2283         default:
2284             goto unimplemented;
2285         }
2286         break;
2287     case SOL_ICMPV6:
2288         switch (optname) {
2289         case ICMPV6_FILTER:
2290         {
2291             struct icmp6_filter icmp6f;
2292 
2293             if (optlen > sizeof(icmp6f)) {
2294                 optlen = sizeof(icmp6f);
2295             }
2296 
2297             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2298                 return -TARGET_EFAULT;
2299             }
2300 
2301             for (val = 0; val < 8; val++) {
2302                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2303             }
2304 
2305             ret = get_errno(setsockopt(sockfd, level, optname,
2306                                        &icmp6f, optlen));
2307             break;
2308         }
2309         default:
2310             goto unimplemented;
2311         }
2312         break;
2313     case SOL_RAW:
2314         switch (optname) {
2315         case ICMP_FILTER:
2316         case IPV6_CHECKSUM:
2317             /* those take a u32 value */
2318             if (optlen < sizeof(uint32_t)) {
2319                 return -TARGET_EINVAL;
2320             }
2321 
2322             if (get_user_u32(val, optval_addr)) {
2323                 return -TARGET_EFAULT;
2324             }
2325             ret = get_errno(setsockopt(sockfd, level, optname,
2326                                        &val, sizeof(val)));
2327             break;
2328 
2329         default:
2330             goto unimplemented;
2331         }
2332         break;
2333 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2334     case SOL_ALG:
2335         switch (optname) {
2336         case ALG_SET_KEY:
2337         {
2338             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2339             if (!alg_key) {
2340                 return -TARGET_EFAULT;
2341             }
2342             ret = get_errno(setsockopt(sockfd, level, optname,
2343                                        alg_key, optlen));
2344             unlock_user(alg_key, optval_addr, optlen);
2345             break;
2346         }
2347         case ALG_SET_AEAD_AUTHSIZE:
2348         {
2349             ret = get_errno(setsockopt(sockfd, level, optname,
2350                                        NULL, optlen));
2351             break;
2352         }
2353         default:
2354             goto unimplemented;
2355         }
2356         break;
2357 #endif
2358     case TARGET_SOL_SOCKET:
2359         switch (optname) {
2360         case TARGET_SO_RCVTIMEO:
2361         case TARGET_SO_SNDTIMEO:
2362         {
2363                 struct timeval tv;
2364 
2365                 if (optlen != sizeof(struct target_timeval)) {
2366                     return -TARGET_EINVAL;
2367                 }
2368 
2369                 if (copy_from_user_timeval(&tv, optval_addr)) {
2370                     return -TARGET_EFAULT;
2371                 }
2372 
2373                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2374                                 optname == TARGET_SO_RCVTIMEO ?
2375                                     SO_RCVTIMEO : SO_SNDTIMEO,
2376                                 &tv, sizeof(tv)));
2377                 return ret;
2378         }
2379         case TARGET_SO_ATTACH_FILTER:
2380         {
2381                 struct target_sock_fprog *tfprog;
2382                 struct target_sock_filter *tfilter;
2383                 struct sock_fprog fprog;
2384                 struct sock_filter *filter;
2385                 int i;
2386 
2387                 if (optlen != sizeof(*tfprog)) {
2388                     return -TARGET_EINVAL;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2391                     return -TARGET_EFAULT;
2392                 }
2393                 if (!lock_user_struct(VERIFY_READ, tfilter,
2394                                       tswapal(tfprog->filter), 0)) {
2395                     unlock_user_struct(tfprog, optval_addr, 1);
2396                     return -TARGET_EFAULT;
2397                 }
2398 
2399                 fprog.len = tswap16(tfprog->len);
2400                 filter = g_try_new(struct sock_filter, fprog.len);
2401                 if (filter == NULL) {
2402                     unlock_user_struct(tfilter, tfprog->filter, 1);
2403                     unlock_user_struct(tfprog, optval_addr, 1);
2404                     return -TARGET_ENOMEM;
2405                 }
2406                 for (i = 0; i < fprog.len; i++) {
2407                     filter[i].code = tswap16(tfilter[i].code);
2408                     filter[i].jt = tfilter[i].jt;
2409                     filter[i].jf = tfilter[i].jf;
2410                     filter[i].k = tswap32(tfilter[i].k);
2411                 }
2412                 fprog.filter = filter;
2413 
2414                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2415                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2416                 g_free(filter);
2417 
2418                 unlock_user_struct(tfilter, tfprog->filter, 1);
2419                 unlock_user_struct(tfprog, optval_addr, 1);
2420                 return ret;
2421         }
2422 	case TARGET_SO_BINDTODEVICE:
2423 	{
2424 		char *dev_ifname, *addr_ifname;
2425 
2426 		if (optlen > IFNAMSIZ - 1) {
2427 		    optlen = IFNAMSIZ - 1;
2428 		}
2429 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2430 		if (!dev_ifname) {
2431 		    return -TARGET_EFAULT;
2432 		}
2433 		optname = SO_BINDTODEVICE;
2434 		addr_ifname = alloca(IFNAMSIZ);
2435 		memcpy(addr_ifname, dev_ifname, optlen);
2436 		addr_ifname[optlen] = 0;
2437 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2438                                            addr_ifname, optlen));
2439 		unlock_user (dev_ifname, optval_addr, 0);
2440 		return ret;
2441 	}
2442         case TARGET_SO_LINGER:
2443         {
2444                 struct linger lg;
2445                 struct target_linger *tlg;
2446 
2447                 if (optlen != sizeof(struct target_linger)) {
2448                     return -TARGET_EINVAL;
2449                 }
2450                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2451                     return -TARGET_EFAULT;
2452                 }
2453                 __get_user(lg.l_onoff, &tlg->l_onoff);
2454                 __get_user(lg.l_linger, &tlg->l_linger);
2455                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2456                                 &lg, sizeof(lg)));
2457                 unlock_user_struct(tlg, optval_addr, 0);
2458                 return ret;
2459         }
2460             /* Options with 'int' argument.  */
2461         case TARGET_SO_DEBUG:
2462 		optname = SO_DEBUG;
2463 		break;
2464         case TARGET_SO_REUSEADDR:
2465 		optname = SO_REUSEADDR;
2466 		break;
2467 #ifdef SO_REUSEPORT
2468         case TARGET_SO_REUSEPORT:
2469                 optname = SO_REUSEPORT;
2470                 break;
2471 #endif
2472         case TARGET_SO_TYPE:
2473 		optname = SO_TYPE;
2474 		break;
2475         case TARGET_SO_ERROR:
2476 		optname = SO_ERROR;
2477 		break;
2478         case TARGET_SO_DONTROUTE:
2479 		optname = SO_DONTROUTE;
2480 		break;
2481         case TARGET_SO_BROADCAST:
2482 		optname = SO_BROADCAST;
2483 		break;
2484         case TARGET_SO_SNDBUF:
2485 		optname = SO_SNDBUF;
2486 		break;
2487         case TARGET_SO_SNDBUFFORCE:
2488                 optname = SO_SNDBUFFORCE;
2489                 break;
2490         case TARGET_SO_RCVBUF:
2491 		optname = SO_RCVBUF;
2492 		break;
2493         case TARGET_SO_RCVBUFFORCE:
2494                 optname = SO_RCVBUFFORCE;
2495                 break;
2496         case TARGET_SO_KEEPALIVE:
2497 		optname = SO_KEEPALIVE;
2498 		break;
2499         case TARGET_SO_OOBINLINE:
2500 		optname = SO_OOBINLINE;
2501 		break;
2502         case TARGET_SO_NO_CHECK:
2503 		optname = SO_NO_CHECK;
2504 		break;
2505         case TARGET_SO_PRIORITY:
2506 		optname = SO_PRIORITY;
2507 		break;
2508 #ifdef SO_BSDCOMPAT
2509         case TARGET_SO_BSDCOMPAT:
2510 		optname = SO_BSDCOMPAT;
2511 		break;
2512 #endif
2513         case TARGET_SO_PASSCRED:
2514 		optname = SO_PASSCRED;
2515 		break;
2516         case TARGET_SO_PASSSEC:
2517                 optname = SO_PASSSEC;
2518                 break;
2519         case TARGET_SO_TIMESTAMP:
2520 		optname = SO_TIMESTAMP;
2521 		break;
2522         case TARGET_SO_RCVLOWAT:
2523 		optname = SO_RCVLOWAT;
2524 		break;
2525         default:
2526             goto unimplemented;
2527         }
2528 	if (optlen < sizeof(uint32_t))
2529             return -TARGET_EINVAL;
2530 
2531 	if (get_user_u32(val, optval_addr))
2532             return -TARGET_EFAULT;
2533 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2534         break;
2535 #ifdef SOL_NETLINK
2536     case SOL_NETLINK:
2537         switch (optname) {
2538         case NETLINK_PKTINFO:
2539         case NETLINK_ADD_MEMBERSHIP:
2540         case NETLINK_DROP_MEMBERSHIP:
2541         case NETLINK_BROADCAST_ERROR:
2542         case NETLINK_NO_ENOBUFS:
2543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2544         case NETLINK_LISTEN_ALL_NSID:
2545         case NETLINK_CAP_ACK:
2546 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2548         case NETLINK_EXT_ACK:
2549 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2550 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2551         case NETLINK_GET_STRICT_CHK:
2552 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2553             break;
2554         default:
2555             goto unimplemented;
2556         }
2557         val = 0;
2558         if (optlen < sizeof(uint32_t)) {
2559             return -TARGET_EINVAL;
2560         }
2561         if (get_user_u32(val, optval_addr)) {
2562             return -TARGET_EFAULT;
2563         }
2564         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2565                                    sizeof(val)));
2566         break;
2567 #endif /* SOL_NETLINK */
2568     default:
2569     unimplemented:
2570         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2571                       level, optname);
2572         ret = -TARGET_ENOPROTOOPT;
2573     }
2574     return ret;
2575 }
2576 
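/*
 * The common case ("int_case" below) reads the host value into an int and
 * writes it back as either a full 32-bit word or a single byte, depending
 * on the length the guest supplied; SO_TYPE and SO_ERROR additionally need
 * their values translated to target constants.
 */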
2577 /* do_getsockopt() Must return target values and target errnos. */
2578 static abi_long do_getsockopt(int sockfd, int level, int optname,
2579                               abi_ulong optval_addr, abi_ulong optlen)
2580 {
2581     abi_long ret;
2582     int len, val;
2583     socklen_t lv;
2584 
2585     switch(level) {
2586     case TARGET_SOL_SOCKET:
2587         level = SOL_SOCKET;
2588         switch (optname) {
2589         /* These don't just return a single integer */
2590         case TARGET_SO_PEERNAME:
2591             goto unimplemented;
2592         case TARGET_SO_RCVTIMEO: {
2593             struct timeval tv;
2594             socklen_t tvlen;
2595 
2596             optname = SO_RCVTIMEO;
2597 
2598 get_timeout:
2599             if (get_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             if (len < 0) {
2603                 return -TARGET_EINVAL;
2604             }
2605 
2606             tvlen = sizeof(tv);
2607             ret = get_errno(getsockopt(sockfd, level, optname,
2608                                        &tv, &tvlen));
2609             if (ret < 0) {
2610                 return ret;
2611             }
2612             if (len > sizeof(struct target_timeval)) {
2613                 len = sizeof(struct target_timeval);
2614             }
2615             if (copy_to_user_timeval(optval_addr, &tv)) {
2616                 return -TARGET_EFAULT;
2617             }
2618             if (put_user_u32(len, optlen)) {
2619                 return -TARGET_EFAULT;
2620             }
2621             break;
2622         }
2623         case TARGET_SO_SNDTIMEO:
2624             optname = SO_SNDTIMEO;
2625             goto get_timeout;
2626         case TARGET_SO_PEERCRED: {
2627             struct ucred cr;
2628             socklen_t crlen;
2629             struct target_ucred *tcr;
2630 
2631             if (get_user_u32(len, optlen)) {
2632                 return -TARGET_EFAULT;
2633             }
2634             if (len < 0) {
2635                 return -TARGET_EINVAL;
2636             }
2637 
2638             crlen = sizeof(cr);
2639             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2640                                        &cr, &crlen));
2641             if (ret < 0) {
2642                 return ret;
2643             }
2644             if (len > crlen) {
2645                 len = crlen;
2646             }
2647             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             __put_user(cr.pid, &tcr->pid);
2651             __put_user(cr.uid, &tcr->uid);
2652             __put_user(cr.gid, &tcr->gid);
2653             unlock_user_struct(tcr, optval_addr, 1);
2654             if (put_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             break;
2658         }
2659         case TARGET_SO_PEERSEC: {
2660             char *name;
2661 
2662             if (get_user_u32(len, optlen)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             if (len < 0) {
2666                 return -TARGET_EINVAL;
2667             }
2668             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2669             if (!name) {
2670                 return -TARGET_EFAULT;
2671             }
2672             lv = len;
2673             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2674                                        name, &lv));
2675             if (put_user_u32(lv, optlen)) {
2676                 ret = -TARGET_EFAULT;
2677             }
2678             unlock_user(name, optval_addr, lv);
2679             break;
2680         }
2681         case TARGET_SO_LINGER:
2682         {
2683             struct linger lg;
2684             socklen_t lglen;
2685             struct target_linger *tlg;
2686 
2687             if (get_user_u32(len, optlen)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             if (len < 0) {
2691                 return -TARGET_EINVAL;
2692             }
2693 
2694             lglen = sizeof(lg);
2695             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2696                                        &lg, &lglen));
2697             if (ret < 0) {
2698                 return ret;
2699             }
2700             if (len > lglen) {
2701                 len = lglen;
2702             }
2703             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2704                 return -TARGET_EFAULT;
2705             }
2706             __put_user(lg.l_onoff, &tlg->l_onoff);
2707             __put_user(lg.l_linger, &tlg->l_linger);
2708             unlock_user_struct(tlg, optval_addr, 1);
2709             if (put_user_u32(len, optlen)) {
2710                 return -TARGET_EFAULT;
2711             }
2712             break;
2713         }
2714         /* Options with 'int' argument.  */
2715         case TARGET_SO_DEBUG:
2716             optname = SO_DEBUG;
2717             goto int_case;
2718         case TARGET_SO_REUSEADDR:
2719             optname = SO_REUSEADDR;
2720             goto int_case;
2721 #ifdef SO_REUSEPORT
2722         case TARGET_SO_REUSEPORT:
2723             optname = SO_REUSEPORT;
2724             goto int_case;
2725 #endif
2726         case TARGET_SO_TYPE:
2727             optname = SO_TYPE;
2728             goto int_case;
2729         case TARGET_SO_ERROR:
2730             optname = SO_ERROR;
2731             goto int_case;
2732         case TARGET_SO_DONTROUTE:
2733             optname = SO_DONTROUTE;
2734             goto int_case;
2735         case TARGET_SO_BROADCAST:
2736             optname = SO_BROADCAST;
2737             goto int_case;
2738         case TARGET_SO_SNDBUF:
2739             optname = SO_SNDBUF;
2740             goto int_case;
2741         case TARGET_SO_RCVBUF:
2742             optname = SO_RCVBUF;
2743             goto int_case;
2744         case TARGET_SO_KEEPALIVE:
2745             optname = SO_KEEPALIVE;
2746             goto int_case;
2747         case TARGET_SO_OOBINLINE:
2748             optname = SO_OOBINLINE;
2749             goto int_case;
2750         case TARGET_SO_NO_CHECK:
2751             optname = SO_NO_CHECK;
2752             goto int_case;
2753         case TARGET_SO_PRIORITY:
2754             optname = SO_PRIORITY;
2755             goto int_case;
2756 #ifdef SO_BSDCOMPAT
2757         case TARGET_SO_BSDCOMPAT:
2758             optname = SO_BSDCOMPAT;
2759             goto int_case;
2760 #endif
2761         case TARGET_SO_PASSCRED:
2762             optname = SO_PASSCRED;
2763             goto int_case;
2764         case TARGET_SO_TIMESTAMP:
2765             optname = SO_TIMESTAMP;
2766             goto int_case;
2767         case TARGET_SO_RCVLOWAT:
2768             optname = SO_RCVLOWAT;
2769             goto int_case;
2770         case TARGET_SO_ACCEPTCONN:
2771             optname = SO_ACCEPTCONN;
2772             goto int_case;
2773         case TARGET_SO_PROTOCOL:
2774             optname = SO_PROTOCOL;
2775             goto int_case;
2776         case TARGET_SO_DOMAIN:
2777             optname = SO_DOMAIN;
2778             goto int_case;
2779         default:
2780             goto int_case;
2781         }
2782         break;
2783     case SOL_TCP:
2784     case SOL_UDP:
2785         /* TCP and UDP options all take an 'int' value.  */
2786     int_case:
2787         if (get_user_u32(len, optlen))
2788             return -TARGET_EFAULT;
2789         if (len < 0)
2790             return -TARGET_EINVAL;
2791         lv = sizeof(lv);
2792         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2793         if (ret < 0)
2794             return ret;
2795         switch (optname) {
2796         case SO_TYPE:
2797             val = host_to_target_sock_type(val);
2798             break;
2799         case SO_ERROR:
2800             val = host_to_target_errno(val);
2801             break;
2802         }
2803         if (len > lv)
2804             len = lv;
2805         if (len == 4) {
2806             if (put_user_u32(val, optval_addr))
2807                 return -TARGET_EFAULT;
2808         } else {
2809             if (put_user_u8(val, optval_addr))
2810                 return -TARGET_EFAULT;
2811         }
2812         if (put_user_u32(len, optlen))
2813             return -TARGET_EFAULT;
2814         break;
2815     case SOL_IP:
2816         switch(optname) {
2817         case IP_TOS:
2818         case IP_TTL:
2819         case IP_HDRINCL:
2820         case IP_ROUTER_ALERT:
2821         case IP_RECVOPTS:
2822         case IP_RETOPTS:
2823         case IP_PKTINFO:
2824         case IP_MTU_DISCOVER:
2825         case IP_RECVERR:
2826         case IP_RECVTOS:
2827 #ifdef IP_FREEBIND
2828         case IP_FREEBIND:
2829 #endif
2830         case IP_MULTICAST_TTL:
2831         case IP_MULTICAST_LOOP:
2832             if (get_user_u32(len, optlen))
2833                 return -TARGET_EFAULT;
2834             if (len < 0)
2835                 return -TARGET_EINVAL;
2836             lv = sizeof(lv);
2837             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2838             if (ret < 0)
2839                 return ret;
2840             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2841                 len = 1;
2842                 if (put_user_u32(len, optlen)
2843                     || put_user_u8(val, optval_addr))
2844                     return -TARGET_EFAULT;
2845             } else {
2846                 if (len > sizeof(int))
2847                     len = sizeof(int);
2848                 if (put_user_u32(len, optlen)
2849                     || put_user_u32(val, optval_addr))
2850                     return -TARGET_EFAULT;
2851             }
2852             break;
2853         default:
2854             ret = -TARGET_ENOPROTOOPT;
2855             break;
2856         }
2857         break;
2858     case SOL_IPV6:
2859         switch (optname) {
2860         case IPV6_MTU_DISCOVER:
2861         case IPV6_MTU:
2862         case IPV6_V6ONLY:
2863         case IPV6_RECVPKTINFO:
2864         case IPV6_UNICAST_HOPS:
2865         case IPV6_MULTICAST_HOPS:
2866         case IPV6_MULTICAST_LOOP:
2867         case IPV6_RECVERR:
2868         case IPV6_RECVHOPLIMIT:
2869         case IPV6_2292HOPLIMIT:
2870         case IPV6_CHECKSUM:
2871         case IPV6_ADDRFORM:
2872         case IPV6_2292PKTINFO:
2873         case IPV6_RECVTCLASS:
2874         case IPV6_RECVRTHDR:
2875         case IPV6_2292RTHDR:
2876         case IPV6_RECVHOPOPTS:
2877         case IPV6_2292HOPOPTS:
2878         case IPV6_RECVDSTOPTS:
2879         case IPV6_2292DSTOPTS:
2880         case IPV6_TCLASS:
2881         case IPV6_ADDR_PREFERENCES:
2882 #ifdef IPV6_RECVPATHMTU
2883         case IPV6_RECVPATHMTU:
2884 #endif
2885 #ifdef IPV6_TRANSPARENT
2886         case IPV6_TRANSPARENT:
2887 #endif
2888 #ifdef IPV6_FREEBIND
2889         case IPV6_FREEBIND:
2890 #endif
2891 #ifdef IPV6_RECVORIGDSTADDR
2892         case IPV6_RECVORIGDSTADDR:
2893 #endif
2894             if (get_user_u32(len, optlen))
2895                 return -TARGET_EFAULT;
2896             if (len < 0)
2897                 return -TARGET_EINVAL;
2898             lv = sizeof(lv);
2899             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2900             if (ret < 0)
2901                 return ret;
2902             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2903                 len = 1;
2904                 if (put_user_u32(len, optlen)
2905                     || put_user_u8(val, optval_addr))
2906                     return -TARGET_EFAULT;
2907             } else {
2908                 if (len > sizeof(int))
2909                     len = sizeof(int);
2910                 if (put_user_u32(len, optlen)
2911                     || put_user_u32(val, optval_addr))
2912                     return -TARGET_EFAULT;
2913             }
2914             break;
2915         default:
2916             ret = -TARGET_ENOPROTOOPT;
2917             break;
2918         }
2919         break;
2920 #ifdef SOL_NETLINK
2921     case SOL_NETLINK:
2922         switch (optname) {
2923         case NETLINK_PKTINFO:
2924         case NETLINK_BROADCAST_ERROR:
2925         case NETLINK_NO_ENOBUFS:
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2927         case NETLINK_LISTEN_ALL_NSID:
2928         case NETLINK_CAP_ACK:
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2930 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2931         case NETLINK_EXT_ACK:
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2934         case NETLINK_GET_STRICT_CHK:
2935 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2936             if (get_user_u32(len, optlen)) {
2937                 return -TARGET_EFAULT;
2938             }
2939             if (len != sizeof(val)) {
2940                 return -TARGET_EINVAL;
2941             }
2942             lv = len;
2943             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2944             if (ret < 0) {
2945                 return ret;
2946             }
2947             if (put_user_u32(lv, optlen)
2948                 || put_user_u32(val, optval_addr)) {
2949                 return -TARGET_EFAULT;
2950             }
2951             break;
2952 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2953         case NETLINK_LIST_MEMBERSHIPS:
2954         {
2955             uint32_t *results;
2956             int i;
2957             if (get_user_u32(len, optlen)) {
2958                 return -TARGET_EFAULT;
2959             }
2960             if (len < 0) {
2961                 return -TARGET_EINVAL;
2962             }
2963             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2964             if (!results && len > 0) {
2965                 return -TARGET_EFAULT;
2966             }
2967             lv = len;
2968             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2969             if (ret < 0) {
2970                 unlock_user(results, optval_addr, 0);
2971                 return ret;
2972             }
2973             /* swap host endianness to target endianness. */
2974             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2975                 results[i] = tswap32(results[i]);
2976             }
2977             if (put_user_u32(lv, optlen)) {
2978                 return -TARGET_EFAULT;
2979             }
2980             unlock_user(results, optval_addr, 0);
2981             break;
2982         }
2983 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2984         default:
2985             goto unimplemented;
2986         }
2987         break;
2988 #endif /* SOL_NETLINK */
2989     default:
2990     unimplemented:
2991         qemu_log_mask(LOG_UNIMP,
2992                       "getsockopt level=%d optname=%d not yet supported\n",
2993                       level, optname);
2994         ret = -TARGET_EOPNOTSUPP;
2995         break;
2996     }
2997     return ret;
2998 }
2999 
3000 /* Convert target low/high pair representing file offset into the host
3001  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3002  * as the kernel doesn't handle them either.
3003  */
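/*
 * Illustrative example (values not from the source): on a 32-bit target
 * running on a 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567
 * combine into off = 0x0123456789abcdef; *hlow then receives the full
 * 64-bit value and *hhigh ends up 0, since the offset fits in one host long.
 */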
3004 static void target_to_host_low_high(abi_ulong tlow,
3005                                     abi_ulong thigh,
3006                                     unsigned long *hlow,
3007                                     unsigned long *hhigh)
3008 {
3009     uint64_t off = tlow |
3010         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3011         TARGET_LONG_BITS / 2;
3012 
3013     *hlow = off;
3014     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3015 }
3016 
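/*
 * Translate a guest iovec array at target_addr into a host struct iovec
 * array.  Sketch of the pairing used by callers such as
 * do_sendrecvmsg_locked() below: lock_iovec() pins the guest buffers, the
 * host syscall operates on the returned vector, and unlock_iovec() releases
 * the guest pages, copying data back only when copy is non-zero.
 */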
3017 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3018                                 abi_ulong count, int copy)
3019 {
3020     struct target_iovec *target_vec;
3021     struct iovec *vec;
3022     abi_ulong total_len, max_len;
3023     int i;
3024     int err = 0;
3025     bool bad_address = false;
3026 
3027     if (count == 0) {
3028         errno = 0;
3029         return NULL;
3030     }
3031     if (count > IOV_MAX) {
3032         errno = EINVAL;
3033         return NULL;
3034     }
3035 
3036     vec = g_try_new0(struct iovec, count);
3037     if (vec == NULL) {
3038         errno = ENOMEM;
3039         return NULL;
3040     }
3041 
3042     target_vec = lock_user(VERIFY_READ, target_addr,
3043                            count * sizeof(struct target_iovec), 1);
3044     if (target_vec == NULL) {
3045         err = EFAULT;
3046         goto fail2;
3047     }
3048 
3049     /* ??? If host page size > target page size, this will result in a
3050        value larger than what we can actually support.  */
3051     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3052     total_len = 0;
3053 
3054     for (i = 0; i < count; i++) {
3055         abi_ulong base = tswapal(target_vec[i].iov_base);
3056         abi_long len = tswapal(target_vec[i].iov_len);
3057 
3058         if (len < 0) {
3059             err = EINVAL;
3060             goto fail;
3061         } else if (len == 0) {
3062             /* Zero length pointer is ignored.  */
3063             vec[i].iov_base = 0;
3064         } else {
3065             vec[i].iov_base = lock_user(type, base, len, copy);
3066             /* If the first buffer pointer is bad, this is a fault.  But
3067              * subsequent bad buffers will result in a partial write; this
3068              * is realized by filling the vector with null pointers and
3069              * zero lengths. */
3070             if (!vec[i].iov_base) {
3071                 if (i == 0) {
3072                     err = EFAULT;
3073                     goto fail;
3074                 } else {
3075                     bad_address = true;
3076                 }
3077             }
3078             if (bad_address) {
3079                 len = 0;
3080             }
3081             if (len > max_len - total_len) {
3082                 len = max_len - total_len;
3083             }
3084         }
3085         vec[i].iov_len = len;
3086         total_len += len;
3087     }
3088 
3089     unlock_user(target_vec, target_addr, 0);
3090     return vec;
3091 
3092  fail:
3093     while (--i >= 0) {
3094         if (tswapal(target_vec[i].iov_len) > 0) {
3095             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3096         }
3097     }
3098     unlock_user(target_vec, target_addr, 0);
3099  fail2:
3100     g_free(vec);
3101     errno = err;
3102     return NULL;
3103 }
3104 
3105 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3106                          abi_ulong count, int copy)
3107 {
3108     struct target_iovec *target_vec;
3109     int i;
3110 
3111     target_vec = lock_user(VERIFY_READ, target_addr,
3112                            count * sizeof(struct target_iovec), 1);
3113     if (target_vec) {
3114         for (i = 0; i < count; i++) {
3115             abi_ulong base = tswapal(target_vec[i].iov_base);
3116             abi_long len = tswapal(target_vec[i].iov_len);
3117             if (len < 0) {
3118                 break;
3119             }
3120             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3121         }
3122         unlock_user(target_vec, target_addr, 0);
3123     }
3124 
3125     g_free(vec);
3126 }
3127 
3128 static inline int target_to_host_sock_type(int *type)
3129 {
3130     int host_type = 0;
3131     int target_type = *type;
3132 
3133     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3134     case TARGET_SOCK_DGRAM:
3135         host_type = SOCK_DGRAM;
3136         break;
3137     case TARGET_SOCK_STREAM:
3138         host_type = SOCK_STREAM;
3139         break;
3140     default:
3141         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3142         break;
3143     }
3144     if (target_type & TARGET_SOCK_CLOEXEC) {
3145 #if defined(SOCK_CLOEXEC)
3146         host_type |= SOCK_CLOEXEC;
3147 #else
3148         return -TARGET_EINVAL;
3149 #endif
3150     }
3151     if (target_type & TARGET_SOCK_NONBLOCK) {
3152 #if defined(SOCK_NONBLOCK)
3153         host_type |= SOCK_NONBLOCK;
3154 #elif !defined(O_NONBLOCK)
3155         return -TARGET_EINVAL;
3156 #endif
3157     }
3158     *type = host_type;
3159     return 0;
3160 }
3161 
3162 /* Try to emulate socket type flags after socket creation.  */
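/*
 * Sketch of the fallback below: on hosts lacking SOCK_NONBLOCK (but
 * providing O_NONBLOCK), the non-blocking request that
 * target_to_host_sock_type() could not encode is applied here with
 * fcntl(F_GETFL/F_SETFL) on the freshly created socket.
 */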
3163 static int sock_flags_fixup(int fd, int target_type)
3164 {
3165 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3166     if (target_type & TARGET_SOCK_NONBLOCK) {
3167         int flags = fcntl(fd, F_GETFL);
3168         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3169             close(fd);
3170             return -TARGET_EINVAL;
3171         }
3172     }
3173 #endif
3174     return fd;
3175 }
3176 
3177 /* do_socket() Must return target values and target errnos. */
3178 static abi_long do_socket(int domain, int type, int protocol)
3179 {
3180     int target_type = type;
3181     int ret;
3182 
3183     ret = target_to_host_sock_type(&type);
3184     if (ret) {
3185         return ret;
3186     }
3187 
3188     if (domain == PF_NETLINK && !(
3189 #ifdef CONFIG_RTNETLINK
3190          protocol == NETLINK_ROUTE ||
3191 #endif
3192          protocol == NETLINK_KOBJECT_UEVENT ||
3193          protocol == NETLINK_AUDIT)) {
3194         return -TARGET_EPROTONOSUPPORT;
3195     }
3196 
3197     if (domain == AF_PACKET ||
3198         (domain == AF_INET && type == SOCK_PACKET)) {
3199         protocol = tswap16(protocol);
3200     }
3201 
3202     ret = get_errno(socket(domain, type, protocol));
3203     if (ret >= 0) {
3204         ret = sock_flags_fixup(ret, target_type);
3205         if (type == SOCK_PACKET) {
3206             /* Handle an obsolete case:
3207              * if the socket type is SOCK_PACKET, bind by name
3208              */
3209             fd_trans_register(ret, &target_packet_trans);
3210         } else if (domain == PF_NETLINK) {
3211             switch (protocol) {
3212 #ifdef CONFIG_RTNETLINK
3213             case NETLINK_ROUTE:
3214                 fd_trans_register(ret, &target_netlink_route_trans);
3215                 break;
3216 #endif
3217             case NETLINK_KOBJECT_UEVENT:
3218                 /* nothing to do: messages are strings */
3219                 break;
3220             case NETLINK_AUDIT:
3221                 fd_trans_register(ret, &target_netlink_audit_trans);
3222                 break;
3223             default:
3224                 g_assert_not_reached();
3225             }
3226         }
3227     }
3228     return ret;
3229 }
3230 
3231 /* do_bind() Must return target values and target errnos. */
3232 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3233                         socklen_t addrlen)
3234 {
3235     void *addr;
3236     abi_long ret;
3237 
3238     if ((int)addrlen < 0) {
3239         return -TARGET_EINVAL;
3240     }
3241 
3242     addr = alloca(addrlen+1);
3243 
3244     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3245     if (ret)
3246         return ret;
3247 
3248     return get_errno(bind(sockfd, addr, addrlen));
3249 }
3250 
3251 /* do_connect() Must return target values and target errnos. */
3252 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3253                            socklen_t addrlen)
3254 {
3255     void *addr;
3256     abi_long ret;
3257 
3258     if ((int)addrlen < 0) {
3259         return -TARGET_EINVAL;
3260     }
3261 
3262     addr = alloca(addrlen+1);
3263 
3264     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3265     if (ret)
3266         return ret;
3267 
3268     return get_errno(safe_connect(sockfd, addr, addrlen));
3269 }
3270 
3271 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3272 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3273                                       int flags, int send)
3274 {
3275     abi_long ret, len;
3276     struct msghdr msg;
3277     abi_ulong count;
3278     struct iovec *vec;
3279     abi_ulong target_vec;
3280 
3281     if (msgp->msg_name) {
3282         msg.msg_namelen = tswap32(msgp->msg_namelen);
3283         msg.msg_name = alloca(msg.msg_namelen+1);
3284         ret = target_to_host_sockaddr(fd, msg.msg_name,
3285                                       tswapal(msgp->msg_name),
3286                                       msg.msg_namelen);
3287         if (ret == -TARGET_EFAULT) {
3288             /* For connected sockets msg_name and msg_namelen must
3289              * be ignored, so returning EFAULT immediately is wrong.
3290              * Instead, pass a bad msg_name to the host kernel, and
3291              * let it decide whether to return EFAULT or not.
3292              */
3293             msg.msg_name = (void *)-1;
3294         } else if (ret) {
3295             goto out2;
3296         }
3297     } else {
3298         msg.msg_name = NULL;
3299         msg.msg_namelen = 0;
3300     }
3301     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3302     msg.msg_control = alloca(msg.msg_controllen);
3303     memset(msg.msg_control, 0, msg.msg_controllen);
3304 
3305     msg.msg_flags = tswap32(msgp->msg_flags);
3306 
3307     count = tswapal(msgp->msg_iovlen);
3308     target_vec = tswapal(msgp->msg_iov);
3309 
3310     if (count > IOV_MAX) {
3311         /* sendmsg/recvmsg return a different errno for this condition than
3312          * readv/writev, so we must catch it here before lock_iovec() does.
3313          */
3314         ret = -TARGET_EMSGSIZE;
3315         goto out2;
3316     }
3317 
3318     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3319                      target_vec, count, send);
3320     if (vec == NULL) {
3321         ret = -host_to_target_errno(errno);
3322         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3323         if (!send || ret) {
3324             goto out2;
3325         }
3326     }
3327     msg.msg_iovlen = count;
3328     msg.msg_iov = vec;
3329 
3330     if (send) {
3331         if (fd_trans_target_to_host_data(fd)) {
3332             void *host_msg;
3333 
3334             host_msg = g_malloc(msg.msg_iov->iov_len);
3335             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3336             ret = fd_trans_target_to_host_data(fd)(host_msg,
3337                                                    msg.msg_iov->iov_len);
3338             if (ret >= 0) {
3339                 msg.msg_iov->iov_base = host_msg;
3340                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3341             }
3342             g_free(host_msg);
3343         } else {
3344             ret = target_to_host_cmsg(&msg, msgp);
3345             if (ret == 0) {
3346                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3347             }
3348         }
3349     } else {
3350         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3351         if (!is_error(ret)) {
3352             len = ret;
3353             if (fd_trans_host_to_target_data(fd)) {
3354                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3355                                                MIN(msg.msg_iov->iov_len, len));
3356             }
3357             if (!is_error(ret)) {
3358                 ret = host_to_target_cmsg(msgp, &msg);
3359             }
3360             if (!is_error(ret)) {
3361                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3362                 msgp->msg_flags = tswap32(msg.msg_flags);
3363                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3364                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3365                                     msg.msg_name, msg.msg_namelen);
3366                     if (ret) {
3367                         goto out;
3368                     }
3369                 }
3370 
3371                 ret = len;
3372             }
3373         }
3374     }
3375 
3376 out:
3377     if (vec) {
3378         unlock_iovec(vec, target_vec, count, !send);
3379     }
3380 out2:
3381     return ret;
3382 }
3383 
3384 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3385                                int flags, int send)
3386 {
3387     abi_long ret;
3388     struct target_msghdr *msgp;
3389 
3390     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3391                           msgp,
3392                           target_msg,
3393                           send ? 1 : 0)) {
3394         return -TARGET_EFAULT;
3395     }
3396     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3397     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3398     return ret;
3399 }
3400 
3401 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3402  * so it might not have this *mmsg-specific flag either.
3403  */
3404 #ifndef MSG_WAITFORONE
3405 #define MSG_WAITFORONE 0x10000
3406 #endif
3407 
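/*
 * sendmmsg/recvmmsg are emulated with a loop over do_sendrecvmsg_locked():
 * each per-message result length is stored in msg_len, and a partially
 * completed batch reports the number of datagrams handled rather than the
 * error (see the comment at the end of the function).
 */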
3408 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3409                                 unsigned int vlen, unsigned int flags,
3410                                 int send)
3411 {
3412     struct target_mmsghdr *mmsgp;
3413     abi_long ret = 0;
3414     int i;
3415 
3416     if (vlen > UIO_MAXIOV) {
3417         vlen = UIO_MAXIOV;
3418     }
3419 
3420     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3421     if (!mmsgp) {
3422         return -TARGET_EFAULT;
3423     }
3424 
3425     for (i = 0; i < vlen; i++) {
3426         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3427         if (is_error(ret)) {
3428             break;
3429         }
3430         mmsgp[i].msg_len = tswap32(ret);
3431         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3432         if (flags & MSG_WAITFORONE) {
3433             flags |= MSG_DONTWAIT;
3434         }
3435     }
3436 
3437     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3438 
3439     /* Return number of datagrams sent if we sent any at all;
3440      * otherwise return the error.
3441      */
3442     if (i) {
3443         return i;
3444     }
3445     return ret;
3446 }
3447 
3448 /* do_accept4() Must return target values and target errnos. */
3449 static abi_long do_accept4(int fd, abi_ulong target_addr,
3450                            abi_ulong target_addrlen_addr, int flags)
3451 {
3452     socklen_t addrlen, ret_addrlen;
3453     void *addr;
3454     abi_long ret;
3455     int host_flags;
3456 
3457     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3458         return -TARGET_EINVAL;
3459     }
3460 
3461     host_flags = 0;
3462     if (flags & TARGET_SOCK_NONBLOCK) {
3463         host_flags |= SOCK_NONBLOCK;
3464     }
3465     if (flags & TARGET_SOCK_CLOEXEC) {
3466         host_flags |= SOCK_CLOEXEC;
3467     }
3468 
3469     if (target_addr == 0) {
3470         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3471     }
3472 
3473     /* Linux returns EFAULT if the addrlen pointer is invalid */
3474     if (get_user_u32(addrlen, target_addrlen_addr))
3475         return -TARGET_EFAULT;
3476 
3477     if ((int)addrlen < 0) {
3478         return -TARGET_EINVAL;
3479     }
3480 
3481     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3482         return -TARGET_EFAULT;
3483     }
3484 
3485     addr = alloca(addrlen);
3486 
3487     ret_addrlen = addrlen;
3488     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3489     if (!is_error(ret)) {
3490         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3491         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3492             ret = -TARGET_EFAULT;
3493         }
3494     }
3495     return ret;
3496 }
3497 
3498 /* do_getpeername() Must return target values and target errnos. */
3499 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3500                                abi_ulong target_addrlen_addr)
3501 {
3502     socklen_t addrlen, ret_addrlen;
3503     void *addr;
3504     abi_long ret;
3505 
3506     if (get_user_u32(addrlen, target_addrlen_addr))
3507         return -TARGET_EFAULT;
3508 
3509     if ((int)addrlen < 0) {
3510         return -TARGET_EINVAL;
3511     }
3512 
3513     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3514         return -TARGET_EFAULT;
3515     }
3516 
3517     addr = alloca(addrlen);
3518 
3519     ret_addrlen = addrlen;
3520     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3521     if (!is_error(ret)) {
3522         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3523         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3524             ret = -TARGET_EFAULT;
3525         }
3526     }
3527     return ret;
3528 }
3529 
3530 /* do_getsockname() Must return target values and target errnos. */
3531 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3532                                abi_ulong target_addrlen_addr)
3533 {
3534     socklen_t addrlen, ret_addrlen;
3535     void *addr;
3536     abi_long ret;
3537 
3538     if (get_user_u32(addrlen, target_addrlen_addr))
3539         return -TARGET_EFAULT;
3540 
3541     if ((int)addrlen < 0) {
3542         return -TARGET_EINVAL;
3543     }
3544 
3545     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3546         return -TARGET_EFAULT;
3547     }
3548 
3549     addr = alloca(addrlen);
3550 
3551     ret_addrlen = addrlen;
3552     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3553     if (!is_error(ret)) {
3554         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3555         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3556             ret = -TARGET_EFAULT;
3557         }
3558     }
3559     return ret;
3560 }
3561 
3562 /* do_socketpair() Must return target values and target errnos. */
3563 static abi_long do_socketpair(int domain, int type, int protocol,
3564                               abi_ulong target_tab_addr)
3565 {
3566     int tab[2];
3567     abi_long ret;
3568 
3569     target_to_host_sock_type(&type);
3570 
3571     ret = get_errno(socketpair(domain, type, protocol, tab));
3572     if (!is_error(ret)) {
3573         if (put_user_s32(tab[0], target_tab_addr)
3574             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3575             ret = -TARGET_EFAULT;
3576     }
3577     return ret;
3578 }
3579 
3580 /* do_sendto() Must return target values and target errnos. */
3581 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3582                           abi_ulong target_addr, socklen_t addrlen)
3583 {
3584     void *addr;
3585     void *host_msg;
3586     void *copy_msg = NULL;
3587     abi_long ret;
3588 
3589     if ((int)addrlen < 0) {
3590         return -TARGET_EINVAL;
3591     }
3592 
3593     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3594     if (!host_msg)
3595         return -TARGET_EFAULT;
3596     if (fd_trans_target_to_host_data(fd)) {
3597         copy_msg = host_msg;
3598         host_msg = g_malloc(len);
3599         memcpy(host_msg, copy_msg, len);
3600         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3601         if (ret < 0) {
3602             goto fail;
3603         }
3604     }
3605     if (target_addr) {
3606         addr = alloca(addrlen+1);
3607         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3608         if (ret) {
3609             goto fail;
3610         }
3611         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3612     } else {
3613         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3614     }
3615 fail:
3616     if (copy_msg) {
3617         g_free(host_msg);
3618         host_msg = copy_msg;
3619     }
3620     unlock_user(host_msg, msg, 0);
3621     return ret;
3622 }
3623 
3624 /* do_recvfrom() Must return target values and target errnos. */
3625 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3626                             abi_ulong target_addr,
3627                             abi_ulong target_addrlen)
3628 {
3629     socklen_t addrlen, ret_addrlen;
3630     void *addr;
3631     void *host_msg;
3632     abi_long ret;
3633 
3634     if (!msg) {
3635         host_msg = NULL;
3636     } else {
3637         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3638         if (!host_msg) {
3639             return -TARGET_EFAULT;
3640         }
3641     }
3642     if (target_addr) {
3643         if (get_user_u32(addrlen, target_addrlen)) {
3644             ret = -TARGET_EFAULT;
3645             goto fail;
3646         }
3647         if ((int)addrlen < 0) {
3648             ret = -TARGET_EINVAL;
3649             goto fail;
3650         }
3651         addr = alloca(addrlen);
3652         ret_addrlen = addrlen;
3653         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3654                                       addr, &ret_addrlen));
3655     } else {
3656         addr = NULL; /* To keep compiler quiet.  */
3657         addrlen = 0; /* To keep compiler quiet.  */
3658         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3659     }
3660     if (!is_error(ret)) {
3661         if (fd_trans_host_to_target_data(fd)) {
3662             abi_long trans;
3663             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3664             if (is_error(trans)) {
3665                 ret = trans;
3666                 goto fail;
3667             }
3668         }
3669         if (target_addr) {
3670             host_to_target_sockaddr(target_addr, addr,
3671                                     MIN(addrlen, ret_addrlen));
3672             if (put_user_u32(ret_addrlen, target_addrlen)) {
3673                 ret = -TARGET_EFAULT;
3674                 goto fail;
3675             }
3676         }
3677         unlock_user(host_msg, msg, len);
3678     } else {
3679 fail:
3680         unlock_user(host_msg, msg, 0);
3681     }
3682     return ret;
3683 }
3684 
3685 #ifdef TARGET_NR_socketcall
3686 /* do_socketcall() must return target values and target errnos. */
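/*
 * Illustrative example: a guest connect() issued via socketcall arrives
 * here as num == TARGET_SYS_CONNECT with vptr pointing at three abi_long
 * slots { sockfd, addr, addrlen } in guest memory; nargs[] below tells us
 * how many slots to fetch before dispatching to do_connect().
 */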
3687 static abi_long do_socketcall(int num, abi_ulong vptr)
3688 {
3689     static const unsigned nargs[] = { /* number of arguments per operation */
3690         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3691         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3692         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3693         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3694         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3695         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3696         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3697         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3698         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3699         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3700         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3701         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3702         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3703         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3704         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3705         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3706         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3707         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3708         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3709         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3710     };
3711     abi_long a[6]; /* max 6 args */
3712     unsigned i;
3713 
3714     /* check the range of the first argument num */
3715     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3716     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3717         return -TARGET_EINVAL;
3718     }
3719     /* ensure we have space for args */
3720     if (nargs[num] > ARRAY_SIZE(a)) {
3721         return -TARGET_EINVAL;
3722     }
3723     /* collect the arguments in a[] according to nargs[] */
3724     for (i = 0; i < nargs[num]; ++i) {
3725         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3726             return -TARGET_EFAULT;
3727         }
3728     }
3729     /* now when we have the args, invoke the appropriate underlying function */
3730     switch (num) {
3731     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3732         return do_socket(a[0], a[1], a[2]);
3733     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3734         return do_bind(a[0], a[1], a[2]);
3735     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3736         return do_connect(a[0], a[1], a[2]);
3737     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3738         return get_errno(listen(a[0], a[1]));
3739     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3740         return do_accept4(a[0], a[1], a[2], 0);
3741     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3742         return do_getsockname(a[0], a[1], a[2]);
3743     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3744         return do_getpeername(a[0], a[1], a[2]);
3745     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3746         return do_socketpair(a[0], a[1], a[2], a[3]);
3747     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3748         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3749     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3750         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3751     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3752         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3753     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3754         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3755     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3756         return get_errno(shutdown(a[0], a[1]));
3757     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3758         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3759     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3760         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3761     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3762         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3763     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3764         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3765     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3766         return do_accept4(a[0], a[1], a[2], a[3]);
3767     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3768         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3769     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3770         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3771     default:
3772         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3773         return -TARGET_EINVAL;
3774     }
3775 }
3776 #endif
3777 
3778 #ifndef TARGET_SEMID64_DS
3779 /* asm-generic version of this struct */
3780 struct target_semid64_ds
3781 {
3782   struct target_ipc_perm sem_perm;
3783   abi_ulong sem_otime;
3784 #if TARGET_ABI_BITS == 32
3785   abi_ulong __unused1;
3786 #endif
3787   abi_ulong sem_ctime;
3788 #if TARGET_ABI_BITS == 32
3789   abi_ulong __unused2;
3790 #endif
3791   abi_ulong sem_nsems;
3792   abi_ulong __unused3;
3793   abi_ulong __unused4;
3794 };
3795 #endif
3796 
3797 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3798                                                abi_ulong target_addr)
3799 {
3800     struct target_ipc_perm *target_ip;
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3804         return -TARGET_EFAULT;
3805     target_ip = &(target_sd->sem_perm);
3806     host_ip->__key = tswap32(target_ip->__key);
3807     host_ip->uid = tswap32(target_ip->uid);
3808     host_ip->gid = tswap32(target_ip->gid);
3809     host_ip->cuid = tswap32(target_ip->cuid);
3810     host_ip->cgid = tswap32(target_ip->cgid);
3811 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812     host_ip->mode = tswap32(target_ip->mode);
3813 #else
3814     host_ip->mode = tswap16(target_ip->mode);
3815 #endif
3816 #if defined(TARGET_PPC)
3817     host_ip->__seq = tswap32(target_ip->__seq);
3818 #else
3819     host_ip->__seq = tswap16(target_ip->__seq);
3820 #endif
3821     unlock_user_struct(target_sd, target_addr, 0);
3822     return 0;
3823 }
3824 
3825 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3826                                                struct ipc_perm *host_ip)
3827 {
3828     struct target_ipc_perm *target_ip;
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3832         return -TARGET_EFAULT;
3833     target_ip = &(target_sd->sem_perm);
3834     target_ip->__key = tswap32(host_ip->__key);
3835     target_ip->uid = tswap32(host_ip->uid);
3836     target_ip->gid = tswap32(host_ip->gid);
3837     target_ip->cuid = tswap32(host_ip->cuid);
3838     target_ip->cgid = tswap32(host_ip->cgid);
3839 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3840     target_ip->mode = tswap32(host_ip->mode);
3841 #else
3842     target_ip->mode = tswap16(host_ip->mode);
3843 #endif
3844 #if defined(TARGET_PPC)
3845     target_ip->__seq = tswap32(host_ip->__seq);
3846 #else
3847     target_ip->__seq = tswap16(host_ip->__seq);
3848 #endif
3849     unlock_user_struct(target_sd, target_addr, 1);
3850     return 0;
3851 }
3852 
3853 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3854                                                abi_ulong target_addr)
3855 {
3856     struct target_semid64_ds *target_sd;
3857 
3858     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3859         return -TARGET_EFAULT;
3860     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3861         return -TARGET_EFAULT;
3862     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3863     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3864     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3865     unlock_user_struct(target_sd, target_addr, 0);
3866     return 0;
3867 }
3868 
3869 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3870                                                struct semid_ds *host_sd)
3871 {
3872     struct target_semid64_ds *target_sd;
3873 
3874     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3875         return -TARGET_EFAULT;
3876     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3877         return -TARGET_EFAULT;
3878     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3879     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3880     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3881     unlock_user_struct(target_sd, target_addr, 1);
3882     return 0;
3883 }
3884 
3885 struct target_seminfo {
3886     int semmap;
3887     int semmni;
3888     int semmns;
3889     int semmnu;
3890     int semmsl;
3891     int semopm;
3892     int semume;
3893     int semusz;
3894     int semvmx;
3895     int semaem;
3896 };
3897 
3898 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3899                                               struct seminfo *host_seminfo)
3900 {
3901     struct target_seminfo *target_seminfo;
3902     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3903         return -TARGET_EFAULT;
3904     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3905     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3906     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3907     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3908     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3909     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3910     __put_user(host_seminfo->semume, &target_seminfo->semume);
3911     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3912     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3913     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3914     unlock_user_struct(target_seminfo, target_addr, 1);
3915     return 0;
3916 }
3917 
3918 union semun {
3919 	int val;
3920 	struct semid_ds *buf;
3921 	unsigned short *array;
3922 	struct seminfo *__buf;
3923 };
3924 
3925 union target_semun {
3926 	int val;
3927 	abi_ulong buf;
3928 	abi_ulong array;
3929 	abi_ulong __buf;
3930 };
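/*
 * Note: the buf/array/__buf members of target_semun hold guest addresses
 * (abi_ulong), whereas union semun carries host pointers; do_semctl()
 * below converts between the two before calling the host semctl().
 */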
3931 
3932 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3933                                                abi_ulong target_addr)
3934 {
3935     int nsems;
3936     unsigned short *array;
3937     union semun semun;
3938     struct semid_ds semid_ds;
3939     int i, ret;
3940 
3941     semun.buf = &semid_ds;
3942 
3943     ret = semctl(semid, 0, IPC_STAT, semun);
3944     if (ret == -1)
3945         return get_errno(ret);
3946 
3947     nsems = semid_ds.sem_nsems;
3948 
3949     *host_array = g_try_new(unsigned short, nsems);
3950     if (!*host_array) {
3951         return -TARGET_ENOMEM;
3952     }
3953     array = lock_user(VERIFY_READ, target_addr,
3954                       nsems*sizeof(unsigned short), 1);
3955     if (!array) {
3956         g_free(*host_array);
3957         return -TARGET_EFAULT;
3958     }
3959 
3960     for(i=0; i<nsems; i++) {
3961         __get_user((*host_array)[i], &array[i]);
3962     }
3963     unlock_user(array, target_addr, 0);
3964 
3965     return 0;
3966 }
3967 
3968 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3969                                                unsigned short **host_array)
3970 {
3971     int nsems;
3972     unsigned short *array;
3973     union semun semun;
3974     struct semid_ds semid_ds;
3975     int i, ret;
3976 
3977     semun.buf = &semid_ds;
3978 
3979     ret = semctl(semid, 0, IPC_STAT, semun);
3980     if (ret == -1)
3981         return get_errno(ret);
3982 
3983     nsems = semid_ds.sem_nsems;
3984 
3985     array = lock_user(VERIFY_WRITE, target_addr,
3986                       nsems*sizeof(unsigned short), 0);
3987     if (!array)
3988         return -TARGET_EFAULT;
3989 
3990     for(i=0; i<nsems; i++) {
3991         __put_user((*host_array)[i], &array[i]);
3992     }
3993     g_free(*host_array);
3994     unlock_user(array, target_addr, 1);
3995 
3996     return 0;
3997 }
3998 
3999 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4000                                  abi_ulong target_arg)
4001 {
4002     union target_semun target_su = { .buf = target_arg };
4003     union semun arg;
4004     struct semid_ds dsarg;
4005     unsigned short *array = NULL;
4006     struct seminfo seminfo;
4007     abi_long ret = -TARGET_EINVAL;
4008     abi_long err;
4009     cmd &= 0xff;
4010 
4011     switch( cmd ) {
4012 	case GETVAL:
4013 	case SETVAL:
4014             /* In 64 bit cross-endian situations, we will erroneously pick up
4015              * the wrong half of the union for the "val" element.  To rectify
4016              * this, the entire 8-byte structure is byteswapped, followed by
4017 	     * a swap of the 4 byte val field. In other cases, the data is
4018 	     * already in proper host byte order. */
4019 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4020 		target_su.buf = tswapal(target_su.buf);
4021 		arg.val = tswap32(target_su.val);
4022 	    } else {
4023 		arg.val = target_su.val;
4024 	    }
4025             ret = get_errno(semctl(semid, semnum, cmd, arg));
4026             break;
4027 	case GETALL:
4028 	case SETALL:
4029             err = target_to_host_semarray(semid, &array, target_su.array);
4030             if (err)
4031                 return err;
4032             arg.array = array;
4033             ret = get_errno(semctl(semid, semnum, cmd, arg));
4034             err = host_to_target_semarray(semid, target_su.array, &array);
4035             if (err)
4036                 return err;
4037             break;
4038 	case IPC_STAT:
4039 	case IPC_SET:
4040 	case SEM_STAT:
4041             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4042             if (err)
4043                 return err;
4044             arg.buf = &dsarg;
4045             ret = get_errno(semctl(semid, semnum, cmd, arg));
4046             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4047             if (err)
4048                 return err;
4049             break;
4050 	case IPC_INFO:
4051 	case SEM_INFO:
4052             arg.__buf = &seminfo;
4053             ret = get_errno(semctl(semid, semnum, cmd, arg));
4054             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4055             if (err)
4056                 return err;
4057             break;
4058 	case IPC_RMID:
4059 	case GETPID:
4060 	case GETNCNT:
4061 	case GETZCNT:
4062             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4063             break;
4064     }
4065 
4066     return ret;
4067 }
4068 
4069 struct target_sembuf {
4070     unsigned short sem_num;
4071     short sem_op;
4072     short sem_flg;
4073 };
4074 
4075 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4076                                              abi_ulong target_addr,
4077                                              unsigned nsops)
4078 {
4079     struct target_sembuf *target_sembuf;
4080     int i;
4081 
4082     target_sembuf = lock_user(VERIFY_READ, target_addr,
4083                               nsops*sizeof(struct target_sembuf), 1);
4084     if (!target_sembuf)
4085         return -TARGET_EFAULT;
4086 
4087     for(i=0; i<nsops; i++) {
4088         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4089         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4090         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4091     }
4092 
4093     unlock_user(target_sembuf, target_addr, 0);
4094 
4095     return 0;
4096 }
4097 
4098 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4099     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4100 
4101 /*
4102  * This macro is required to handle the s390 variants, which pass the
4103  * arguments in a different order than the default.
4104  */
4105 #ifdef __s390x__
4106 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4107   (__nsops), (__timeout), (__sops)
4108 #else
4109 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4110   (__nsops), 0, (__sops), (__timeout)
4111 #endif
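/*
 * Expansion sketch: SEMTIMEDOP_IPC_ARGS(n, s, t) becomes "(n), (t), (s)"
 * on s390x (five-argument sys_ipc with the timeout in the third slot) and
 * "(n), 0, (s), (t)" everywhere else (six-argument sys_ipc, timeout last).
 */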
4112 
4113 static inline abi_long do_semtimedop(int semid,
4114                                      abi_long ptr,
4115                                      unsigned nsops,
4116                                      abi_long timeout, bool time64)
4117 {
4118     struct sembuf *sops;
4119     struct timespec ts, *pts = NULL;
4120     abi_long ret;
4121 
4122     if (timeout) {
4123         pts = &ts;
4124         if (time64) {
4125             if (target_to_host_timespec64(pts, timeout)) {
4126                 return -TARGET_EFAULT;
4127             }
4128         } else {
4129             if (target_to_host_timespec(pts, timeout)) {
4130                 return -TARGET_EFAULT;
4131             }
4132         }
4133     }
4134 
4135     if (nsops > TARGET_SEMOPM) {
4136         return -TARGET_E2BIG;
4137     }
4138 
4139     sops = g_new(struct sembuf, nsops);
4140 
4141     if (target_to_host_sembuf(sops, ptr, nsops)) {
4142         g_free(sops);
4143         return -TARGET_EFAULT;
4144     }
4145 
4146     ret = -TARGET_ENOSYS;
4147 #ifdef __NR_semtimedop
4148     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4149 #endif
4150 #ifdef __NR_ipc
4151     if (ret == -TARGET_ENOSYS) {
4152         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4153                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4154     }
4155 #endif
4156     g_free(sops);
4157     return ret;
4158 }
4159 #endif
4160 
4161 struct target_msqid_ds
4162 {
4163     struct target_ipc_perm msg_perm;
4164     abi_ulong msg_stime;
4165 #if TARGET_ABI_BITS == 32
4166     abi_ulong __unused1;
4167 #endif
4168     abi_ulong msg_rtime;
4169 #if TARGET_ABI_BITS == 32
4170     abi_ulong __unused2;
4171 #endif
4172     abi_ulong msg_ctime;
4173 #if TARGET_ABI_BITS == 32
4174     abi_ulong __unused3;
4175 #endif
4176     abi_ulong __msg_cbytes;
4177     abi_ulong msg_qnum;
4178     abi_ulong msg_qbytes;
4179     abi_ulong msg_lspid;
4180     abi_ulong msg_lrpid;
4181     abi_ulong __unused4;
4182     abi_ulong __unused5;
4183 };
4184 
4185 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4186                                                abi_ulong target_addr)
4187 {
4188     struct target_msqid_ds *target_md;
4189 
4190     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4191         return -TARGET_EFAULT;
4192     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4193         return -TARGET_EFAULT;
4194     host_md->msg_stime = tswapal(target_md->msg_stime);
4195     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4196     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4197     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4198     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4199     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4200     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4201     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4202     unlock_user_struct(target_md, target_addr, 0);
4203     return 0;
4204 }
4205 
4206 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4207                                                struct msqid_ds *host_md)
4208 {
4209     struct target_msqid_ds *target_md;
4210 
4211     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4212         return -TARGET_EFAULT;
4213     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4214         return -TARGET_EFAULT;
4215     target_md->msg_stime = tswapal(host_md->msg_stime);
4216     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4217     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4218     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4219     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4220     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4221     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4222     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4223     unlock_user_struct(target_md, target_addr, 1);
4224     return 0;
4225 }
4226 
4227 struct target_msginfo {
4228     int msgpool;
4229     int msgmap;
4230     int msgmax;
4231     int msgmnb;
4232     int msgmni;
4233     int msgssz;
4234     int msgtql;
4235     unsigned short int msgseg;
4236 };
4237 
4238 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4239                                               struct msginfo *host_msginfo)
4240 {
4241     struct target_msginfo *target_msginfo;
4242     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4243         return -TARGET_EFAULT;
4244     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4245     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4246     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4247     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4248     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4249     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4250     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4251     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4252     unlock_user_struct(target_msginfo, target_addr, 1);
4253     return 0;
4254 }
4255 
4256 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4257 {
4258     struct msqid_ds dsarg;
4259     struct msginfo msginfo;
4260     abi_long ret = -TARGET_EINVAL;
4261 
4262     cmd &= 0xff;
4263 
4264     switch (cmd) {
4265     case IPC_STAT:
4266     case IPC_SET:
4267     case MSG_STAT:
4268         if (target_to_host_msqid_ds(&dsarg,ptr))
4269             return -TARGET_EFAULT;
4270         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4271         if (host_to_target_msqid_ds(ptr,&dsarg))
4272             return -TARGET_EFAULT;
4273         break;
4274     case IPC_RMID:
4275         ret = get_errno(msgctl(msgid, cmd, NULL));
4276         break;
4277     case IPC_INFO:
4278     case MSG_INFO:
4279         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4280         if (host_to_target_msginfo(ptr, &msginfo))
4281             return -TARGET_EFAULT;
4282         break;
4283     }
4284 
4285     return ret;
4286 }
4287 
4288 struct target_msgbuf {
4289     abi_long mtype;
4290     char	mtext[1];
4291 };
4292 
4293 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4294                                  ssize_t msgsz, int msgflg)
4295 {
4296     struct target_msgbuf *target_mb;
4297     struct msgbuf *host_mb;
4298     abi_long ret = 0;
4299 
4300     if (msgsz < 0) {
4301         return -TARGET_EINVAL;
4302     }
4303 
4304     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4305         return -TARGET_EFAULT;
4306     host_mb = g_try_malloc(msgsz + sizeof(long));
4307     if (!host_mb) {
4308         unlock_user_struct(target_mb, msgp, 0);
4309         return -TARGET_ENOMEM;
4310     }
4311     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4312     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4313     ret = -TARGET_ENOSYS;
4314 #ifdef __NR_msgsnd
4315     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4316 #endif
4317 #ifdef __NR_ipc
4318     if (ret == -TARGET_ENOSYS) {
4319 #ifdef __s390x__
4320         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4321                                  host_mb));
4322 #else
4323         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4324                                  host_mb, 0));
4325 #endif
4326     }
4327 #endif
4328     g_free(host_mb);
4329     unlock_user_struct(target_mb, msgp, 0);
4330 
4331     return ret;
4332 }
4333 
4334 #ifdef __NR_ipc
4335 #if defined(__sparc__)
4336 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4337 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4338 #elif defined(__s390x__)
4339 /* The s390 sys_ipc variant has only five parameters.  */
4340 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4341     ((long int[]){(long int)__msgp, __msgtyp})
4342 #else
4343 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4344     ((long int[]){(long int)__msgp, __msgtyp}), 0
4345 #endif
4346 #endif
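/*
 * Expansion sketch: MSGRCV_ARGS(p, t) yields the plain "p, t" pair on
 * SPARC, a five-argument s390x form passing both through a small on-stack
 * array, and the generic kludge of that array plus a trailing 0 elsewhere.
 */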
4347 
4348 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4349                                  ssize_t msgsz, abi_long msgtyp,
4350                                  int msgflg)
4351 {
4352     struct target_msgbuf *target_mb;
4353     char *target_mtext;
4354     struct msgbuf *host_mb;
4355     abi_long ret = 0;
4356 
4357     if (msgsz < 0) {
4358         return -TARGET_EINVAL;
4359     }
4360 
4361     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4362         return -TARGET_EFAULT;
4363 
4364     host_mb = g_try_malloc(msgsz + sizeof(long));
4365     if (!host_mb) {
4366         ret = -TARGET_ENOMEM;
4367         goto end;
4368     }
4369     ret = -TARGET_ENOSYS;
4370 #ifdef __NR_msgrcv
4371     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4372 #endif
4373 #ifdef __NR_ipc
4374     if (ret == -TARGET_ENOSYS) {
4375         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4376                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4377     }
4378 #endif
4379 
4380     if (ret > 0) {
4381         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4382         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4383         if (!target_mtext) {
4384             ret = -TARGET_EFAULT;
4385             goto end;
4386         }
4387         memcpy(target_mb->mtext, host_mb->mtext, ret);
4388         unlock_user(target_mtext, target_mtext_addr, ret);
4389     }
4390 
4391     target_mb->mtype = tswapal(host_mb->mtype);
4392 
4393 end:
4394     if (target_mb)
4395         unlock_user_struct(target_mb, msgp, 1);
4396     g_free(host_mb);
4397     return ret;
4398 }
4399 
4400 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4401                                                abi_ulong target_addr)
4402 {
4403     struct target_shmid_ds *target_sd;
4404 
4405     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4406         return -TARGET_EFAULT;
4407     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4408         return -TARGET_EFAULT;
4409     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4410     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4411     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4412     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4413     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4414     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4415     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4416     unlock_user_struct(target_sd, target_addr, 0);
4417     return 0;
4418 }
4419 
4420 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4421                                                struct shmid_ds *host_sd)
4422 {
4423     struct target_shmid_ds *target_sd;
4424 
4425     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4426         return -TARGET_EFAULT;
4427     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4428         return -TARGET_EFAULT;
4429     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4430     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4431     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4432     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4433     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4434     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4435     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4436     unlock_user_struct(target_sd, target_addr, 1);
4437     return 0;
4438 }
4439 
4440 struct  target_shminfo {
4441     abi_ulong shmmax;
4442     abi_ulong shmmin;
4443     abi_ulong shmmni;
4444     abi_ulong shmseg;
4445     abi_ulong shmall;
4446 };
4447 
4448 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4449                                               struct shminfo *host_shminfo)
4450 {
4451     struct target_shminfo *target_shminfo;
4452     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4453         return -TARGET_EFAULT;
4454     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4455     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4456     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4457     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4458     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4459     unlock_user_struct(target_shminfo, target_addr, 1);
4460     return 0;
4461 }
4462 
4463 struct target_shm_info {
4464     int used_ids;
4465     abi_ulong shm_tot;
4466     abi_ulong shm_rss;
4467     abi_ulong shm_swp;
4468     abi_ulong swap_attempts;
4469     abi_ulong swap_successes;
4470 };
4471 
4472 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4473                                                struct shm_info *host_shm_info)
4474 {
4475     struct target_shm_info *target_shm_info;
4476     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4477         return -TARGET_EFAULT;
4478     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4479     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4480     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4481     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4482     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4483     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4484     unlock_user_struct(target_shm_info, target_addr, 1);
4485     return 0;
4486 }
4487 
4488 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4489 {
4490     struct shmid_ds dsarg;
4491     struct shminfo shminfo;
4492     struct shm_info shm_info;
4493     abi_long ret = -TARGET_EINVAL;
4494 
4495     cmd &= 0xff;
4496 
4497     switch(cmd) {
4498     case IPC_STAT:
4499     case IPC_SET:
4500     case SHM_STAT:
4501         if (target_to_host_shmid_ds(&dsarg, buf))
4502             return -TARGET_EFAULT;
4503         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4504         if (host_to_target_shmid_ds(buf, &dsarg))
4505             return -TARGET_EFAULT;
4506         break;
4507     case IPC_INFO:
4508         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4509         if (host_to_target_shminfo(buf, &shminfo))
4510             return -TARGET_EFAULT;
4511         break;
4512     case SHM_INFO:
4513         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4514         if (host_to_target_shm_info(buf, &shm_info))
4515             return -TARGET_EFAULT;
4516         break;
4517     case IPC_RMID:
4518     case SHM_LOCK:
4519     case SHM_UNLOCK:
4520         ret = get_errno(shmctl(shmid, cmd, NULL));
4521         break;
4522     }
4523 
4524     return ret;
4525 }
4526 
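/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * do_shmctl() above translates the guest's struct shmid_ds to and from the
 * host layout around a single host shmctl() call.  A guest program that
 * exercises this path might look roughly like the following (standard SysV
 * shared-memory API; the variable names are hypothetical):
 *
 *     #include <sys/ipc.h>
 *     #include <sys/shm.h>
 *
 *     int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *     void *p = shmat(id, NULL, 0);       // handled by target_shmat()
 *     struct shmid_ds ds;
 *     shmctl(id, IPC_STAT, &ds);          // IPC_STAT case in do_shmctl()
 *     shmdt(p);                           // handled by target_shmdt()
 *     shmctl(id, IPC_RMID, NULL);         // "no buffer" case in do_shmctl()
 */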
4527 #ifdef TARGET_NR_ipc
4528 /* ??? This only works with linear mappings.  */
4529 /* do_ipc() must return target values and target errnos. */
4530 static abi_long do_ipc(CPUArchState *cpu_env,
4531                        unsigned int call, abi_long first,
4532                        abi_long second, abi_long third,
4533                        abi_long ptr, abi_long fifth)
4534 {
4535     int version;
4536     abi_long ret = 0;
4537 
4538     version = call >> 16;
4539     call &= 0xffff;
4540 
4541     switch (call) {
4542     case IPCOP_semop:
4543         ret = do_semtimedop(first, ptr, second, 0, false);
4544         break;
4545     case IPCOP_semtimedop:
4546     /*
4547      * The s390 sys_ipc variant has only five parameters instead of six
4548      * (as for the default variant) and the only difference is the handling of
4549      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4550      * to a struct timespec, whereas the generic variant uses the fifth parameter.
4551      */
4552 #if defined(TARGET_S390X)
4553         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4554 #else
4555         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4556 #endif
4557         break;
4558 
4559     case IPCOP_semget:
4560         ret = get_errno(semget(first, second, third));
4561         break;
4562 
4563     case IPCOP_semctl: {
4564         /* The semun argument to semctl is passed by value, so dereference the
4565          * ptr argument. */
4566         abi_ulong atptr;
4567         get_user_ual(atptr, ptr);
4568         ret = do_semctl(first, second, third, atptr);
4569         break;
4570     }
4571 
4572     case IPCOP_msgget:
4573         ret = get_errno(msgget(first, second));
4574         break;
4575 
4576     case IPCOP_msgsnd:
4577         ret = do_msgsnd(first, ptr, second, third);
4578         break;
4579 
4580     case IPCOP_msgctl:
4581         ret = do_msgctl(first, second, ptr);
4582         break;
4583 
4584     case IPCOP_msgrcv:
4585         switch (version) {
4586         case 0:
4587             {
4588                 struct target_ipc_kludge {
4589                     abi_long msgp;
4590                     abi_long msgtyp;
4591                 } *tmp;
4592 
4593                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4594                     ret = -TARGET_EFAULT;
4595                     break;
4596                 }
4597 
4598                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4599 
4600                 unlock_user_struct(tmp, ptr, 0);
4601                 break;
4602             }
4603         default:
4604             ret = do_msgrcv(first, ptr, second, fifth, third);
4605         }
4606         break;
4607 
4608     case IPCOP_shmat:
4609         switch (version) {
4610         default:
4611         {
4612             abi_ulong raddr;
4613             raddr = target_shmat(cpu_env, first, ptr, second);
4614             if (is_error(raddr))
4615                 return get_errno(raddr);
4616             if (put_user_ual(raddr, third))
4617                 return -TARGET_EFAULT;
4618             break;
4619         }
4620         case 1:
4621             ret = -TARGET_EINVAL;
4622             break;
4623         }
4624 	break;
4625     case IPCOP_shmdt:
4626         ret = target_shmdt(ptr);
4627 	break;
4628 
4629     case IPCOP_shmget:
4630 	/* IPC_* flag values are the same on all linux platforms */
4631 	ret = get_errno(shmget(first, second, third));
4632 	break;
4633 
4634 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4635     case IPCOP_shmctl:
4636         ret = do_shmctl(first, second, ptr);
4637         break;
4638     default:
4639         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4640                       call, version);
4641 	ret = -TARGET_ENOSYS;
4642 	break;
4643     }
4644     return ret;
4645 }
4646 #endif
4647 
4648 /* kernel structure types definitions */
4649 
4650 #define STRUCT(name, ...) STRUCT_ ## name,
4651 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4652 enum {
4653 #include "syscall_types.h"
4654 STRUCT_MAX
4655 };
4656 #undef STRUCT
4657 #undef STRUCT_SPECIAL
4658 
4659 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4660 #define STRUCT_SPECIAL(name)
4661 #include "syscall_types.h"
4662 #undef STRUCT
4663 #undef STRUCT_SPECIAL
4664 
4665 #define MAX_STRUCT_SIZE 4096
4666 
4667 #ifdef CONFIG_FIEMAP
4668 /* So fiemap access checks don't overflow on 32 bit systems.
4669  * This is very slightly smaller than the limit imposed by
4670  * the underlying kernel.
4671  */
4672 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4673                             / sizeof(struct fiemap_extent))
4674 
4675 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4676                                        int fd, int cmd, abi_long arg)
4677 {
4678     /* The parameter for this ioctl is a struct fiemap followed
4679      * by an array of struct fiemap_extent whose size is set
4680      * in fiemap->fm_extent_count. The array is filled in by the
4681      * ioctl.
4682      */
4683     int target_size_in, target_size_out;
4684     struct fiemap *fm;
4685     const argtype *arg_type = ie->arg_type;
4686     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4687     void *argptr, *p;
4688     abi_long ret;
4689     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4690     uint32_t outbufsz;
4691     int free_fm = 0;
4692 
4693     assert(arg_type[0] == TYPE_PTR);
4694     assert(ie->access == IOC_RW);
4695     arg_type++;
4696     target_size_in = thunk_type_size(arg_type, 0);
4697     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4698     if (!argptr) {
4699         return -TARGET_EFAULT;
4700     }
4701     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4702     unlock_user(argptr, arg, 0);
4703     fm = (struct fiemap *)buf_temp;
4704     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4705         return -TARGET_EINVAL;
4706     }
4707 
4708     outbufsz = sizeof (*fm) +
4709         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4710 
4711     if (outbufsz > MAX_STRUCT_SIZE) {
4712         /* We can't fit all the extents into the fixed size buffer.
4713          * Allocate one that is large enough and use it instead.
4714          */
4715         fm = g_try_malloc(outbufsz);
4716         if (!fm) {
4717             return -TARGET_ENOMEM;
4718         }
4719         memcpy(fm, buf_temp, sizeof(struct fiemap));
4720         free_fm = 1;
4721     }
4722     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4723     if (!is_error(ret)) {
4724         target_size_out = target_size_in;
4725         /* An extent_count of 0 means we were only counting the extents
4726          * so there are no structs to copy
4727          */
4728         if (fm->fm_extent_count != 0) {
4729             target_size_out += fm->fm_mapped_extents * extent_size;
4730         }
4731         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4732         if (!argptr) {
4733             ret = -TARGET_EFAULT;
4734         } else {
4735             /* Convert the struct fiemap */
4736             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4737             if (fm->fm_extent_count != 0) {
4738                 p = argptr + target_size_in;
4739                 /* ...and then all the struct fiemap_extents */
4740                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4741                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4742                                   THUNK_TARGET);
4743                     p += extent_size;
4744                 }
4745             }
4746             unlock_user(argptr, arg, target_size_out);
4747         }
4748     }
4749     if (free_fm) {
4750         g_free(fm);
4751     }
4752     return ret;
4753 }
4754 #endif
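/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * The FS_IOC_FIEMAP handler above copes with the variable-length extent
 * array that follows struct fiemap.  A guest caller typically sizes the
 * buffer itself, e.g. (an extent count of 32 is an arbitrary example and
 * fd is a hypothetical open file descriptor):
 *
 *     #include <linux/fs.h>
 *     #include <linux/fiemap.h>
 *
 *     size_t sz = sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent);
 *     struct fiemap *fm = calloc(1, sz);
 *     fm->fm_start = 0;
 *     fm->fm_length = FIEMAP_MAX_OFFSET;
 *     fm->fm_extent_count = 32;           // 0 would only count the extents
 *     if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
 *         for (unsigned i = 0; i < fm->fm_mapped_extents; i++) {
 *             // fm->fm_extents[i].fe_logical / fe_physical / fe_length
 *         }
 *     }
 */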
4755 
4756 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4757                                 int fd, int cmd, abi_long arg)
4758 {
4759     const argtype *arg_type = ie->arg_type;
4760     int target_size;
4761     void *argptr;
4762     int ret;
4763     struct ifconf *host_ifconf;
4764     uint32_t outbufsz;
4765     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4766     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4767     int target_ifreq_size;
4768     int nb_ifreq;
4769     int free_buf = 0;
4770     int i;
4771     int target_ifc_len;
4772     abi_long target_ifc_buf;
4773     int host_ifc_len;
4774     char *host_ifc_buf;
4775 
4776     assert(arg_type[0] == TYPE_PTR);
4777     assert(ie->access == IOC_RW);
4778 
4779     arg_type++;
4780     target_size = thunk_type_size(arg_type, 0);
4781 
4782     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4783     if (!argptr)
4784         return -TARGET_EFAULT;
4785     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4786     unlock_user(argptr, arg, 0);
4787 
4788     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4789     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4790     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4791 
4792     if (target_ifc_buf != 0) {
4793         target_ifc_len = host_ifconf->ifc_len;
4794         nb_ifreq = target_ifc_len / target_ifreq_size;
4795         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4796 
4797         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4798         if (outbufsz > MAX_STRUCT_SIZE) {
4799             /*
4800              * We can't fit all the ifreq entries into the fixed size buffer.
4801              * Allocate one that is large enough and use it instead.
4802              */
4803             host_ifconf = g_try_malloc(outbufsz);
4804             if (!host_ifconf) {
4805                 return -TARGET_ENOMEM;
4806             }
4807             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4808             free_buf = 1;
4809         }
4810         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4811 
4812         host_ifconf->ifc_len = host_ifc_len;
4813     } else {
4814       host_ifc_buf = NULL;
4815     }
4816     host_ifconf->ifc_buf = host_ifc_buf;
4817 
4818     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4819     if (!is_error(ret)) {
4820 	/* convert host ifc_len to target ifc_len */
4821 
4822         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4823         target_ifc_len = nb_ifreq * target_ifreq_size;
4824         host_ifconf->ifc_len = target_ifc_len;
4825 
4826 	/* restore target ifc_buf */
4827 
4828         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4829 
4830 	/* copy struct ifconf to target user */
4831 
4832         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4833         if (!argptr)
4834             return -TARGET_EFAULT;
4835         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4836         unlock_user(argptr, arg, target_size);
4837 
4838         if (target_ifc_buf != 0) {
4839             /* copy ifreq[] to target user */
4840             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4841             for (i = 0; i < nb_ifreq ; i++) {
4842                 thunk_convert(argptr + i * target_ifreq_size,
4843                               host_ifc_buf + i * sizeof(struct ifreq),
4844                               ifreq_arg_type, THUNK_TARGET);
4845             }
4846             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4847         }
4848     }
4849 
4850     if (free_buf) {
4851         g_free(host_ifconf);
4852     }
4853 
4854     return ret;
4855 }
4856 
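/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * do_ioctl_ifconf() above rewrites ifc_len and the ifc_buf pointer because
 * the guest and host struct ifreq sizes can differ.  The guest-side usage it
 * supports is the classic interface enumeration pattern:
 *
 *     #include <sys/ioctl.h>
 *     #include <net/if.h>
 *
 *     struct ifreq reqs[16];
 *     struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
 *     int s = socket(AF_INET, SOCK_DGRAM, 0);
 *     if (ioctl(s, SIOCGIFCONF, &ifc) == 0) {
 *         int n = ifc.ifc_len / sizeof(struct ifreq);
 *         // reqs[0..n-1].ifr_name / ifr_addr are now valid
 *     }
 */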
4857 #if defined(CONFIG_USBFS)
4858 #if HOST_LONG_BITS > 64
4859 #error USBDEVFS thunks do not support >64 bit hosts yet.
4860 #endif
4861 struct live_urb {
4862     uint64_t target_urb_adr;
4863     uint64_t target_buf_adr;
4864     char *target_buf_ptr;
4865     struct usbdevfs_urb host_urb;
4866 };
4867 
4868 static GHashTable *usbdevfs_urb_hashtable(void)
4869 {
4870     static GHashTable *urb_hashtable;
4871 
4872     if (!urb_hashtable) {
4873         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4874     }
4875     return urb_hashtable;
4876 }
4877 
4878 static void urb_hashtable_insert(struct live_urb *urb)
4879 {
4880     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4881     g_hash_table_insert(urb_hashtable, urb, urb);
4882 }
4883 
4884 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4885 {
4886     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4887     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4888 }
4889 
4890 static void urb_hashtable_remove(struct live_urb *urb)
4891 {
4892     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4893     g_hash_table_remove(urb_hashtable, urb);
4894 }
4895 
4896 static abi_long
4897 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4898                           int fd, int cmd, abi_long arg)
4899 {
4900     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4901     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4902     struct live_urb *lurb;
4903     void *argptr;
4904     uint64_t hurb;
4905     int target_size;
4906     uintptr_t target_urb_adr;
4907     abi_long ret;
4908 
4909     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4910 
4911     memset(buf_temp, 0, sizeof(uint64_t));
4912     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4913     if (is_error(ret)) {
4914         return ret;
4915     }
4916 
4917     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4918     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4919     if (!lurb->target_urb_adr) {
4920         return -TARGET_EFAULT;
4921     }
4922     urb_hashtable_remove(lurb);
4923     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4924         lurb->host_urb.buffer_length);
4925     lurb->target_buf_ptr = NULL;
4926 
4927     /* restore the guest buffer pointer */
4928     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4929 
4930     /* update the guest urb struct */
4931     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4932     if (!argptr) {
4933         g_free(lurb);
4934         return -TARGET_EFAULT;
4935     }
4936     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4937     unlock_user(argptr, lurb->target_urb_adr, target_size);
4938 
4939     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4940     /* write back the urb handle */
4941     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4942     if (!argptr) {
4943         g_free(lurb);
4944         return -TARGET_EFAULT;
4945     }
4946 
4947     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4948     target_urb_adr = lurb->target_urb_adr;
4949     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4950     unlock_user(argptr, arg, target_size);
4951 
4952     g_free(lurb);
4953     return ret;
4954 }
4955 
4956 static abi_long
4957 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4958                              uint8_t *buf_temp __attribute__((unused)),
4959                              int fd, int cmd, abi_long arg)
4960 {
4961     struct live_urb *lurb;
4962 
4963     /* map target address back to host URB with metadata. */
4964     lurb = urb_hashtable_lookup(arg);
4965     if (!lurb) {
4966         return -TARGET_EFAULT;
4967     }
4968     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4969 }
4970 
4971 static abi_long
4972 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4973                             int fd, int cmd, abi_long arg)
4974 {
4975     const argtype *arg_type = ie->arg_type;
4976     int target_size;
4977     abi_long ret;
4978     void *argptr;
4979     int rw_dir;
4980     struct live_urb *lurb;
4981 
4982     /*
4983      * each submitted URB needs to map to a unique ID for the
4984      * kernel, and that unique ID needs to be a pointer to
4985      * host memory.  hence, we need to malloc for each URB.
4986      * isochronous transfers have a variable length struct.
4987      */
4988     arg_type++;
4989     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4990 
4991     /* construct host copy of urb and metadata */
4992     lurb = g_try_new0(struct live_urb, 1);
4993     if (!lurb) {
4994         return -TARGET_ENOMEM;
4995     }
4996 
4997     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4998     if (!argptr) {
4999         g_free(lurb);
5000         return -TARGET_EFAULT;
5001     }
5002     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5003     unlock_user(argptr, arg, 0);
5004 
5005     lurb->target_urb_adr = arg;
5006     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5007 
5008     /* buffer space used depends on endpoint type so lock the entire buffer */
5009     /* control type urbs should check the buffer contents for true direction */
5010     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5011     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5012         lurb->host_urb.buffer_length, 1);
5013     if (lurb->target_buf_ptr == NULL) {
5014         g_free(lurb);
5015         return -TARGET_EFAULT;
5016     }
5017 
5018     /* update buffer pointer in host copy */
5019     lurb->host_urb.buffer = lurb->target_buf_ptr;
5020 
5021     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5022     if (is_error(ret)) {
5023         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5024         g_free(lurb);
5025     } else {
5026         urb_hashtable_insert(lurb);
5027     }
5028 
5029     return ret;
5030 }
5031 #endif /* CONFIG_USBFS */
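/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * The handlers above keep a live_urb wrapper per submitted URB so that the
 * pointer handed to the host kernel can be mapped back to the guest URB when
 * it is reaped.  From the guest's point of view the usual pairing is roughly
 * (usbfd, buf and the bulk-IN endpoint 0x81 are hypothetical examples):
 *
 *     #include <linux/usbdevice_fs.h>
 *
 *     struct usbdevfs_urb urb = { 0 }, *done;
 *     urb.type = USBDEVFS_URB_TYPE_BULK;
 *     urb.endpoint = 0x81;                      // IN endpoint
 *     urb.buffer = buf;
 *     urb.buffer_length = sizeof(buf);
 *     ioctl(usbfd, USBDEVFS_SUBMITURB, &urb);   // do_ioctl_usbdevfs_submiturb
 *     ioctl(usbfd, USBDEVFS_REAPURB, &done);    // do_ioctl_usbdevfs_reapurb
 *     // done now points back at urb; urb.actual_length has been filled in
 */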
5032 
5033 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5034                             int cmd, abi_long arg)
5035 {
5036     void *argptr;
5037     struct dm_ioctl *host_dm;
5038     abi_long guest_data;
5039     uint32_t guest_data_size;
5040     int target_size;
5041     const argtype *arg_type = ie->arg_type;
5042     abi_long ret;
5043     void *big_buf = NULL;
5044     char *host_data;
5045 
5046     arg_type++;
5047     target_size = thunk_type_size(arg_type, 0);
5048     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5049     if (!argptr) {
5050         ret = -TARGET_EFAULT;
5051         goto out;
5052     }
5053     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5054     unlock_user(argptr, arg, 0);
5055 
5056     /* buf_temp is too small, so fetch things into a bigger buffer */
5057     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5058     memcpy(big_buf, buf_temp, target_size);
5059     buf_temp = big_buf;
5060     host_dm = big_buf;
5061 
5062     guest_data = arg + host_dm->data_start;
5063     if ((guest_data - arg) < 0) {
5064         ret = -TARGET_EINVAL;
5065         goto out;
5066     }
5067     guest_data_size = host_dm->data_size - host_dm->data_start;
5068     host_data = (char*)host_dm + host_dm->data_start;
5069 
5070     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5071     if (!argptr) {
5072         ret = -TARGET_EFAULT;
5073         goto out;
5074     }
5075 
5076     switch (ie->host_cmd) {
5077     case DM_REMOVE_ALL:
5078     case DM_LIST_DEVICES:
5079     case DM_DEV_CREATE:
5080     case DM_DEV_REMOVE:
5081     case DM_DEV_SUSPEND:
5082     case DM_DEV_STATUS:
5083     case DM_DEV_WAIT:
5084     case DM_TABLE_STATUS:
5085     case DM_TABLE_CLEAR:
5086     case DM_TABLE_DEPS:
5087     case DM_LIST_VERSIONS:
5088         /* no input data */
5089         break;
5090     case DM_DEV_RENAME:
5091     case DM_DEV_SET_GEOMETRY:
5092         /* data contains only strings */
5093         memcpy(host_data, argptr, guest_data_size);
5094         break;
5095     case DM_TARGET_MSG:
5096         memcpy(host_data, argptr, guest_data_size);
5097         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5098         break;
5099     case DM_TABLE_LOAD:
5100     {
5101         void *gspec = argptr;
5102         void *cur_data = host_data;
5103         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5104         int spec_size = thunk_type_size(dm_arg_type, 0);
5105         int i;
5106 
5107         for (i = 0; i < host_dm->target_count; i++) {
5108             struct dm_target_spec *spec = cur_data;
5109             uint32_t next;
5110             int slen;
5111 
5112             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5113             slen = strlen((char*)gspec + spec_size) + 1;
5114             next = spec->next;
5115             spec->next = sizeof(*spec) + slen;
5116             strcpy((char*)&spec[1], gspec + spec_size);
5117             gspec += next;
5118             cur_data += spec->next;
5119         }
5120         break;
5121     }
5122     default:
5123         ret = -TARGET_EINVAL;
5124         unlock_user(argptr, guest_data, 0);
5125         goto out;
5126     }
5127     unlock_user(argptr, guest_data, 0);
5128 
5129     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5130     if (!is_error(ret)) {
5131         guest_data = arg + host_dm->data_start;
5132         guest_data_size = host_dm->data_size - host_dm->data_start;
5133         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5134         switch (ie->host_cmd) {
5135         case DM_REMOVE_ALL:
5136         case DM_DEV_CREATE:
5137         case DM_DEV_REMOVE:
5138         case DM_DEV_RENAME:
5139         case DM_DEV_SUSPEND:
5140         case DM_DEV_STATUS:
5141         case DM_TABLE_LOAD:
5142         case DM_TABLE_CLEAR:
5143         case DM_TARGET_MSG:
5144         case DM_DEV_SET_GEOMETRY:
5145             /* no return data */
5146             break;
5147         case DM_LIST_DEVICES:
5148         {
5149             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5150             uint32_t remaining_data = guest_data_size;
5151             void *cur_data = argptr;
5152             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5153             int nl_size = 12; /* can't use thunk_size due to alignment */
5154 
5155             while (1) {
5156                 uint32_t next = nl->next;
5157                 if (next) {
5158                     nl->next = nl_size + (strlen(nl->name) + 1);
5159                 }
5160                 if (remaining_data < nl->next) {
5161                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5162                     break;
5163                 }
5164                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5165                 strcpy(cur_data + nl_size, nl->name);
5166                 cur_data += nl->next;
5167                 remaining_data -= nl->next;
5168                 if (!next) {
5169                     break;
5170                 }
5171                 nl = (void*)nl + next;
5172             }
5173             break;
5174         }
5175         case DM_DEV_WAIT:
5176         case DM_TABLE_STATUS:
5177         {
5178             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5179             void *cur_data = argptr;
5180             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5181             int spec_size = thunk_type_size(dm_arg_type, 0);
5182             int i;
5183 
5184             for (i = 0; i < host_dm->target_count; i++) {
5185                 uint32_t next = spec->next;
5186                 int slen = strlen((char*)&spec[1]) + 1;
5187                 spec->next = (cur_data - argptr) + spec_size + slen;
5188                 if (guest_data_size < spec->next) {
5189                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5190                     break;
5191                 }
5192                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5193                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5194                 cur_data = argptr + spec->next;
5195                 spec = (void*)host_dm + host_dm->data_start + next;
5196             }
5197             break;
5198         }
5199         case DM_TABLE_DEPS:
5200         {
5201             void *hdata = (void*)host_dm + host_dm->data_start;
5202             int count = *(uint32_t*)hdata;
5203             uint64_t *hdev = hdata + 8;
5204             uint64_t *gdev = argptr + 8;
5205             int i;
5206 
5207             *(uint32_t*)argptr = tswap32(count);
5208             for (i = 0; i < count; i++) {
5209                 *gdev = tswap64(*hdev);
5210                 gdev++;
5211                 hdev++;
5212             }
5213             break;
5214         }
5215         case DM_LIST_VERSIONS:
5216         {
5217             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5218             uint32_t remaining_data = guest_data_size;
5219             void *cur_data = argptr;
5220             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5221             int vers_size = thunk_type_size(dm_arg_type, 0);
5222 
5223             while (1) {
5224                 uint32_t next = vers->next;
5225                 if (next) {
5226                     vers->next = vers_size + (strlen(vers->name) + 1);
5227                 }
5228                 if (remaining_data < vers->next) {
5229                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5230                     break;
5231                 }
5232                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5233                 strcpy(cur_data + vers_size, vers->name);
5234                 cur_data += vers->next;
5235                 remaining_data -= vers->next;
5236                 if (!next) {
5237                     break;
5238                 }
5239                 vers = (void*)vers + next;
5240             }
5241             break;
5242         }
5243         default:
5244             unlock_user(argptr, guest_data, 0);
5245             ret = -TARGET_EINVAL;
5246             goto out;
5247         }
5248         unlock_user(argptr, guest_data, guest_data_size);
5249 
5250         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5251         if (!argptr) {
5252             ret = -TARGET_EFAULT;
5253             goto out;
5254         }
5255         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5256         unlock_user(argptr, arg, target_size);
5257     }
5258 out:
5259     g_free(big_buf);
5260     return ret;
5261 }
5262 
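/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Every device-mapper ioctl passes a struct dm_ioctl header followed by a
 * command-specific payload starting at data_start, which is what
 * do_ioctl_dm() above converts in both directions.  A minimal guest-side
 * request (ordinarily issued via libdevmapper; the 16 KiB size is arbitrary)
 * might look roughly like:
 *
 *     #include <linux/dm-ioctl.h>
 *
 *     int ctl = open("/dev/mapper/control", O_RDWR);
 *     size_t sz = 16384;
 *     struct dm_ioctl *dmi = calloc(1, sz);
 *     dmi->version[0] = DM_VERSION_MAJOR;
 *     dmi->version[1] = DM_VERSION_MINOR;
 *     dmi->version[2] = DM_VERSION_PATCHLEVEL;
 *     dmi->data_size = sz;
 *     dmi->data_start = sizeof(*dmi);
 *     ioctl(ctl, DM_LIST_DEVICES, dmi);    // name list begins at data_start
 */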
5263 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5264                                int cmd, abi_long arg)
5265 {
5266     void *argptr;
5267     int target_size;
5268     const argtype *arg_type = ie->arg_type;
5269     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5270     abi_long ret;
5271 
5272     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5273     struct blkpg_partition host_part;
5274 
5275     /* Read and convert blkpg */
5276     arg_type++;
5277     target_size = thunk_type_size(arg_type, 0);
5278     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5279     if (!argptr) {
5280         ret = -TARGET_EFAULT;
5281         goto out;
5282     }
5283     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5284     unlock_user(argptr, arg, 0);
5285 
5286     switch (host_blkpg->op) {
5287     case BLKPG_ADD_PARTITION:
5288     case BLKPG_DEL_PARTITION:
5289         /* payload is struct blkpg_partition */
5290         break;
5291     default:
5292         /* Unknown opcode */
5293         ret = -TARGET_EINVAL;
5294         goto out;
5295     }
5296 
5297     /* Read and convert blkpg->data */
5298     arg = (abi_long)(uintptr_t)host_blkpg->data;
5299     target_size = thunk_type_size(part_arg_type, 0);
5300     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5301     if (!argptr) {
5302         ret = -TARGET_EFAULT;
5303         goto out;
5304     }
5305     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5306     unlock_user(argptr, arg, 0);
5307 
5308     /* Swizzle the data pointer to our local copy and call! */
5309     host_blkpg->data = &host_part;
5310     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5311 
5312 out:
5313     return ret;
5314 }
5315 
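/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * do_ioctl_blkpg() above has to follow the data pointer inside struct
 * blkpg_ioctl_arg to a struct blkpg_partition.  The guest-side call it
 * supports is, for example (offsets chosen arbitrarily, disk_fd hypothetical):
 *
 *     #include <linux/blkpg.h>
 *
 *     struct blkpg_partition part = {
 *         .start  = 1 * 1024 * 1024,
 *         .length = 64 * 1024 * 1024,
 *         .pno    = 1,
 *     };
 *     struct blkpg_ioctl_arg arg = {
 *         .op      = BLKPG_ADD_PARTITION,
 *         .datalen = sizeof(part),
 *         .data    = &part,
 *     };
 *     ioctl(disk_fd, BLKPG, &arg);
 */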
5316 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5317                                 int fd, int cmd, abi_long arg)
5318 {
5319     const argtype *arg_type = ie->arg_type;
5320     const StructEntry *se;
5321     const argtype *field_types;
5322     const int *dst_offsets, *src_offsets;
5323     int target_size;
5324     void *argptr;
5325     abi_ulong *target_rt_dev_ptr = NULL;
5326     unsigned long *host_rt_dev_ptr = NULL;
5327     abi_long ret;
5328     int i;
5329 
5330     assert(ie->access == IOC_W);
5331     assert(*arg_type == TYPE_PTR);
5332     arg_type++;
5333     assert(*arg_type == TYPE_STRUCT);
5334     target_size = thunk_type_size(arg_type, 0);
5335     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5336     if (!argptr) {
5337         return -TARGET_EFAULT;
5338     }
5339     arg_type++;
5340     assert(*arg_type == (int)STRUCT_rtentry);
5341     se = struct_entries + *arg_type++;
5342     assert(se->convert[0] == NULL);
5343     /* convert struct here to be able to catch rt_dev string */
5344     field_types = se->field_types;
5345     dst_offsets = se->field_offsets[THUNK_HOST];
5346     src_offsets = se->field_offsets[THUNK_TARGET];
5347     for (i = 0; i < se->nb_fields; i++) {
5348         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5349             assert(*field_types == TYPE_PTRVOID);
5350             target_rt_dev_ptr = argptr + src_offsets[i];
5351             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5352             if (*target_rt_dev_ptr != 0) {
5353                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5354                                                   tswapal(*target_rt_dev_ptr));
5355                 if (!*host_rt_dev_ptr) {
5356                     unlock_user(argptr, arg, 0);
5357                     return -TARGET_EFAULT;
5358                 }
5359             } else {
5360                 *host_rt_dev_ptr = 0;
5361             }
5362             field_types++;
5363             continue;
5364         }
5365         field_types = thunk_convert(buf_temp + dst_offsets[i],
5366                                     argptr + src_offsets[i],
5367                                     field_types, THUNK_HOST);
5368     }
5369     unlock_user(argptr, arg, 0);
5370 
5371     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5372 
5373     assert(host_rt_dev_ptr != NULL);
5374     assert(target_rt_dev_ptr != NULL);
5375     if (*host_rt_dev_ptr != 0) {
5376         unlock_user((void *)*host_rt_dev_ptr,
5377                     *target_rt_dev_ptr, 0);
5378     }
5379     return ret;
5380 }
5381 
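/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * do_ioctl_rt() exists because struct rtentry embeds a string pointer
 * (rt_dev) that must be relocated separately from the fixed-size fields.
 * The guest-side request it handles is a plain route addition, roughly
 * (s is a hypothetical AF_INET socket):
 *
 *     #include <net/route.h>
 *
 *     struct rtentry rt = { 0 };
 *     ((struct sockaddr_in *)&rt.rt_dst)->sin_family = AF_INET;
 *     ((struct sockaddr_in *)&rt.rt_genmask)->sin_family = AF_INET;
 *     rt.rt_flags = RTF_UP;
 *     rt.rt_dev = "eth0";              // the pointer handled specially above
 *     ioctl(s, SIOCADDRT, &rt);
 */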
5382 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5383                                      int fd, int cmd, abi_long arg)
5384 {
5385     int sig = target_to_host_signal(arg);
5386     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5387 }
5388 
5389 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5390                                     int fd, int cmd, abi_long arg)
5391 {
5392     struct timeval tv;
5393     abi_long ret;
5394 
5395     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5396     if (is_error(ret)) {
5397         return ret;
5398     }
5399 
5400     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5401         if (copy_to_user_timeval(arg, &tv)) {
5402             return -TARGET_EFAULT;
5403         }
5404     } else {
5405         if (copy_to_user_timeval64(arg, &tv)) {
5406             return -TARGET_EFAULT;
5407         }
5408     }
5409 
5410     return ret;
5411 }
5412 
5413 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5414                                       int fd, int cmd, abi_long arg)
5415 {
5416     struct timespec ts;
5417     abi_long ret;
5418 
5419     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5420     if (is_error(ret)) {
5421         return ret;
5422     }
5423 
5424     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5425         if (host_to_target_timespec(arg, &ts)) {
5426             return -TARGET_EFAULT;
5427         }
5428     } else{
5429         if (host_to_target_timespec64(arg, &ts)) {
5430             return -TARGET_EFAULT;
5431         }
5432     }
5433 
5434     return ret;
5435 }
5436 
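/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * SIOCGSTAMP/SIOCGSTAMPNS return the arrival time of the last packet read
 * from a socket; the _OLD/_NEW split above only changes whether a 32-bit or
 * 64-bit time layout is written back to the guest.  Typical guest usage:
 *
 *     char pkt[1500];
 *     recv(sock, pkt, sizeof(pkt), 0);
 *     struct timeval tv;
 *     ioctl(sock, SIOCGSTAMP, &tv);    // or SIOCGSTAMPNS with a timespec
 */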
5437 #ifdef TIOCGPTPEER
5438 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5439                                      int fd, int cmd, abi_long arg)
5440 {
5441     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5442     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5443 }
5444 #endif
5445 
5446 #ifdef HAVE_DRM_H
5447 
5448 static void unlock_drm_version(struct drm_version *host_ver,
5449                                struct target_drm_version *target_ver,
5450                                bool copy)
5451 {
5452     unlock_user(host_ver->name, target_ver->name,
5453                                 copy ? host_ver->name_len : 0);
5454     unlock_user(host_ver->date, target_ver->date,
5455                                 copy ? host_ver->date_len : 0);
5456     unlock_user(host_ver->desc, target_ver->desc,
5457                                 copy ? host_ver->desc_len : 0);
5458 }
5459 
5460 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5461                                           struct target_drm_version *target_ver)
5462 {
5463     memset(host_ver, 0, sizeof(*host_ver));
5464 
5465     __get_user(host_ver->name_len, &target_ver->name_len);
5466     if (host_ver->name_len) {
5467         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5468                                    target_ver->name_len, 0);
5469         if (!host_ver->name) {
5470             return -EFAULT;
5471         }
5472     }
5473 
5474     __get_user(host_ver->date_len, &target_ver->date_len);
5475     if (host_ver->date_len) {
5476         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5477                                    target_ver->date_len, 0);
5478         if (!host_ver->date) {
5479             goto err;
5480         }
5481     }
5482 
5483     __get_user(host_ver->desc_len, &target_ver->desc_len);
5484     if (host_ver->desc_len) {
5485         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5486                                    target_ver->desc_len, 0);
5487         if (!host_ver->desc) {
5488             goto err;
5489         }
5490     }
5491 
5492     return 0;
5493 err:
5494     unlock_drm_version(host_ver, target_ver, false);
5495     return -EFAULT;
5496 }
5497 
5498 static inline void host_to_target_drmversion(
5499                                           struct target_drm_version *target_ver,
5500                                           struct drm_version *host_ver)
5501 {
5502     __put_user(host_ver->version_major, &target_ver->version_major);
5503     __put_user(host_ver->version_minor, &target_ver->version_minor);
5504     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5505     __put_user(host_ver->name_len, &target_ver->name_len);
5506     __put_user(host_ver->date_len, &target_ver->date_len);
5507     __put_user(host_ver->desc_len, &target_ver->desc_len);
5508     unlock_drm_version(host_ver, target_ver, true);
5509 }
5510 
5511 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5512                              int fd, int cmd, abi_long arg)
5513 {
5514     struct drm_version *ver;
5515     struct target_drm_version *target_ver;
5516     abi_long ret;
5517 
5518     switch (ie->host_cmd) {
5519     case DRM_IOCTL_VERSION:
5520         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5521             return -TARGET_EFAULT;
5522         }
5523         ver = (struct drm_version *)buf_temp;
5524         ret = target_to_host_drmversion(ver, target_ver);
5525         if (!is_error(ret)) {
5526             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5527             if (is_error(ret)) {
5528                 unlock_drm_version(ver, target_ver, false);
5529             } else {
5530                 host_to_target_drmversion(target_ver, ver);
5531             }
5532         }
5533         unlock_user_struct(target_ver, arg, 0);
5534         return ret;
5535     }
5536     return -TARGET_ENOSYS;
5537 }
5538 
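/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * DRM_IOCTL_VERSION is a two-call protocol: the first call reports the
 * string lengths, the caller allocates buffers, and the second call fills
 * them in.  That is why target_to_host_drmversion() above locks the three
 * guest buffers before issuing the host ioctl (drm_fd is hypothetical):
 *
 *     struct drm_version v = { 0 };
 *     ioctl(drm_fd, DRM_IOCTL_VERSION, &v);   // learn name/date/desc lengths
 *     v.name = malloc(v.name_len + 1);
 *     v.date = malloc(v.date_len + 1);
 *     v.desc = malloc(v.desc_len + 1);
 *     ioctl(drm_fd, DRM_IOCTL_VERSION, &v);   // strings are filled in now
 */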
5539 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5540                                            struct drm_i915_getparam *gparam,
5541                                            int fd, abi_long arg)
5542 {
5543     abi_long ret;
5544     int value;
5545     struct target_drm_i915_getparam *target_gparam;
5546 
5547     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5548         return -TARGET_EFAULT;
5549     }
5550 
5551     __get_user(gparam->param, &target_gparam->param);
5552     gparam->value = &value;
5553     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5554     put_user_s32(value, target_gparam->value);
5555 
5556     unlock_user_struct(target_gparam, arg, 0);
5557     return ret;
5558 }
5559 
5560 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5561                                   int fd, int cmd, abi_long arg)
5562 {
5563     switch (ie->host_cmd) {
5564     case DRM_IOCTL_I915_GETPARAM:
5565         return do_ioctl_drm_i915_getparam(ie,
5566                                           (struct drm_i915_getparam *)buf_temp,
5567                                           fd, arg);
5568     default:
5569         return -TARGET_ENOSYS;
5570     }
5571 }
5572 
5573 #endif
5574 
5575 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5576                                         int fd, int cmd, abi_long arg)
5577 {
5578     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5579     struct tun_filter *target_filter;
5580     char *target_addr;
5581 
5582     assert(ie->access == IOC_W);
5583 
5584     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5585     if (!target_filter) {
5586         return -TARGET_EFAULT;
5587     }
5588     filter->flags = tswap16(target_filter->flags);
5589     filter->count = tswap16(target_filter->count);
5590     unlock_user(target_filter, arg, 0);
5591 
5592     if (filter->count) {
5593         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5594             MAX_STRUCT_SIZE) {
5595             return -TARGET_EFAULT;
5596         }
5597 
5598         target_addr = lock_user(VERIFY_READ,
5599                                 arg + offsetof(struct tun_filter, addr),
5600                                 filter->count * ETH_ALEN, 1);
5601         if (!target_addr) {
5602             return -TARGET_EFAULT;
5603         }
5604         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5605         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5606     }
5607 
5608     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5609 }
5610 
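/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * TUNSETTXFILTER passes a struct tun_filter immediately followed by
 * filter->count MAC addresses, which is why the handler above copies the
 * trailing array separately.  A guest-side request might be built as
 * (tap_fd, mac0 and mac1 are hypothetical):
 *
 *     #include <linux/if_tun.h>
 *     #include <linux/if_ether.h>
 *
 *     struct {
 *         struct tun_filter f;
 *         unsigned char addr[2][ETH_ALEN];
 *     } req = { .f = { .flags = 0, .count = 2 } };
 *     memcpy(req.addr[0], mac0, ETH_ALEN);
 *     memcpy(req.addr[1], mac1, ETH_ALEN);
 *     ioctl(tap_fd, TUNSETTXFILTER, &req);
 */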
5611 IOCTLEntry ioctl_entries[] = {
5612 #define IOCTL(cmd, access, ...) \
5613     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5614 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5615     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5616 #define IOCTL_IGNORE(cmd) \
5617     { TARGET_ ## cmd, 0, #cmd },
5618 #include "ioctls.h"
5619     { 0, 0, },
5620 };
5621 
5622 /* ??? Implement proper locking for ioctls.  */
5623 /* do_ioctl() must return target values and target errnos. */
5624 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5625 {
5626     const IOCTLEntry *ie;
5627     const argtype *arg_type;
5628     abi_long ret;
5629     uint8_t buf_temp[MAX_STRUCT_SIZE];
5630     int target_size;
5631     void *argptr;
5632 
5633     ie = ioctl_entries;
5634     for(;;) {
5635         if (ie->target_cmd == 0) {
5636             qemu_log_mask(
5637                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5638             return -TARGET_ENOTTY;
5639         }
5640         if (ie->target_cmd == cmd)
5641             break;
5642         ie++;
5643     }
5644     arg_type = ie->arg_type;
5645     if (ie->do_ioctl) {
5646         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5647     } else if (!ie->host_cmd) {
5648         /* Some architectures define BSD ioctls in their headers
5649            that are not implemented in Linux.  */
5650         return -TARGET_ENOTTY;
5651     }
5652 
5653     switch(arg_type[0]) {
5654     case TYPE_NULL:
5655         /* no argument */
5656         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5657         break;
5658     case TYPE_PTRVOID:
5659     case TYPE_INT:
5660     case TYPE_LONG:
5661     case TYPE_ULONG:
5662         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5663         break;
5664     case TYPE_PTR:
5665         arg_type++;
5666         target_size = thunk_type_size(arg_type, 0);
5667         switch(ie->access) {
5668         case IOC_R:
5669             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5670             if (!is_error(ret)) {
5671                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5672                 if (!argptr)
5673                     return -TARGET_EFAULT;
5674                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5675                 unlock_user(argptr, arg, target_size);
5676             }
5677             break;
5678         case IOC_W:
5679             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5680             if (!argptr)
5681                 return -TARGET_EFAULT;
5682             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5683             unlock_user(argptr, arg, 0);
5684             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5685             break;
5686         default:
5687         case IOC_RW:
5688             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5689             if (!argptr)
5690                 return -TARGET_EFAULT;
5691             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5692             unlock_user(argptr, arg, 0);
5693             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5694             if (!is_error(ret)) {
5695                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5696                 if (!argptr)
5697                     return -TARGET_EFAULT;
5698                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5699                 unlock_user(argptr, arg, target_size);
5700             }
5701             break;
5702         }
5703         break;
5704     default:
5705         qemu_log_mask(LOG_UNIMP,
5706                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5707                       (long)cmd, arg_type[0]);
5708         ret = -TARGET_ENOTTY;
5709         break;
5710     }
5711     return ret;
5712 }
5713 
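/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * Most ioctls never need a special handler: do_ioctl() above finds the
 * IOCTLEntry, and for TYPE_PTR arguments the thunk layer converts the
 * struct in the direction(s) given by IOC_R/IOC_W/IOC_RW.  For instance a
 * guest doing
 *
 *     struct winsize ws;
 *     ioctl(STDIN_FILENO, TIOCGWINSZ, &ws);
 *
 * takes the IOC_R branch: the host ioctl fills buf_temp and thunk_convert()
 * writes the target-layout struct winsize back to guest memory.
 */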
5714 static const bitmask_transtbl iflag_tbl[] = {
5715         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5716         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5717         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5718         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5719         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5720         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5721         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5722         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5723         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5724         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5725         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5726         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5727         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5728         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5729         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5730 };
5731 
5732 static const bitmask_transtbl oflag_tbl[] = {
5733 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5734 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5735 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5736 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5737 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5738 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5739 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5740 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5741 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5742 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5743 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5744 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5745 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5746 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5747 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5748 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5749 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5750 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5751 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5752 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5753 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5754 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5755 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5756 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5757 };
5758 
5759 static const bitmask_transtbl cflag_tbl[] = {
5760 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5761 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5762 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5763 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5764 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5765 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5766 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5767 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5768 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5769 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5770 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5771 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5772 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5773 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5774 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5775 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5776 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5777 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5778 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5779 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5780 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5781 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5782 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5783 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5784 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5785 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5786 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5787 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5788 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5789 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5790 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5791 };
5792 
5793 static const bitmask_transtbl lflag_tbl[] = {
5794   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5795   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5796   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5797   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5798   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5799   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5800   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5801   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5802   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5803   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5804   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5805   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5806   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5807   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5808   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5809   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5810 };
5811 
5812 static void target_to_host_termios (void *dst, const void *src)
5813 {
5814     struct host_termios *host = dst;
5815     const struct target_termios *target = src;
5816 
5817     host->c_iflag =
5818         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5819     host->c_oflag =
5820         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5821     host->c_cflag =
5822         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5823     host->c_lflag =
5824         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5825     host->c_line = target->c_line;
5826 
5827     memset(host->c_cc, 0, sizeof(host->c_cc));
5828     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5829     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5830     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5831     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5832     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5833     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5834     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5835     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5836     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5837     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5838     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5839     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5840     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5841     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5842     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5843     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5844     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5845 }
5846 
5847 static void host_to_target_termios (void *dst, const void *src)
5848 {
5849     struct target_termios *target = dst;
5850     const struct host_termios *host = src;
5851 
5852     target->c_iflag =
5853         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5854     target->c_oflag =
5855         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5856     target->c_cflag =
5857         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5858     target->c_lflag =
5859         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5860     target->c_line = host->c_line;
5861 
5862     memset(target->c_cc, 0, sizeof(target->c_cc));
5863     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5864     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5865     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5866     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5867     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5868     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5869     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5870     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5871     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5872     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5873     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5874     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5875     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5876     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5877     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5878     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5879     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5880 }
5881 
5882 static const StructEntry struct_termios_def = {
5883     .convert = { host_to_target_termios, target_to_host_termios },
5884     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5885     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5886     .print = print_termios,
5887 };
5888 
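/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * The bitmask tables and the two conversion routines above are what back a
 * guest's tcgetattr()/tcsetattr(), which glibc implements with the TCGETS
 * and TCSETS ioctls.  For example, dropping a terminal out of canonical mode:
 *
 *     #include <termios.h>
 *
 *     struct termios tio;
 *     tcgetattr(STDIN_FILENO, &tio);            // TCGETS -> host_to_target
 *     tio.c_lflag &= ~(ICANON | ECHO);
 *     tio.c_cc[VMIN] = 1;
 *     tio.c_cc[VTIME] = 0;
 *     tcsetattr(STDIN_FILENO, TCSANOW, &tio);   // TCSETS -> target_to_host
 */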
5889 /* If the host does not provide these bits, they may be safely discarded. */
5890 #ifndef MAP_SYNC
5891 #define MAP_SYNC 0
5892 #endif
5893 #ifndef MAP_UNINITIALIZED
5894 #define MAP_UNINITIALIZED 0
5895 #endif
5896 
5897 static const bitmask_transtbl mmap_flags_tbl[] = {
5898     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5899     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5900       MAP_ANONYMOUS, MAP_ANONYMOUS },
5901     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5902       MAP_GROWSDOWN, MAP_GROWSDOWN },
5903     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5904       MAP_DENYWRITE, MAP_DENYWRITE },
5905     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5906       MAP_EXECUTABLE, MAP_EXECUTABLE },
5907     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5908     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5909       MAP_NORESERVE, MAP_NORESERVE },
5910     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5911     /* MAP_STACK had been ignored by the kernel for quite some time.
5912        Recognize it for the target insofar as we do not want to pass
5913        it through to the host.  */
5914     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5915     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5916     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5917     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5918       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5919     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5920       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5921 };
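/*
 * Illustrative sketch (not part of syscall.c): how a bitmask_transtbl such as
 * mmap_flags_tbl above is typically consumed.  Each entry pairs a target
 * mask/value with a host mask/value; every target flag that matches an entry
 * contributes the corresponding host bits.  Field names are assumptions made
 * for the sake of the example.
 */
#if 0
static unsigned int example_target_to_host_bitmask(unsigned int target_flags,
                                                   const bitmask_transtbl *tbl,
                                                   size_t nentries)
{
    unsigned int host_flags = 0;

    for (size_t i = 0; i < nentries; i++) {
        /* e.g. TARGET_MAP_LOCKED set -> MAP_LOCKED set on the host side */
        if ((target_flags & tbl[i].target_mask) == tbl[i].target_bits) {
            host_flags |= tbl[i].host_bits;
        }
    }
    return host_flags;
}
#endif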
5922 
5923 /*
5924  * Arrange for legacy / undefined architecture specific flags to be
5925  * ignored by mmap handling code.
5926  */
5927 #ifndef TARGET_MAP_32BIT
5928 #define TARGET_MAP_32BIT 0
5929 #endif
5930 #ifndef TARGET_MAP_HUGE_2MB
5931 #define TARGET_MAP_HUGE_2MB 0
5932 #endif
5933 #ifndef TARGET_MAP_HUGE_1GB
5934 #define TARGET_MAP_HUGE_1GB 0
5935 #endif
5936 
5937 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5938                         int target_flags, int fd, off_t offset)
5939 {
5940     /*
5941      * The historical set of flags that all mmap types implicitly support.
5942      */
5943     enum {
5944         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5945                                | TARGET_MAP_PRIVATE
5946                                | TARGET_MAP_FIXED
5947                                | TARGET_MAP_ANONYMOUS
5948                                | TARGET_MAP_DENYWRITE
5949                                | TARGET_MAP_EXECUTABLE
5950                                | TARGET_MAP_UNINITIALIZED
5951                                | TARGET_MAP_GROWSDOWN
5952                                | TARGET_MAP_LOCKED
5953                                | TARGET_MAP_NORESERVE
5954                                | TARGET_MAP_POPULATE
5955                                | TARGET_MAP_NONBLOCK
5956                                | TARGET_MAP_STACK
5957                                | TARGET_MAP_HUGETLB
5958                                | TARGET_MAP_32BIT
5959                                | TARGET_MAP_HUGE_2MB
5960                                | TARGET_MAP_HUGE_1GB
5961     };
5962     int host_flags;
5963 
5964     switch (target_flags & TARGET_MAP_TYPE) {
5965     case TARGET_MAP_PRIVATE:
5966         host_flags = MAP_PRIVATE;
5967         break;
5968     case TARGET_MAP_SHARED:
5969         host_flags = MAP_SHARED;
5970         break;
5971     case TARGET_MAP_SHARED_VALIDATE:
5972         /*
5973          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5974          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5975          */
5976         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5977             return -TARGET_EOPNOTSUPP;
5978         }
5979         host_flags = MAP_SHARED_VALIDATE;
5980         if (target_flags & TARGET_MAP_SYNC) {
5981             host_flags |= MAP_SYNC;
5982         }
5983         break;
5984     default:
5985         return -TARGET_EINVAL;
5986     }
5987     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5988 
5989     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5990 }
5991 
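/*
 * Worked example (illustrative, not part of syscall.c): MAP_SHARED_VALIDATE
 * tells the kernel to reject unknown flags, so do_mmap() mirrors that by
 * accepting only the legacy mask plus MAP_SYNC for that mapping type:
 *
 *   TARGET_MAP_SHARED_VALIDATE | TARGET_MAP_SYNC
 *       -> host mmap() with MAP_SHARED_VALIDATE | MAP_SYNC
 *   TARGET_MAP_SHARED_VALIDATE | <flag outside the legacy mask or MAP_SYNC>
 *       -> -TARGET_EOPNOTSUPP without calling mmap()
 *   TARGET_MAP_PRIVATE | TARGET_MAP_ANONYMOUS
 *       -> host mmap() with MAP_PRIVATE | MAP_ANONYMOUS (plus any table hits)
 */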
5992 /*
5993  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5994  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5995  */
5996 #if defined(TARGET_I386)
5997 
5998 /* NOTE: there is really one LDT for all the threads */
5999 static uint8_t *ldt_table;
6000 
6001 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6002 {
6003     int size;
6004     void *p;
6005 
6006     if (!ldt_table)
6007         return 0;
6008     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6009     if (size > bytecount)
6010         size = bytecount;
6011     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6012     if (!p)
6013         return -TARGET_EFAULT;
6014     /* ??? Should this be byteswapped?  */
6015     memcpy(p, ldt_table, size);
6016     unlock_user(p, ptr, size);
6017     return size;
6018 }
6019 
6020 /* XXX: add locking support */
6021 static abi_long write_ldt(CPUX86State *env,
6022                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6023 {
6024     struct target_modify_ldt_ldt_s ldt_info;
6025     struct target_modify_ldt_ldt_s *target_ldt_info;
6026     int seg_32bit, contents, read_exec_only, limit_in_pages;
6027     int seg_not_present, useable, lm;
6028     uint32_t *lp, entry_1, entry_2;
6029 
6030     if (bytecount != sizeof(ldt_info))
6031         return -TARGET_EINVAL;
6032     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6033         return -TARGET_EFAULT;
6034     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6035     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6036     ldt_info.limit = tswap32(target_ldt_info->limit);
6037     ldt_info.flags = tswap32(target_ldt_info->flags);
6038     unlock_user_struct(target_ldt_info, ptr, 0);
6039 
6040     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6041         return -TARGET_EINVAL;
6042     seg_32bit = ldt_info.flags & 1;
6043     contents = (ldt_info.flags >> 1) & 3;
6044     read_exec_only = (ldt_info.flags >> 3) & 1;
6045     limit_in_pages = (ldt_info.flags >> 4) & 1;
6046     seg_not_present = (ldt_info.flags >> 5) & 1;
6047     useable = (ldt_info.flags >> 6) & 1;
6048 #ifdef TARGET_ABI32
6049     lm = 0;
6050 #else
6051     lm = (ldt_info.flags >> 7) & 1;
6052 #endif
6053     if (contents == 3) {
6054         if (oldmode)
6055             return -TARGET_EINVAL;
6056         if (seg_not_present == 0)
6057             return -TARGET_EINVAL;
6058     }
6059     /* allocate the LDT */
6060     if (!ldt_table) {
6061         env->ldt.base = target_mmap(0,
6062                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6063                                     PROT_READ|PROT_WRITE,
6064                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6065         if (env->ldt.base == -1)
6066             return -TARGET_ENOMEM;
6067         memset(g2h_untagged(env->ldt.base), 0,
6068                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6069         env->ldt.limit = 0xffff;
6070         ldt_table = g2h_untagged(env->ldt.base);
6071     }
6072 
6073     /* NOTE: same code as Linux kernel */
6074     /* Allow LDTs to be cleared by the user. */
6075     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6076         if (oldmode ||
6077             (contents == 0		&&
6078              read_exec_only == 1	&&
6079              seg_32bit == 0		&&
6080              limit_in_pages == 0	&&
6081              seg_not_present == 1	&&
6082              useable == 0 )) {
6083             entry_1 = 0;
6084             entry_2 = 0;
6085             goto install;
6086         }
6087     }
6088 
6089     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6090         (ldt_info.limit & 0x0ffff);
6091     entry_2 = (ldt_info.base_addr & 0xff000000) |
6092         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6093         (ldt_info.limit & 0xf0000) |
6094         ((read_exec_only ^ 1) << 9) |
6095         (contents << 10) |
6096         ((seg_not_present ^ 1) << 15) |
6097         (seg_32bit << 22) |
6098         (limit_in_pages << 23) |
6099         (lm << 21) |
6100         0x7000;
6101     if (!oldmode)
6102         entry_2 |= (useable << 20);
6103 
6104     /* Install the new entry ...  */
6105 install:
6106     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6107     lp[0] = tswap32(entry_1);
6108     lp[1] = tswap32(entry_2);
6109     return 0;
6110 }
6111 
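/*
 * Layout note (illustrative, not part of syscall.c), inferred from the bit
 * shifts in write_ldt() above: entry_1/entry_2 follow the standard x86
 * segment descriptor format.
 *
 *   entry_1: bits 31..16 = base[15:0],  bits 15..0  = limit[15:0]
 *   entry_2: bits 31..24 = base[31:24], bits 23..20 = flags (G, D/B, L, AVL),
 *            bits 19..16 = limit[19:16], bits 15..8 = access byte,
 *            bits  7..0  = base[23:16]
 *
 * The constant 0x7000 sets DPL=3 and the S (code/data) bit, while the present
 * bit comes from (seg_not_present ^ 1) << 15.
 */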
6112 /* specific and weird i386 syscalls */
6113 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6114                               unsigned long bytecount)
6115 {
6116     abi_long ret;
6117 
6118     switch (func) {
6119     case 0:
6120         ret = read_ldt(ptr, bytecount);
6121         break;
6122     case 1:
6123         ret = write_ldt(env, ptr, bytecount, 1);
6124         break;
6125     case 0x11:
6126         ret = write_ldt(env, ptr, bytecount, 0);
6127         break;
6128     default:
6129         ret = -TARGET_ENOSYS;
6130         break;
6131     }
6132     return ret;
6133 }
6134 
6135 #if defined(TARGET_ABI32)
6136 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6137 {
6138     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6139     struct target_modify_ldt_ldt_s ldt_info;
6140     struct target_modify_ldt_ldt_s *target_ldt_info;
6141     int seg_32bit, contents, read_exec_only, limit_in_pages;
6142     int seg_not_present, useable, lm;
6143     uint32_t *lp, entry_1, entry_2;
6144     int i;
6145 
6146     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6147     if (!target_ldt_info)
6148         return -TARGET_EFAULT;
6149     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6150     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6151     ldt_info.limit = tswap32(target_ldt_info->limit);
6152     ldt_info.flags = tswap32(target_ldt_info->flags);
6153     if (ldt_info.entry_number == -1) {
6154         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6155             if (gdt_table[i] == 0) {
6156                 ldt_info.entry_number = i;
6157                 target_ldt_info->entry_number = tswap32(i);
6158                 break;
6159             }
6160         }
6161     }
6162     unlock_user_struct(target_ldt_info, ptr, 1);
6163 
6164     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6165         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6166            return -TARGET_EINVAL;
6167     seg_32bit = ldt_info.flags & 1;
6168     contents = (ldt_info.flags >> 1) & 3;
6169     read_exec_only = (ldt_info.flags >> 3) & 1;
6170     limit_in_pages = (ldt_info.flags >> 4) & 1;
6171     seg_not_present = (ldt_info.flags >> 5) & 1;
6172     useable = (ldt_info.flags >> 6) & 1;
6173 #ifdef TARGET_ABI32
6174     lm = 0;
6175 #else
6176     lm = (ldt_info.flags >> 7) & 1;
6177 #endif
6178 
6179     if (contents == 3) {
6180         if (seg_not_present == 0)
6181             return -TARGET_EINVAL;
6182     }
6183 
6184     /* NOTE: same code as Linux kernel */
6185     /* Allow LDTs to be cleared by the user. */
6186     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6187         if ((contents == 0             &&
6188              read_exec_only == 1       &&
6189              seg_32bit == 0            &&
6190              limit_in_pages == 0       &&
6191              seg_not_present == 1      &&
6192              useable == 0 )) {
6193             entry_1 = 0;
6194             entry_2 = 0;
6195             goto install;
6196         }
6197     }
6198 
6199     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6200         (ldt_info.limit & 0x0ffff);
6201     entry_2 = (ldt_info.base_addr & 0xff000000) |
6202         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6203         (ldt_info.limit & 0xf0000) |
6204         ((read_exec_only ^ 1) << 9) |
6205         (contents << 10) |
6206         ((seg_not_present ^ 1) << 15) |
6207         (seg_32bit << 22) |
6208         (limit_in_pages << 23) |
6209         (useable << 20) |
6210         (lm << 21) |
6211         0x7000;
6212 
6213     /* Install the new entry ...  */
6214 install:
6215     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6216     lp[0] = tswap32(entry_1);
6217     lp[1] = tswap32(entry_2);
6218     return 0;
6219 }
6220 
6221 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6222 {
6223     struct target_modify_ldt_ldt_s *target_ldt_info;
6224     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6225     uint32_t base_addr, limit, flags;
6226     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6227     int seg_not_present, useable, lm;
6228     uint32_t *lp, entry_1, entry_2;
6229 
6230     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6231     if (!target_ldt_info)
6232         return -TARGET_EFAULT;
6233     idx = tswap32(target_ldt_info->entry_number);
6234     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6235         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6236         unlock_user_struct(target_ldt_info, ptr, 1);
6237         return -TARGET_EINVAL;
6238     }
6239     lp = (uint32_t *)(gdt_table + idx);
6240     entry_1 = tswap32(lp[0]);
6241     entry_2 = tswap32(lp[1]);
6242 
6243     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6244     contents = (entry_2 >> 10) & 3;
6245     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6246     seg_32bit = (entry_2 >> 22) & 1;
6247     limit_in_pages = (entry_2 >> 23) & 1;
6248     useable = (entry_2 >> 20) & 1;
6249 #ifdef TARGET_ABI32
6250     lm = 0;
6251 #else
6252     lm = (entry_2 >> 21) & 1;
6253 #endif
6254     flags = (seg_32bit << 0) | (contents << 1) |
6255         (read_exec_only << 3) | (limit_in_pages << 4) |
6256         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6257     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6258     base_addr = (entry_1 >> 16) |
6259         (entry_2 & 0xff000000) |
6260         ((entry_2 & 0xff) << 16);
6261     target_ldt_info->base_addr = tswapal(base_addr);
6262     target_ldt_info->limit = tswap32(limit);
6263     target_ldt_info->flags = tswap32(flags);
6264     unlock_user_struct(target_ldt_info, ptr, 1);
6265     return 0;
6266 }
6267 
6268 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6269 {
6270     return -TARGET_ENOSYS;
6271 }
6272 #else
6273 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6274 {
6275     abi_long ret = 0;
6276     abi_ulong val;
6277     int idx;
6278 
6279     switch(code) {
6280     case TARGET_ARCH_SET_GS:
6281     case TARGET_ARCH_SET_FS:
6282         if (code == TARGET_ARCH_SET_GS)
6283             idx = R_GS;
6284         else
6285             idx = R_FS;
6286         cpu_x86_load_seg(env, idx, 0);
6287         env->segs[idx].base = addr;
6288         break;
6289     case TARGET_ARCH_GET_GS:
6290     case TARGET_ARCH_GET_FS:
6291         if (code == TARGET_ARCH_GET_GS)
6292             idx = R_GS;
6293         else
6294             idx = R_FS;
6295         val = env->segs[idx].base;
6296         if (put_user(val, addr, abi_ulong))
6297             ret = -TARGET_EFAULT;
6298         break;
6299     default:
6300         ret = -TARGET_EINVAL;
6301         break;
6302     }
6303     return ret;
6304 }
6305 #endif /* defined(TARGET_ABI32) */
6306 #endif /* defined(TARGET_I386) */
6307 
6308 /*
6309  * These constants are generic.  Supply any that are missing from the host.
6310  */
6311 #ifndef PR_SET_NAME
6312 # define PR_SET_NAME    15
6313 # define PR_GET_NAME    16
6314 #endif
6315 #ifndef PR_SET_FP_MODE
6316 # define PR_SET_FP_MODE 45
6317 # define PR_GET_FP_MODE 46
6318 # define PR_FP_MODE_FR   (1 << 0)
6319 # define PR_FP_MODE_FRE  (1 << 1)
6320 #endif
6321 #ifndef PR_SVE_SET_VL
6322 # define PR_SVE_SET_VL  50
6323 # define PR_SVE_GET_VL  51
6324 # define PR_SVE_VL_LEN_MASK  0xffff
6325 # define PR_SVE_VL_INHERIT   (1 << 17)
6326 #endif
6327 #ifndef PR_PAC_RESET_KEYS
6328 # define PR_PAC_RESET_KEYS  54
6329 # define PR_PAC_APIAKEY   (1 << 0)
6330 # define PR_PAC_APIBKEY   (1 << 1)
6331 # define PR_PAC_APDAKEY   (1 << 2)
6332 # define PR_PAC_APDBKEY   (1 << 3)
6333 # define PR_PAC_APGAKEY   (1 << 4)
6334 #endif
6335 #ifndef PR_SET_TAGGED_ADDR_CTRL
6336 # define PR_SET_TAGGED_ADDR_CTRL 55
6337 # define PR_GET_TAGGED_ADDR_CTRL 56
6338 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6339 #endif
6340 #ifndef PR_SET_IO_FLUSHER
6341 # define PR_SET_IO_FLUSHER 57
6342 # define PR_GET_IO_FLUSHER 58
6343 #endif
6344 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6345 # define PR_SET_SYSCALL_USER_DISPATCH 59
6346 #endif
6347 #ifndef PR_SME_SET_VL
6348 # define PR_SME_SET_VL  63
6349 # define PR_SME_GET_VL  64
6350 # define PR_SME_VL_LEN_MASK  0xffff
6351 # define PR_SME_VL_INHERIT   (1 << 17)
6352 #endif
6353 
6354 #include "target_prctl.h"
6355 
6356 static abi_long do_prctl_inval0(CPUArchState *env)
6357 {
6358     return -TARGET_EINVAL;
6359 }
6360 
6361 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6362 {
6363     return -TARGET_EINVAL;
6364 }
6365 
6366 #ifndef do_prctl_get_fp_mode
6367 #define do_prctl_get_fp_mode do_prctl_inval0
6368 #endif
6369 #ifndef do_prctl_set_fp_mode
6370 #define do_prctl_set_fp_mode do_prctl_inval1
6371 #endif
6372 #ifndef do_prctl_sve_get_vl
6373 #define do_prctl_sve_get_vl do_prctl_inval0
6374 #endif
6375 #ifndef do_prctl_sve_set_vl
6376 #define do_prctl_sve_set_vl do_prctl_inval1
6377 #endif
6378 #ifndef do_prctl_reset_keys
6379 #define do_prctl_reset_keys do_prctl_inval1
6380 #endif
6381 #ifndef do_prctl_set_tagged_addr_ctrl
6382 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6383 #endif
6384 #ifndef do_prctl_get_tagged_addr_ctrl
6385 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6386 #endif
6387 #ifndef do_prctl_get_unalign
6388 #define do_prctl_get_unalign do_prctl_inval1
6389 #endif
6390 #ifndef do_prctl_set_unalign
6391 #define do_prctl_set_unalign do_prctl_inval1
6392 #endif
6393 #ifndef do_prctl_sme_get_vl
6394 #define do_prctl_sme_get_vl do_prctl_inval0
6395 #endif
6396 #ifndef do_prctl_sme_set_vl
6397 #define do_prctl_sme_set_vl do_prctl_inval1
6398 #endif
6399 
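/*
 * Illustrative note (not part of syscall.c): target_prctl.h may provide
 * per-target implementations of the do_prctl_* hooks; anything it leaves
 * undefined falls back to the stubs above.  For example, a target without
 * PR_SET_FP_MODE support ends up with
 *
 *   #define do_prctl_set_fp_mode do_prctl_inval1
 *
 * so PR_SET_FP_MODE simply returns -TARGET_EINVAL from do_prctl() below.
 */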
6400 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6401                          abi_long arg3, abi_long arg4, abi_long arg5)
6402 {
6403     abi_long ret;
6404 
6405     switch (option) {
6406     case PR_GET_PDEATHSIG:
6407         {
6408             int deathsig;
6409             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6410                                   arg3, arg4, arg5));
6411             if (!is_error(ret) &&
6412                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6413                 return -TARGET_EFAULT;
6414             }
6415             return ret;
6416         }
6417     case PR_SET_PDEATHSIG:
6418         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6419                                arg3, arg4, arg5));
6420     case PR_GET_NAME:
6421         {
6422             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6423             if (!name) {
6424                 return -TARGET_EFAULT;
6425             }
6426             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6427                                   arg3, arg4, arg5));
6428             unlock_user(name, arg2, 16);
6429             return ret;
6430         }
6431     case PR_SET_NAME:
6432         {
6433             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6434             if (!name) {
6435                 return -TARGET_EFAULT;
6436             }
6437             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6438                                   arg3, arg4, arg5));
6439             unlock_user(name, arg2, 0);
6440             return ret;
6441         }
6442     case PR_GET_FP_MODE:
6443         return do_prctl_get_fp_mode(env);
6444     case PR_SET_FP_MODE:
6445         return do_prctl_set_fp_mode(env, arg2);
6446     case PR_SVE_GET_VL:
6447         return do_prctl_sve_get_vl(env);
6448     case PR_SVE_SET_VL:
6449         return do_prctl_sve_set_vl(env, arg2);
6450     case PR_SME_GET_VL:
6451         return do_prctl_sme_get_vl(env);
6452     case PR_SME_SET_VL:
6453         return do_prctl_sme_set_vl(env, arg2);
6454     case PR_PAC_RESET_KEYS:
6455         if (arg3 || arg4 || arg5) {
6456             return -TARGET_EINVAL;
6457         }
6458         return do_prctl_reset_keys(env, arg2);
6459     case PR_SET_TAGGED_ADDR_CTRL:
6460         if (arg3 || arg4 || arg5) {
6461             return -TARGET_EINVAL;
6462         }
6463         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6464     case PR_GET_TAGGED_ADDR_CTRL:
6465         if (arg2 || arg3 || arg4 || arg5) {
6466             return -TARGET_EINVAL;
6467         }
6468         return do_prctl_get_tagged_addr_ctrl(env);
6469 
6470     case PR_GET_UNALIGN:
6471         return do_prctl_get_unalign(env, arg2);
6472     case PR_SET_UNALIGN:
6473         return do_prctl_set_unalign(env, arg2);
6474 
6475     case PR_CAP_AMBIENT:
6476     case PR_CAPBSET_READ:
6477     case PR_CAPBSET_DROP:
6478     case PR_GET_DUMPABLE:
6479     case PR_SET_DUMPABLE:
6480     case PR_GET_KEEPCAPS:
6481     case PR_SET_KEEPCAPS:
6482     case PR_GET_SECUREBITS:
6483     case PR_SET_SECUREBITS:
6484     case PR_GET_TIMING:
6485     case PR_SET_TIMING:
6486     case PR_GET_TIMERSLACK:
6487     case PR_SET_TIMERSLACK:
6488     case PR_MCE_KILL:
6489     case PR_MCE_KILL_GET:
6490     case PR_GET_NO_NEW_PRIVS:
6491     case PR_SET_NO_NEW_PRIVS:
6492     case PR_GET_IO_FLUSHER:
6493     case PR_SET_IO_FLUSHER:
6494     case PR_SET_CHILD_SUBREAPER:
6495     case PR_GET_SPECULATION_CTRL:
6496     case PR_SET_SPECULATION_CTRL:
6497         /* Some prctl options have no pointer arguments and we can pass on. */
6498         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6499 
6500     case PR_GET_CHILD_SUBREAPER:
6501         {
6502             int val;
6503             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6504                                   arg3, arg4, arg5));
6505             if (!is_error(ret) && put_user_s32(val, arg2)) {
6506                 return -TARGET_EFAULT;
6507             }
6508             return ret;
6509         }
6510 
6511     case PR_GET_TID_ADDRESS:
6512         {
6513             TaskState *ts = get_task_state(env_cpu(env));
6514             return put_user_ual(ts->child_tidptr, arg2);
6515         }
6516 
6517     case PR_GET_FPEXC:
6518     case PR_SET_FPEXC:
6519         /* Was used for SPE on PowerPC. */
6520         return -TARGET_EINVAL;
6521 
6522     case PR_GET_ENDIAN:
6523     case PR_SET_ENDIAN:
6524     case PR_GET_FPEMU:
6525     case PR_SET_FPEMU:
6526     case PR_SET_MM:
6527     case PR_GET_SECCOMP:
6528     case PR_SET_SECCOMP:
6529     case PR_SET_SYSCALL_USER_DISPATCH:
6530     case PR_GET_THP_DISABLE:
6531     case PR_SET_THP_DISABLE:
6532     case PR_GET_TSC:
6533     case PR_SET_TSC:
6534         /* Disable to prevent the target disabling stuff we need. */
6535         return -TARGET_EINVAL;
6536 
6537     default:
6538         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6539                       option);
6540         return -TARGET_EINVAL;
6541     }
6542 }
6543 
6544 #define NEW_STACK_SIZE 0x40000
6545 
6546 
6547 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6548 typedef struct {
6549     CPUArchState *env;
6550     pthread_mutex_t mutex;
6551     pthread_cond_t cond;
6552     pthread_t thread;
6553     uint32_t tid;
6554     abi_ulong child_tidptr;
6555     abi_ulong parent_tidptr;
6556     sigset_t sigmask;
6557 } new_thread_info;
6558 
6559 static void *clone_func(void *arg)
6560 {
6561     new_thread_info *info = arg;
6562     CPUArchState *env;
6563     CPUState *cpu;
6564     TaskState *ts;
6565 
6566     rcu_register_thread();
6567     tcg_register_thread();
6568     env = info->env;
6569     cpu = env_cpu(env);
6570     thread_cpu = cpu;
6571     ts = get_task_state(cpu);
6572     info->tid = sys_gettid();
6573     task_settid(ts);
6574     if (info->child_tidptr)
6575         put_user_u32(info->tid, info->child_tidptr);
6576     if (info->parent_tidptr)
6577         put_user_u32(info->tid, info->parent_tidptr);
6578     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6579     /* Enable signals.  */
6580     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6581     /* Signal to the parent that we're ready.  */
6582     pthread_mutex_lock(&info->mutex);
6583     pthread_cond_broadcast(&info->cond);
6584     pthread_mutex_unlock(&info->mutex);
6585     /* Wait until the parent has finished initializing the tls state.  */
6586     pthread_mutex_lock(&clone_lock);
6587     pthread_mutex_unlock(&clone_lock);
6588     cpu_loop(env);
6589     /* never exits */
6590     return NULL;
6591 }
6592 
6593 /* do_fork() must return host values and target errnos (unlike most
6594    do_*() functions). */
6595 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6596                    abi_ulong parent_tidptr, target_ulong newtls,
6597                    abi_ulong child_tidptr)
6598 {
6599     CPUState *cpu = env_cpu(env);
6600     int ret;
6601     TaskState *ts;
6602     CPUState *new_cpu;
6603     CPUArchState *new_env;
6604     sigset_t sigmask;
6605 
6606     flags &= ~CLONE_IGNORED_FLAGS;
6607 
6608     /* Emulate vfork() with fork() */
6609     if (flags & CLONE_VFORK)
6610         flags &= ~(CLONE_VFORK | CLONE_VM);
6611 
6612     if (flags & CLONE_VM) {
6613         TaskState *parent_ts = get_task_state(cpu);
6614         new_thread_info info;
6615         pthread_attr_t attr;
6616 
6617         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6618             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6619             return -TARGET_EINVAL;
6620         }
6621 
6622         ts = g_new0(TaskState, 1);
6623         init_task_state(ts);
6624 
6625         /* Grab a mutex so that thread setup appears atomic.  */
6626         pthread_mutex_lock(&clone_lock);
6627 
6628         /*
6629          * If this is our first additional thread, we need to ensure we
6630          * generate code for parallel execution and flush old translations.
6631          * Do this now so that the copy gets CF_PARALLEL too.
6632          */
6633         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6634             tcg_cflags_set(cpu, CF_PARALLEL);
6635             tb_flush(cpu);
6636         }
6637 
6638         /* we create a new CPU instance. */
6639         new_env = cpu_copy(env);
6640         /* Init regs that differ from the parent.  */
6641         cpu_clone_regs_child(new_env, newsp, flags);
6642         cpu_clone_regs_parent(env, flags);
6643         new_cpu = env_cpu(new_env);
6644         new_cpu->opaque = ts;
6645         ts->bprm = parent_ts->bprm;
6646         ts->info = parent_ts->info;
6647         ts->signal_mask = parent_ts->signal_mask;
6648 
6649         if (flags & CLONE_CHILD_CLEARTID) {
6650             ts->child_tidptr = child_tidptr;
6651         }
6652 
6653         if (flags & CLONE_SETTLS) {
6654             cpu_set_tls (new_env, newtls);
6655         }
6656 
6657         memset(&info, 0, sizeof(info));
6658         pthread_mutex_init(&info.mutex, NULL);
6659         pthread_mutex_lock(&info.mutex);
6660         pthread_cond_init(&info.cond, NULL);
6661         info.env = new_env;
6662         if (flags & CLONE_CHILD_SETTID) {
6663             info.child_tidptr = child_tidptr;
6664         }
6665         if (flags & CLONE_PARENT_SETTID) {
6666             info.parent_tidptr = parent_tidptr;
6667         }
6668 
6669         ret = pthread_attr_init(&attr);
6670         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6671         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6672         /* It is not safe to deliver signals until the child has finished
6673            initializing, so temporarily block all signals.  */
6674         sigfillset(&sigmask);
6675         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6676         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6677 
6678         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6679         /* TODO: Free new CPU state if thread creation failed.  */
6680 
6681         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6682         pthread_attr_destroy(&attr);
6683         if (ret == 0) {
6684             /* Wait for the child to initialize.  */
6685             pthread_cond_wait(&info.cond, &info.mutex);
6686             ret = info.tid;
6687         } else {
6688             ret = -1;
6689         }
6690         pthread_mutex_unlock(&info.mutex);
6691         pthread_cond_destroy(&info.cond);
6692         pthread_mutex_destroy(&info.mutex);
6693         pthread_mutex_unlock(&clone_lock);
6694     } else {
6695         /* if no CLONE_VM, we consider it is a fork */
6696         if (flags & CLONE_INVALID_FORK_FLAGS) {
6697             return -TARGET_EINVAL;
6698         }
6699 
6700         /* We can't support custom termination signals */
6701         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6702             return -TARGET_EINVAL;
6703         }
6704 
6705 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6706         if (flags & CLONE_PIDFD) {
6707             return -TARGET_EINVAL;
6708         }
6709 #endif
6710 
6711         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6712         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6713             return -TARGET_EINVAL;
6714         }
6715 
6716         if (block_signals()) {
6717             return -QEMU_ERESTARTSYS;
6718         }
6719 
6720         fork_start();
6721         ret = fork();
6722         if (ret == 0) {
6723             /* Child Process.  */
6724             cpu_clone_regs_child(env, newsp, flags);
6725             fork_end(ret);
6726             /* There is a race condition here.  The parent process could
6727                theoretically read the TID in the child process before the child
6728                tid is set.  This would require using either ptrace
6729                (not implemented) or having *_tidptr point at a shared memory
6730                mapping.  We can't repeat the spinlock hack used above because
6731                the child process gets its own copy of the lock.  */
6732             if (flags & CLONE_CHILD_SETTID)
6733                 put_user_u32(sys_gettid(), child_tidptr);
6734             if (flags & CLONE_PARENT_SETTID)
6735                 put_user_u32(sys_gettid(), parent_tidptr);
6736             ts = get_task_state(cpu);
6737             if (flags & CLONE_SETTLS)
6738                 cpu_set_tls (env, newtls);
6739             if (flags & CLONE_CHILD_CLEARTID)
6740                 ts->child_tidptr = child_tidptr;
6741         } else {
6742             cpu_clone_regs_parent(env, flags);
6743             if (flags & CLONE_PIDFD) {
6744                 int pid_fd = 0;
6745 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6746                 int pid_child = ret;
6747                 pid_fd = pidfd_open(pid_child, 0);
6748                 if (pid_fd >= 0) {
6749                     qemu_set_cloexec(pid_fd);
6750                 } else {
6751                     pid_fd = 0;
6752                 }
6753 #endif
6754                 put_user_u32(pid_fd, parent_tidptr);
6755             }
6756             fork_end(ret);
6757         }
6758         g_assert(!cpu_in_exclusive_context(cpu));
6759     }
6760     return ret;
6761 }
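/*
 * Sequence sketch (illustrative, not part of syscall.c) of the CLONE_VM path
 * in do_fork() above:
 *
 *   parent: lock clone_lock -> cpu_copy() the CPU state -> block all signals
 *           -> pthread_create(clone_func) -> wait on info.cond -> ret = info.tid
 *           -> unlock clone_lock
 *   child:  register with RCU/TCG -> publish its TID via info/child_tidptr
 *           -> restore the signal mask -> broadcast info.cond
 *           -> briefly take clone_lock, so it blocks until the parent is done
 *           -> cpu_loop(), which never returns
 *
 * The clone_lock round trip is what makes thread setup appear atomic to the
 * new thread.
 */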
6762 
6763 /* warning: doesn't handle Linux-specific flags... */
6764 static int target_to_host_fcntl_cmd(int cmd)
6765 {
6766     int ret;
6767 
6768     switch(cmd) {
6769     case TARGET_F_DUPFD:
6770     case TARGET_F_GETFD:
6771     case TARGET_F_SETFD:
6772     case TARGET_F_GETFL:
6773     case TARGET_F_SETFL:
6774     case TARGET_F_OFD_GETLK:
6775     case TARGET_F_OFD_SETLK:
6776     case TARGET_F_OFD_SETLKW:
6777         ret = cmd;
6778         break;
6779     case TARGET_F_GETLK:
6780         ret = F_GETLK;
6781         break;
6782     case TARGET_F_SETLK:
6783         ret = F_SETLK;
6784         break;
6785     case TARGET_F_SETLKW:
6786         ret = F_SETLKW;
6787         break;
6788     case TARGET_F_GETOWN:
6789         ret = F_GETOWN;
6790         break;
6791     case TARGET_F_SETOWN:
6792         ret = F_SETOWN;
6793         break;
6794     case TARGET_F_GETSIG:
6795         ret = F_GETSIG;
6796         break;
6797     case TARGET_F_SETSIG:
6798         ret = F_SETSIG;
6799         break;
6800 #if TARGET_ABI_BITS == 32
6801     case TARGET_F_GETLK64:
6802         ret = F_GETLK;
6803         break;
6804     case TARGET_F_SETLK64:
6805         ret = F_SETLK;
6806         break;
6807     case TARGET_F_SETLKW64:
6808         ret = F_SETLKW;
6809         break;
6810 #endif
6811     case TARGET_F_SETLEASE:
6812         ret = F_SETLEASE;
6813         break;
6814     case TARGET_F_GETLEASE:
6815         ret = F_GETLEASE;
6816         break;
6817 #ifdef F_DUPFD_CLOEXEC
6818     case TARGET_F_DUPFD_CLOEXEC:
6819         ret = F_DUPFD_CLOEXEC;
6820         break;
6821 #endif
6822     case TARGET_F_NOTIFY:
6823         ret = F_NOTIFY;
6824         break;
6825 #ifdef F_GETOWN_EX
6826     case TARGET_F_GETOWN_EX:
6827         ret = F_GETOWN_EX;
6828         break;
6829 #endif
6830 #ifdef F_SETOWN_EX
6831     case TARGET_F_SETOWN_EX:
6832         ret = F_SETOWN_EX;
6833         break;
6834 #endif
6835 #ifdef F_SETPIPE_SZ
6836     case TARGET_F_SETPIPE_SZ:
6837         ret = F_SETPIPE_SZ;
6838         break;
6839     case TARGET_F_GETPIPE_SZ:
6840         ret = F_GETPIPE_SZ;
6841         break;
6842 #endif
6843 #ifdef F_ADD_SEALS
6844     case TARGET_F_ADD_SEALS:
6845         ret = F_ADD_SEALS;
6846         break;
6847     case TARGET_F_GET_SEALS:
6848         ret = F_GET_SEALS;
6849         break;
6850 #endif
6851     default:
6852         ret = -TARGET_EINVAL;
6853         break;
6854     }
6855 
6856 #if defined(__powerpc64__)
6857     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6858      * are not supported by the kernel. The glibc fcntl call actually adjusts
6859      * them to 5, 6 and 7 before making the syscall(). Since we make the
6860      * syscall directly, adjust to what is supported by the kernel.
6861      */
6862     if (ret >= F_GETLK && ret <= F_SETLKW) {
6863         ret -= F_GETLK - 5;
6864     }
6865 #endif
6866 
6867     return ret;
6868 }
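/*
 * Worked example (illustrative, not part of syscall.c) of the PPC64 fixup
 * above: with glibc defining F_GETLK/F_SETLK/F_SETLKW as 12/13/14,
 * target_to_host_fcntl_cmd(TARGET_F_GETLK) first yields 12 and the final
 * adjustment turns it into 5, the value the raw fcntl syscall expects.
 */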
6869 
6870 #define FLOCK_TRANSTBL \
6871     switch (type) { \
6872     TRANSTBL_CONVERT(F_RDLCK); \
6873     TRANSTBL_CONVERT(F_WRLCK); \
6874     TRANSTBL_CONVERT(F_UNLCK); \
6875     }
6876 
6877 static int target_to_host_flock(int type)
6878 {
6879 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6880     FLOCK_TRANSTBL
6881 #undef  TRANSTBL_CONVERT
6882     return -TARGET_EINVAL;
6883 }
6884 
6885 static int host_to_target_flock(int type)
6886 {
6887 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6888     FLOCK_TRANSTBL
6889 #undef  TRANSTBL_CONVERT
6890     /* if we don't know how to convert the value coming
6891      * from the host, we copy it to the target field as-is
6892      */
6893     return type;
6894 }
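/*
 * Expansion example (illustrative, not part of syscall.c): with the first
 * TRANSTBL_CONVERT definition, FLOCK_TRANSTBL expands inside
 * target_to_host_flock() to
 *
 *   switch (type) {
 *   case TARGET_F_RDLCK: return F_RDLCK;
 *   case TARGET_F_WRLCK: return F_WRLCK;
 *   case TARGET_F_UNLCK: return F_UNLCK;
 *   }
 *
 * and the second definition produces the inverse TARGET_* mapping for
 * host_to_target_flock().
 */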
6895 
6896 static inline abi_long copy_from_user_flock(struct flock *fl,
6897                                             abi_ulong target_flock_addr)
6898 {
6899     struct target_flock *target_fl;
6900     int l_type;
6901 
6902     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6903         return -TARGET_EFAULT;
6904     }
6905 
6906     __get_user(l_type, &target_fl->l_type);
6907     l_type = target_to_host_flock(l_type);
6908     if (l_type < 0) {
6909         return l_type;
6910     }
6911     fl->l_type = l_type;
6912     __get_user(fl->l_whence, &target_fl->l_whence);
6913     __get_user(fl->l_start, &target_fl->l_start);
6914     __get_user(fl->l_len, &target_fl->l_len);
6915     __get_user(fl->l_pid, &target_fl->l_pid);
6916     unlock_user_struct(target_fl, target_flock_addr, 0);
6917     return 0;
6918 }
6919 
6920 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6921                                           const struct flock *fl)
6922 {
6923     struct target_flock *target_fl;
6924     short l_type;
6925 
6926     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6927         return -TARGET_EFAULT;
6928     }
6929 
6930     l_type = host_to_target_flock(fl->l_type);
6931     __put_user(l_type, &target_fl->l_type);
6932     __put_user(fl->l_whence, &target_fl->l_whence);
6933     __put_user(fl->l_start, &target_fl->l_start);
6934     __put_user(fl->l_len, &target_fl->l_len);
6935     __put_user(fl->l_pid, &target_fl->l_pid);
6936     unlock_user_struct(target_fl, target_flock_addr, 1);
6937     return 0;
6938 }
6939 
6940 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6941 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6942 
6943 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6944 struct target_oabi_flock64 {
6945     abi_short l_type;
6946     abi_short l_whence;
6947     abi_llong l_start;
6948     abi_llong l_len;
6949     abi_int   l_pid;
6950 } QEMU_PACKED;
6951 
6952 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6953                                                    abi_ulong target_flock_addr)
6954 {
6955     struct target_oabi_flock64 *target_fl;
6956     int l_type;
6957 
6958     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6959         return -TARGET_EFAULT;
6960     }
6961 
6962     __get_user(l_type, &target_fl->l_type);
6963     l_type = target_to_host_flock(l_type);
6964     if (l_type < 0) {
6965         return l_type;
6966     }
6967     fl->l_type = l_type;
6968     __get_user(fl->l_whence, &target_fl->l_whence);
6969     __get_user(fl->l_start, &target_fl->l_start);
6970     __get_user(fl->l_len, &target_fl->l_len);
6971     __get_user(fl->l_pid, &target_fl->l_pid);
6972     unlock_user_struct(target_fl, target_flock_addr, 0);
6973     return 0;
6974 }
6975 
6976 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6977                                                  const struct flock *fl)
6978 {
6979     struct target_oabi_flock64 *target_fl;
6980     short l_type;
6981 
6982     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6983         return -TARGET_EFAULT;
6984     }
6985 
6986     l_type = host_to_target_flock(fl->l_type);
6987     __put_user(l_type, &target_fl->l_type);
6988     __put_user(fl->l_whence, &target_fl->l_whence);
6989     __put_user(fl->l_start, &target_fl->l_start);
6990     __put_user(fl->l_len, &target_fl->l_len);
6991     __put_user(fl->l_pid, &target_fl->l_pid);
6992     unlock_user_struct(target_fl, target_flock_addr, 1);
6993     return 0;
6994 }
6995 #endif
6996 
6997 static inline abi_long copy_from_user_flock64(struct flock *fl,
6998                                               abi_ulong target_flock_addr)
6999 {
7000     struct target_flock64 *target_fl;
7001     int l_type;
7002 
7003     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7004         return -TARGET_EFAULT;
7005     }
7006 
7007     __get_user(l_type, &target_fl->l_type);
7008     l_type = target_to_host_flock(l_type);
7009     if (l_type < 0) {
7010         return l_type;
7011     }
7012     fl->l_type = l_type;
7013     __get_user(fl->l_whence, &target_fl->l_whence);
7014     __get_user(fl->l_start, &target_fl->l_start);
7015     __get_user(fl->l_len, &target_fl->l_len);
7016     __get_user(fl->l_pid, &target_fl->l_pid);
7017     unlock_user_struct(target_fl, target_flock_addr, 0);
7018     return 0;
7019 }
7020 
7021 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7022                                             const struct flock *fl)
7023 {
7024     struct target_flock64 *target_fl;
7025     short l_type;
7026 
7027     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7028         return -TARGET_EFAULT;
7029     }
7030 
7031     l_type = host_to_target_flock(fl->l_type);
7032     __put_user(l_type, &target_fl->l_type);
7033     __put_user(fl->l_whence, &target_fl->l_whence);
7034     __put_user(fl->l_start, &target_fl->l_start);
7035     __put_user(fl->l_len, &target_fl->l_len);
7036     __put_user(fl->l_pid, &target_fl->l_pid);
7037     unlock_user_struct(target_fl, target_flock_addr, 1);
7038     return 0;
7039 }
7040 
7041 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7042 {
7043     struct flock fl;
7044 #ifdef F_GETOWN_EX
7045     struct f_owner_ex fox;
7046     struct target_f_owner_ex *target_fox;
7047 #endif
7048     abi_long ret;
7049     int host_cmd = target_to_host_fcntl_cmd(cmd);
7050 
7051     if (host_cmd == -TARGET_EINVAL)
7052 	    return host_cmd;
7053 
7054     switch(cmd) {
7055     case TARGET_F_GETLK:
7056         ret = copy_from_user_flock(&fl, arg);
7057         if (ret) {
7058             return ret;
7059         }
7060         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7061         if (ret == 0) {
7062             ret = copy_to_user_flock(arg, &fl);
7063         }
7064         break;
7065 
7066     case TARGET_F_SETLK:
7067     case TARGET_F_SETLKW:
7068         ret = copy_from_user_flock(&fl, arg);
7069         if (ret) {
7070             return ret;
7071         }
7072         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7073         break;
7074 
7075     case TARGET_F_GETLK64:
7076     case TARGET_F_OFD_GETLK:
7077         ret = copy_from_user_flock64(&fl, arg);
7078         if (ret) {
7079             return ret;
7080         }
7081         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7082         if (ret == 0) {
7083             ret = copy_to_user_flock64(arg, &fl);
7084         }
7085         break;
7086     case TARGET_F_SETLK64:
7087     case TARGET_F_SETLKW64:
7088     case TARGET_F_OFD_SETLK:
7089     case TARGET_F_OFD_SETLKW:
7090         ret = copy_from_user_flock64(&fl, arg);
7091         if (ret) {
7092             return ret;
7093         }
7094         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7095         break;
7096 
7097     case TARGET_F_GETFL:
7098         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7099         if (ret >= 0) {
7100             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7101             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7102             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7103                 ret |= TARGET_O_LARGEFILE;
7104             }
7105         }
7106         break;
7107 
7108     case TARGET_F_SETFL:
7109         ret = get_errno(safe_fcntl(fd, host_cmd,
7110                                    target_to_host_bitmask(arg,
7111                                                           fcntl_flags_tbl)));
7112         break;
7113 
7114 #ifdef F_GETOWN_EX
7115     case TARGET_F_GETOWN_EX:
7116         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7117         if (ret >= 0) {
7118             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7119                 return -TARGET_EFAULT;
7120             target_fox->type = tswap32(fox.type);
7121             target_fox->pid = tswap32(fox.pid);
7122             unlock_user_struct(target_fox, arg, 1);
7123         }
7124         break;
7125 #endif
7126 
7127 #ifdef F_SETOWN_EX
7128     case TARGET_F_SETOWN_EX:
7129         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7130             return -TARGET_EFAULT;
7131         fox.type = tswap32(target_fox->type);
7132         fox.pid = tswap32(target_fox->pid);
7133         unlock_user_struct(target_fox, arg, 0);
7134         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7135         break;
7136 #endif
7137 
7138     case TARGET_F_SETSIG:
7139         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7140         break;
7141 
7142     case TARGET_F_GETSIG:
7143         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7144         break;
7145 
7146     case TARGET_F_SETOWN:
7147     case TARGET_F_GETOWN:
7148     case TARGET_F_SETLEASE:
7149     case TARGET_F_GETLEASE:
7150     case TARGET_F_SETPIPE_SZ:
7151     case TARGET_F_GETPIPE_SZ:
7152     case TARGET_F_ADD_SEALS:
7153     case TARGET_F_GET_SEALS:
7154         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7155         break;
7156 
7157     default:
7158         ret = get_errno(safe_fcntl(fd, cmd, arg));
7159         break;
7160     }
7161     return ret;
7162 }
7163 
7164 #ifdef USE_UID16
7165 
7166 static inline int high2lowuid(int uid)
7167 {
7168     if (uid > 65535)
7169         return 65534;
7170     else
7171         return uid;
7172 }
7173 
7174 static inline int high2lowgid(int gid)
7175 {
7176     if (gid > 65535)
7177         return 65534;
7178     else
7179         return gid;
7180 }
7181 
7182 static inline int low2highuid(int uid)
7183 {
7184     if ((int16_t)uid == -1)
7185         return -1;
7186     else
7187         return uid;
7188 }
7189 
7190 static inline int low2highgid(int gid)
7191 {
7192     if ((int16_t)gid == -1)
7193         return -1;
7194     else
7195         return gid;
7196 }
7197 static inline int tswapid(int id)
7198 {
7199     return tswap16(id);
7200 }
7201 
7202 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7203 
7204 #else /* !USE_UID16 */
7205 static inline int high2lowuid(int uid)
7206 {
7207     return uid;
7208 }
7209 static inline int high2lowgid(int gid)
7210 {
7211     return gid;
7212 }
7213 static inline int low2highuid(int uid)
7214 {
7215     return uid;
7216 }
7217 static inline int low2highgid(int gid)
7218 {
7219     return gid;
7220 }
7221 static inline int tswapid(int id)
7222 {
7223     return tswap32(id);
7224 }
7225 
7226 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7227 
7228 #endif /* USE_UID16 */
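/*
 * Worked examples (illustrative, not part of syscall.c) for the USE_UID16
 * helpers above:
 *
 *   high2lowuid(100000) == 65534   -- IDs that do not fit in 16 bits are
 *                                     reported to the guest as the overflow ID
 *   low2highuid(0xffff) == -1      -- the 16-bit "no change" value (-1) is
 *                                     widened so the host syscall sees -1
 *   tswapid(id)                    -- byte-swaps 16-bit IDs here, 32-bit IDs
 *                                     in the !USE_UID16 variant below
 */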
7229 
7230 /* We must do direct syscalls for setting UID/GID, because we want to
7231  * implement the Linux system call semantics of "change only for this thread",
7232  * not the libc/POSIX semantics of "change for all threads in process".
7233  * (See http://ewontfix.com/17/ for more details.)
7234  * We use the 32-bit version of the syscalls if present; if it is not
7235  * then either the host architecture supports 32-bit UIDs natively with
7236  * the standard syscall, or the 16-bit UID is the best we can do.
7237  */
7238 #ifdef __NR_setuid32
7239 #define __NR_sys_setuid __NR_setuid32
7240 #else
7241 #define __NR_sys_setuid __NR_setuid
7242 #endif
7243 #ifdef __NR_setgid32
7244 #define __NR_sys_setgid __NR_setgid32
7245 #else
7246 #define __NR_sys_setgid __NR_setgid
7247 #endif
7248 #ifdef __NR_setresuid32
7249 #define __NR_sys_setresuid __NR_setresuid32
7250 #else
7251 #define __NR_sys_setresuid __NR_setresuid
7252 #endif
7253 #ifdef __NR_setresgid32
7254 #define __NR_sys_setresgid __NR_setresgid32
7255 #else
7256 #define __NR_sys_setresgid __NR_setresgid
7257 #endif
7258 #ifdef __NR_setgroups32
7259 #define __NR_sys_setgroups __NR_setgroups32
7260 #else
7261 #define __NR_sys_setgroups __NR_setgroups
7262 #endif
7263 #ifdef __NR_setreuid32
7264 #define __NR_sys_setreuid __NR_setreuid32
7265 #else
7266 #define __NR_sys_setreuid __NR_setreuid
7267 #endif
7268 #ifdef __NR_setregid32
7269 #define __NR_sys_setregid __NR_setregid32
7270 #else
7271 #define __NR_sys_setregid __NR_setregid
7272 #endif
7273 
7274 _syscall1(int, sys_setuid, uid_t, uid)
7275 _syscall1(int, sys_setgid, gid_t, gid)
7276 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7277 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7278 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7279 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7280 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
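/*
 * Illustrative note (not part of syscall.c): the sys_set*id wrappers above
 * issue the raw syscall, so e.g.
 *
 *   sys_setuid(uid)     -- changes the UID of the calling thread only,
 *                          matching Linux syscall semantics
 *   setuid(uid) (glibc) -- would broadcast the change to every thread in the
 *                          process, the POSIX behaviour the comment above
 *                          deliberately avoids
 */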
7281 
7282 void syscall_init(void)
7283 {
7284     IOCTLEntry *ie;
7285     const argtype *arg_type;
7286     int size;
7287 
7288     thunk_init(STRUCT_MAX);
7289 
7290 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7291 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7292 #include "syscall_types.h"
7293 #undef STRUCT
7294 #undef STRUCT_SPECIAL
7295 
7296     /* we patch the ioctl size if necessary. We rely on the fact that
7297        no ioctl has all the bits at '1' in the size field */
7298     ie = ioctl_entries;
7299     while (ie->target_cmd != 0) {
7300         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7301             TARGET_IOC_SIZEMASK) {
7302             arg_type = ie->arg_type;
7303             if (arg_type[0] != TYPE_PTR) {
7304                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7305                         ie->target_cmd);
7306                 exit(1);
7307             }
7308             arg_type++;
7309             size = thunk_type_size(arg_type, 0);
7310             ie->target_cmd = (ie->target_cmd &
7311                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7312                 (size << TARGET_IOC_SIZESHIFT);
7313         }
7314 
7315         /* automatic consistency check if same arch */
7316 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7317     (defined(__x86_64__) && defined(TARGET_X86_64))
7318         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7319             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7320                     ie->name, ie->target_cmd, ie->host_cmd);
7321         }
7322 #endif
7323         ie++;
7324     }
7325 }
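/*
 * Worked example (illustrative, not part of syscall.c) of the size patching
 * in syscall_init(): an ioctl table entry whose target_cmd was declared with
 * all size bits set, i.e.
 *
 *   target_cmd = ... | (TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)
 *
 * has that placeholder replaced by thunk_type_size() of the pointed-to
 * structure, so the command number the guest actually issues (which encodes
 * the real structure size) matches the patched entry.
 */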
7326 
7327 #ifdef TARGET_NR_truncate64
7328 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7329                                          abi_long arg2,
7330                                          abi_long arg3,
7331                                          abi_long arg4)
7332 {
7333     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7334         arg2 = arg3;
7335         arg3 = arg4;
7336     }
7337     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7338 }
7339 #endif
7340 
7341 #ifdef TARGET_NR_ftruncate64
7342 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7343                                           abi_long arg2,
7344                                           abi_long arg3,
7345                                           abi_long arg4)
7346 {
7347     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7348         arg2 = arg3;
7349         arg3 = arg4;
7350     }
7351     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7352 }
7353 #endif
7354 
7355 #if defined(TARGET_NR_timer_settime) || \
7356     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7357 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7358                                                  abi_ulong target_addr)
7359 {
7360     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7361                                 offsetof(struct target_itimerspec,
7362                                          it_interval)) ||
7363         target_to_host_timespec(&host_its->it_value, target_addr +
7364                                 offsetof(struct target_itimerspec,
7365                                          it_value))) {
7366         return -TARGET_EFAULT;
7367     }
7368 
7369     return 0;
7370 }
7371 #endif
7372 
7373 #if defined(TARGET_NR_timer_settime64) || \
7374     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7375 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7376                                                    abi_ulong target_addr)
7377 {
7378     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7379                                   offsetof(struct target__kernel_itimerspec,
7380                                            it_interval)) ||
7381         target_to_host_timespec64(&host_its->it_value, target_addr +
7382                                   offsetof(struct target__kernel_itimerspec,
7383                                            it_value))) {
7384         return -TARGET_EFAULT;
7385     }
7386 
7387     return 0;
7388 }
7389 #endif
7390 
7391 #if ((defined(TARGET_NR_timerfd_gettime) || \
7392       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7393       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7394 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7395                                                  struct itimerspec *host_its)
7396 {
7397     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7398                                                        it_interval),
7399                                 &host_its->it_interval) ||
7400         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7401                                                        it_value),
7402                                 &host_its->it_value)) {
7403         return -TARGET_EFAULT;
7404     }
7405     return 0;
7406 }
7407 #endif
7408 
7409 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7410       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7411       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7412 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7413                                                    struct itimerspec *host_its)
7414 {
7415     if (host_to_target_timespec64(target_addr +
7416                                   offsetof(struct target__kernel_itimerspec,
7417                                            it_interval),
7418                                   &host_its->it_interval) ||
7419         host_to_target_timespec64(target_addr +
7420                                   offsetof(struct target__kernel_itimerspec,
7421                                            it_value),
7422                                   &host_its->it_value)) {
7423         return -TARGET_EFAULT;
7424     }
7425     return 0;
7426 }
7427 #endif
7428 
7429 #if defined(TARGET_NR_adjtimex) || \
7430     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7431 static inline abi_long target_to_host_timex(struct timex *host_tx,
7432                                             abi_long target_addr)
7433 {
7434     struct target_timex *target_tx;
7435 
7436     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7437         return -TARGET_EFAULT;
7438     }
7439 
7440     __get_user(host_tx->modes, &target_tx->modes);
7441     __get_user(host_tx->offset, &target_tx->offset);
7442     __get_user(host_tx->freq, &target_tx->freq);
7443     __get_user(host_tx->maxerror, &target_tx->maxerror);
7444     __get_user(host_tx->esterror, &target_tx->esterror);
7445     __get_user(host_tx->status, &target_tx->status);
7446     __get_user(host_tx->constant, &target_tx->constant);
7447     __get_user(host_tx->precision, &target_tx->precision);
7448     __get_user(host_tx->tolerance, &target_tx->tolerance);
7449     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7450     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7451     __get_user(host_tx->tick, &target_tx->tick);
7452     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7453     __get_user(host_tx->jitter, &target_tx->jitter);
7454     __get_user(host_tx->shift, &target_tx->shift);
7455     __get_user(host_tx->stabil, &target_tx->stabil);
7456     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7457     __get_user(host_tx->calcnt, &target_tx->calcnt);
7458     __get_user(host_tx->errcnt, &target_tx->errcnt);
7459     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7460     __get_user(host_tx->tai, &target_tx->tai);
7461 
7462     unlock_user_struct(target_tx, target_addr, 0);
7463     return 0;
7464 }
7465 
7466 static inline abi_long host_to_target_timex(abi_long target_addr,
7467                                             struct timex *host_tx)
7468 {
7469     struct target_timex *target_tx;
7470 
7471     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7472         return -TARGET_EFAULT;
7473     }
7474 
7475     __put_user(host_tx->modes, &target_tx->modes);
7476     __put_user(host_tx->offset, &target_tx->offset);
7477     __put_user(host_tx->freq, &target_tx->freq);
7478     __put_user(host_tx->maxerror, &target_tx->maxerror);
7479     __put_user(host_tx->esterror, &target_tx->esterror);
7480     __put_user(host_tx->status, &target_tx->status);
7481     __put_user(host_tx->constant, &target_tx->constant);
7482     __put_user(host_tx->precision, &target_tx->precision);
7483     __put_user(host_tx->tolerance, &target_tx->tolerance);
7484     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7485     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7486     __put_user(host_tx->tick, &target_tx->tick);
7487     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7488     __put_user(host_tx->jitter, &target_tx->jitter);
7489     __put_user(host_tx->shift, &target_tx->shift);
7490     __put_user(host_tx->stabil, &target_tx->stabil);
7491     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7492     __put_user(host_tx->calcnt, &target_tx->calcnt);
7493     __put_user(host_tx->errcnt, &target_tx->errcnt);
7494     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7495     __put_user(host_tx->tai, &target_tx->tai);
7496 
7497     unlock_user_struct(target_tx, target_addr, 1);
7498     return 0;
7499 }
7500 #endif
7501 
7502 
7503 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7504 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7505                                               abi_long target_addr)
7506 {
7507     struct target__kernel_timex *target_tx;
7508 
7509     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7510                                  offsetof(struct target__kernel_timex,
7511                                           time))) {
7512         return -TARGET_EFAULT;
7513     }
7514 
7515     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7516         return -TARGET_EFAULT;
7517     }
7518 
7519     __get_user(host_tx->modes, &target_tx->modes);
7520     __get_user(host_tx->offset, &target_tx->offset);
7521     __get_user(host_tx->freq, &target_tx->freq);
7522     __get_user(host_tx->maxerror, &target_tx->maxerror);
7523     __get_user(host_tx->esterror, &target_tx->esterror);
7524     __get_user(host_tx->status, &target_tx->status);
7525     __get_user(host_tx->constant, &target_tx->constant);
7526     __get_user(host_tx->precision, &target_tx->precision);
7527     __get_user(host_tx->tolerance, &target_tx->tolerance);
7528     __get_user(host_tx->tick, &target_tx->tick);
7529     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7530     __get_user(host_tx->jitter, &target_tx->jitter);
7531     __get_user(host_tx->shift, &target_tx->shift);
7532     __get_user(host_tx->stabil, &target_tx->stabil);
7533     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7534     __get_user(host_tx->calcnt, &target_tx->calcnt);
7535     __get_user(host_tx->errcnt, &target_tx->errcnt);
7536     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7537     __get_user(host_tx->tai, &target_tx->tai);
7538 
7539     unlock_user_struct(target_tx, target_addr, 0);
7540     return 0;
7541 }
7542 
7543 static inline abi_long host_to_target_timex64(abi_long target_addr,
7544                                               struct timex *host_tx)
7545 {
7546     struct target__kernel_timex *target_tx;
7547 
7548     if (copy_to_user_timeval64(target_addr +
7549                                offsetof(struct target__kernel_timex, time),
7550                                &host_tx->time)) {
7551         return -TARGET_EFAULT;
7552     }
7553 
7554     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7555         return -TARGET_EFAULT;
7556     }
7557 
7558     __put_user(host_tx->modes, &target_tx->modes);
7559     __put_user(host_tx->offset, &target_tx->offset);
7560     __put_user(host_tx->freq, &target_tx->freq);
7561     __put_user(host_tx->maxerror, &target_tx->maxerror);
7562     __put_user(host_tx->esterror, &target_tx->esterror);
7563     __put_user(host_tx->status, &target_tx->status);
7564     __put_user(host_tx->constant, &target_tx->constant);
7565     __put_user(host_tx->precision, &target_tx->precision);
7566     __put_user(host_tx->tolerance, &target_tx->tolerance);
7567     __put_user(host_tx->tick, &target_tx->tick);
7568     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7569     __put_user(host_tx->jitter, &target_tx->jitter);
7570     __put_user(host_tx->shift, &target_tx->shift);
7571     __put_user(host_tx->stabil, &target_tx->stabil);
7572     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7573     __put_user(host_tx->calcnt, &target_tx->calcnt);
7574     __put_user(host_tx->errcnt, &target_tx->errcnt);
7575     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7576     __put_user(host_tx->tai, &target_tx->tai);
7577 
7578     unlock_user_struct(target_tx, target_addr, 1);
7579     return 0;
7580 }
7581 #endif
7582 
7583 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7584 #define sigev_notify_thread_id _sigev_un._tid
7585 #endif
7586 
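/* Copy a struct sigevent from guest memory to the host. */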
7587 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7588                                                abi_ulong target_addr)
7589 {
7590     struct target_sigevent *target_sevp;
7591 
7592     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7593         return -TARGET_EFAULT;
7594     }
7595 
7596     /* This union is awkward on 64 bit systems because it has a 32 bit
7597      * integer and a pointer in it; we follow the conversion approach
7598      * used for handling sigval types in signal.c so the guest should get
7599      * the correct value back even if we did a 64 bit byteswap and it's
7600      * using the 32 bit integer.
7601      */
7602     host_sevp->sigev_value.sival_ptr =
7603         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7604     host_sevp->sigev_signo =
7605         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7606     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7607     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7608 
7609     unlock_user_struct(target_sevp, target_addr, 1);
7610     return 0;
7611 }
7612 
7613 #if defined(TARGET_NR_mlockall)
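/* Translate guest MCL_* flags for mlockall() to their host values. */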
7614 static inline int target_to_host_mlockall_arg(int arg)
7615 {
7616     int result = 0;
7617 
7618     if (arg & TARGET_MCL_CURRENT) {
7619         result |= MCL_CURRENT;
7620     }
7621     if (arg & TARGET_MCL_FUTURE) {
7622         result |= MCL_FUTURE;
7623     }
7624 #ifdef MCL_ONFAULT
7625     if (arg & TARGET_MCL_ONFAULT) {
7626         result |= MCL_ONFAULT;
7627     }
7628 #endif
7629 
7630     return result;
7631 }
7632 #endif
7633 
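/*
 * Translate guest MS_* flags for msync(); any unknown bits are passed
 * through unchanged so the host syscall can reject them.
 */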
7634 static inline int target_to_host_msync_arg(abi_long arg)
7635 {
7636     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7637            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7638            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7639            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7640 }
7641 
7642 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7643      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7644      defined(TARGET_NR_newfstatat))
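/*
 * Copy a host struct stat into the guest's stat64 layout (or plain stat
 * where the target has no separate 64-bit variant), including the ARM
 * EABI flavour of the structure.
 */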
7645 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7646                                              abi_ulong target_addr,
7647                                              struct stat *host_st)
7648 {
7649 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7650     if (cpu_env->eabi) {
7651         struct target_eabi_stat64 *target_st;
7652 
7653         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7654             return -TARGET_EFAULT;
7655         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7656         __put_user(host_st->st_dev, &target_st->st_dev);
7657         __put_user(host_st->st_ino, &target_st->st_ino);
7658 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7659         __put_user(host_st->st_ino, &target_st->__st_ino);
7660 #endif
7661         __put_user(host_st->st_mode, &target_st->st_mode);
7662         __put_user(host_st->st_nlink, &target_st->st_nlink);
7663         __put_user(host_st->st_uid, &target_st->st_uid);
7664         __put_user(host_st->st_gid, &target_st->st_gid);
7665         __put_user(host_st->st_rdev, &target_st->st_rdev);
7666         __put_user(host_st->st_size, &target_st->st_size);
7667         __put_user(host_st->st_blksize, &target_st->st_blksize);
7668         __put_user(host_st->st_blocks, &target_st->st_blocks);
7669         __put_user(host_st->st_atime, &target_st->target_st_atime);
7670         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7671         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7672 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7673         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7674         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7675         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7676 #endif
7677         unlock_user_struct(target_st, target_addr, 1);
7678     } else
7679 #endif
7680     {
7681 #if defined(TARGET_HAS_STRUCT_STAT64)
7682         struct target_stat64 *target_st;
7683 #else
7684         struct target_stat *target_st;
7685 #endif
7686 
7687         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7688             return -TARGET_EFAULT;
7689         memset(target_st, 0, sizeof(*target_st));
7690         __put_user(host_st->st_dev, &target_st->st_dev);
7691         __put_user(host_st->st_ino, &target_st->st_ino);
7692 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7693         __put_user(host_st->st_ino, &target_st->__st_ino);
7694 #endif
7695         __put_user(host_st->st_mode, &target_st->st_mode);
7696         __put_user(host_st->st_nlink, &target_st->st_nlink);
7697         __put_user(host_st->st_uid, &target_st->st_uid);
7698         __put_user(host_st->st_gid, &target_st->st_gid);
7699         __put_user(host_st->st_rdev, &target_st->st_rdev);
7700         /* XXX: better use of kernel struct */
7701         __put_user(host_st->st_size, &target_st->st_size);
7702         __put_user(host_st->st_blksize, &target_st->st_blksize);
7703         __put_user(host_st->st_blocks, &target_st->st_blocks);
7704         __put_user(host_st->st_atime, &target_st->target_st_atime);
7705         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7706         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7707 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7708         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7709         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7710         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7711 #endif
7712         unlock_user_struct(target_st, target_addr, 1);
7713     }
7714 
7715     return 0;
7716 }
7717 #endif
7718 
7719 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7720 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7721                                             abi_ulong target_addr)
7722 {
7723     struct target_statx *target_stx;
7724 
7725     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7726         return -TARGET_EFAULT;
7727     }
7728     memset(target_stx, 0, sizeof(*target_stx));
7729 
7730     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7731     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7732     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7733     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7734     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7735     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7736     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7737     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7738     __put_user(host_stx->stx_size, &target_stx->stx_size);
7739     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7740     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7741     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7742     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7743     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7744     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7745     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7746     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7747     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7748     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7749     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7750     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7751     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7752     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7753 
7754     unlock_user_struct(target_stx, target_addr, 1);
7755 
7756     return 0;
7757 }
7758 #endif
7759 
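/*
 * Invoke the host futex syscall directly, choosing between __NR_futex and
 * __NR_futex_time64 according to the width of the host's time_t.
 */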
7760 static int do_sys_futex(int *uaddr, int op, int val,
7761                          const struct timespec *timeout, int *uaddr2,
7762                          int val3)
7763 {
7764 #if HOST_LONG_BITS == 64
7765 #if defined(__NR_futex)
7766     /* always a 64-bit time_t, it doesn't define _time64 version  */
7767     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7768 
7769 #endif
7770 #else /* HOST_LONG_BITS == 64 */
7771 #if defined(__NR_futex_time64)
7772     if (sizeof(timeout->tv_sec) == 8) {
7773         /* _time64 function on 32bit arch */
7774         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7775     }
7776 #endif
7777 #if defined(__NR_futex)
7778     /* old function on 32bit arch */
7779     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7780 #endif
7781 #endif /* HOST_LONG_BITS == 64 */
7782     g_assert_not_reached();
7783 }
7784 
7785 static int do_safe_futex(int *uaddr, int op, int val,
7786                          const struct timespec *timeout, int *uaddr2,
7787                          int val3)
7788 {
7789 #if HOST_LONG_BITS == 64
7790 #if defined(__NR_futex)
7791     /* always a 64-bit time_t, it doesn't define _time64 version  */
7792     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7793 #endif
7794 #else /* HOST_LONG_BITS == 64 */
7795 #if defined(__NR_futex_time64)
7796     if (sizeof(timeout->tv_sec) == 8) {
7797         /* _time64 function on 32bit arch */
7798         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7799                                            val3));
7800     }
7801 #endif
7802 #if defined(__NR_futex)
7803     /* old function on 32bit arch */
7804     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7805 #endif
7806 #endif /* HOST_LONG_BITS == 64 */
7807     return -TARGET_ENOSYS;
7808 }
7809 
7810 /* ??? Using host futex calls even when target atomic operations
7811    are not really atomic probably breaks things.  However, implementing
7812    futexes locally would make futexes shared between multiple processes
7813    tricky.  Then again, they're probably useless because guest atomic
7814    operations won't work either.  */
7815 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7816 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7817                     int op, int val, target_ulong timeout,
7818                     target_ulong uaddr2, int val3)
7819 {
7820     struct timespec ts, *pts = NULL;
7821     void *haddr2 = NULL;
7822     int base_op;
7823 
7824     /* We assume FUTEX_* constants are the same on both host and target. */
7825 #ifdef FUTEX_CMD_MASK
7826     base_op = op & FUTEX_CMD_MASK;
7827 #else
7828     base_op = op;
7829 #endif
7830     switch (base_op) {
7831     case FUTEX_WAIT:
7832     case FUTEX_WAIT_BITSET:
7833         val = tswap32(val);
7834         break;
7835     case FUTEX_WAIT_REQUEUE_PI:
7836         val = tswap32(val);
7837         haddr2 = g2h(cpu, uaddr2);
7838         break;
7839     case FUTEX_LOCK_PI:
7840     case FUTEX_LOCK_PI2:
7841         break;
7842     case FUTEX_WAKE:
7843     case FUTEX_WAKE_BITSET:
7844     case FUTEX_TRYLOCK_PI:
7845     case FUTEX_UNLOCK_PI:
7846         timeout = 0;
7847         break;
7848     case FUTEX_FD:
7849         val = target_to_host_signal(val);
7850         timeout = 0;
7851         break;
7852     case FUTEX_CMP_REQUEUE:
7853     case FUTEX_CMP_REQUEUE_PI:
7854         val3 = tswap32(val3);
7855         /* fall through */
7856     case FUTEX_REQUEUE:
7857     case FUTEX_WAKE_OP:
7858         /*
7859          * For these, the 4th argument is not TIMEOUT, but VAL2.
7860          * But the prototype of do_safe_futex takes a pointer, so
7861          * insert casts to satisfy the compiler.  We do not need
7862          * to tswap VAL2 since it's not compared to guest memory.
7863          */
7864         pts = (struct timespec *)(uintptr_t)timeout;
7865         timeout = 0;
7866         haddr2 = g2h(cpu, uaddr2);
7867         break;
7868     default:
7869         return -TARGET_ENOSYS;
7870     }
7871     if (timeout) {
7872         pts = &ts;
7873         if (time64
7874             ? target_to_host_timespec64(pts, timeout)
7875             : target_to_host_timespec(pts, timeout)) {
7876             return -TARGET_EFAULT;
7877         }
7878     }
7879     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7880 }
7881 #endif
7882 
7883 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
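/*
 * Implement name_to_handle_at(): the handle is opaque apart from its
 * handle_bytes and handle_type header fields, which are byte-swapped
 * for the guest.
 */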
7884 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7885                                      abi_long handle, abi_long mount_id,
7886                                      abi_long flags)
7887 {
7888     struct file_handle *target_fh;
7889     struct file_handle *fh;
7890     int mid = 0;
7891     abi_long ret;
7892     char *name;
7893     unsigned int size, total_size;
7894 
7895     if (get_user_s32(size, handle)) {
7896         return -TARGET_EFAULT;
7897     }
7898 
7899     name = lock_user_string(pathname);
7900     if (!name) {
7901         return -TARGET_EFAULT;
7902     }
7903 
7904     total_size = sizeof(struct file_handle) + size;
7905     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7906     if (!target_fh) {
7907         unlock_user(name, pathname, 0);
7908         return -TARGET_EFAULT;
7909     }
7910 
7911     fh = g_malloc0(total_size);
7912     fh->handle_bytes = size;
7913 
7914     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7915     unlock_user(name, pathname, 0);
7916 
7917     /* man name_to_handle_at(2):
7918      * Other than the use of the handle_bytes field, the caller should treat
7919      * the file_handle structure as an opaque data type
7920      */
7921 
7922     memcpy(target_fh, fh, total_size);
7923     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7924     target_fh->handle_type = tswap32(fh->handle_type);
7925     g_free(fh);
7926     unlock_user(target_fh, handle, total_size);
7927 
7928     if (put_user_s32(mid, mount_id)) {
7929         return -TARGET_EFAULT;
7930     }
7931 
7932     return ret;
7933 
7934 }
7935 #endif
7936 
7937 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7938 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7939                                      abi_long flags)
7940 {
7941     struct file_handle *target_fh;
7942     struct file_handle *fh;
7943     unsigned int size, total_size;
7944     abi_long ret;
7945 
7946     if (get_user_s32(size, handle)) {
7947         return -TARGET_EFAULT;
7948     }
7949 
7950     total_size = sizeof(struct file_handle) + size;
7951     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7952     if (!target_fh) {
7953         return -TARGET_EFAULT;
7954     }
7955 
7956     fh = g_memdup(target_fh, total_size);
7957     fh->handle_bytes = size;
7958     fh->handle_type = tswap32(target_fh->handle_type);
7959 
7960     ret = get_errno(open_by_handle_at(mount_fd, fh,
7961                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7962 
7963     g_free(fh);
7964 
7965     unlock_user(target_fh, handle, total_size);
7966 
7967     return ret;
7968 }
7969 #endif
7970 
7971 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7972 
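/*
 * Implement signalfd()/signalfd4(): convert the guest signal mask and
 * flags, create the host signalfd, and register an fd translator so the
 * data read back is converted to the target layout.
 */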
7973 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7974 {
7975     int host_flags;
7976     target_sigset_t *target_mask;
7977     sigset_t host_mask;
7978     abi_long ret;
7979 
7980     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7981         return -TARGET_EINVAL;
7982     }
7983     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7984         return -TARGET_EFAULT;
7985     }
7986 
7987     target_to_host_sigset(&host_mask, target_mask);
7988 
7989     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7990 
7991     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7992     if (ret >= 0) {
7993         fd_trans_register(ret, &target_signalfd_trans);
7994     }
7995 
7996     unlock_user_struct(target_mask, mask, 0);
7997 
7998     return ret;
7999 }
8000 #endif
8001 
8002 /* Map host to target signal numbers for the wait family of syscalls.
8003    Assume all other status bits are the same.  */
8004 int host_to_target_waitstatus(int status)
8005 {
8006     if (WIFSIGNALED(status)) {
8007         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8008     }
8009     if (WIFSTOPPED(status)) {
8010         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8011                | (status & 0xff);
8012     }
8013     return status;
8014 }
8015 
8016 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8017 {
8018     CPUState *cpu = env_cpu(cpu_env);
8019     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8020     int i;
8021 
8022     for (i = 0; i < bprm->argc; i++) {
8023         size_t len = strlen(bprm->argv[i]) + 1;
8024 
8025         if (write(fd, bprm->argv[i], len) != len) {
8026             return -1;
8027         }
8028     }
8029 
8030     return 0;
8031 }
8032 
8033 struct open_self_maps_data {
8034     TaskState *ts;
8035     IntervalTreeRoot *host_maps;
8036     int fd;
8037     bool smaps;
8038 };
8039 
8040 /*
8041  * Subroutine to output one line of /proc/self/maps,
8042  * or one region of /proc/self/smaps.
8043  */
8044 
8045 #ifdef TARGET_HPPA
8046 # define test_stack(S, E, L)  (E == L)
8047 #else
8048 # define test_stack(S, E, L)  (S == L)
8049 #endif
8050 
8051 static void open_self_maps_4(const struct open_self_maps_data *d,
8052                              const MapInfo *mi, abi_ptr start,
8053                              abi_ptr end, unsigned flags)
8054 {
8055     const struct image_info *info = d->ts->info;
8056     const char *path = mi->path;
8057     uint64_t offset;
8058     int fd = d->fd;
8059     int count;
8060 
8061     if (test_stack(start, end, info->stack_limit)) {
8062         path = "[stack]";
8063     } else if (start == info->brk) {
8064         path = "[heap]";
8065     } else if (start == info->vdso) {
8066         path = "[vdso]";
8067 #ifdef TARGET_X86_64
8068     } else if (start == TARGET_VSYSCALL_PAGE) {
8069         path = "[vsyscall]";
8070 #endif
8071     }
8072 
8073     /* Except for the null device (MAP_ANON), adjust the offset for this fragment. */
8074     offset = mi->offset;
8075     if (mi->dev) {
8076         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8077         offset += hstart - mi->itree.start;
8078     }
8079 
8080     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8081                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8082                     start, end,
8083                     (flags & PAGE_READ) ? 'r' : '-',
8084                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8085                     (flags & PAGE_EXEC) ? 'x' : '-',
8086                     mi->is_priv ? 'p' : 's',
8087                     offset, major(mi->dev), minor(mi->dev),
8088                     (uint64_t)mi->inode);
8089     if (path) {
8090         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8091     } else {
8092         dprintf(fd, "\n");
8093     }
8094 
8095     if (d->smaps) {
8096         unsigned long size = end - start;
8097         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8098         unsigned long size_kb = size >> 10;
8099 
8100         dprintf(fd, "Size:                  %lu kB\n"
8101                 "KernelPageSize:        %lu kB\n"
8102                 "MMUPageSize:           %lu kB\n"
8103                 "Rss:                   0 kB\n"
8104                 "Pss:                   0 kB\n"
8105                 "Pss_Dirty:             0 kB\n"
8106                 "Shared_Clean:          0 kB\n"
8107                 "Shared_Dirty:          0 kB\n"
8108                 "Private_Clean:         0 kB\n"
8109                 "Private_Dirty:         0 kB\n"
8110                 "Referenced:            0 kB\n"
8111                 "Anonymous:             %lu kB\n"
8112                 "LazyFree:              0 kB\n"
8113                 "AnonHugePages:         0 kB\n"
8114                 "ShmemPmdMapped:        0 kB\n"
8115                 "FilePmdMapped:         0 kB\n"
8116                 "Shared_Hugetlb:        0 kB\n"
8117                 "Private_Hugetlb:       0 kB\n"
8118                 "Swap:                  0 kB\n"
8119                 "SwapPss:               0 kB\n"
8120                 "Locked:                0 kB\n"
8121                 "THPeligible:    0\n"
8122                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8123                 size_kb, page_size_kb, page_size_kb,
8124                 (flags & PAGE_ANON ? size_kb : 0),
8125                 (flags & PAGE_READ) ? " rd" : "",
8126                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8127                 (flags & PAGE_EXEC) ? " ex" : "",
8128                 mi->is_priv ? "" : " sh",
8129                 (flags & PAGE_READ) ? " mr" : "",
8130                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8131                 (flags & PAGE_EXEC) ? " me" : "",
8132                 mi->is_priv ? "" : " ms");
8133     }
8134 }
8135 
8136 /*
8137  * Callback for walk_memory_regions, when read_self_maps() fails.
8138  * Proceed without the benefit of host /proc/self/maps cross-check.
8139  */
8140 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8141                             target_ulong guest_end, unsigned long flags)
8142 {
8143     static const MapInfo mi = { .is_priv = true };
8144 
8145     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8146     return 0;
8147 }
8148 
8149 /*
8150  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8151  */
8152 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8153                             target_ulong guest_end, unsigned long flags)
8154 {
8155     const struct open_self_maps_data *d = opaque;
8156     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8157     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8158 
8159 #ifdef TARGET_X86_64
8160     /*
8161      * Because of the extremely high position of the page within the guest
8162      * virtual address space, this is not backed by host memory at all.
8163      * Therefore the loop below would fail.  This is the only instance
8164      * of not having host backing memory.
8165      */
8166     if (guest_start == TARGET_VSYSCALL_PAGE) {
8167         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8168     }
8169 #endif
8170 
8171     while (1) {
8172         IntervalTreeNode *n =
8173             interval_tree_iter_first(d->host_maps, host_start, host_start);
8174         MapInfo *mi = container_of(n, MapInfo, itree);
8175         uintptr_t this_hlast = MIN(host_last, n->last);
8176         target_ulong this_gend = h2g(this_hlast) + 1;
8177 
8178         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8179 
8180         if (this_hlast == host_last) {
8181             return 0;
8182         }
8183         host_start = this_hlast + 1;
8184         guest_start = h2g(host_start);
8185     }
8186 }
8187 
8188 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8189 {
8190     struct open_self_maps_data d = {
8191         .ts = get_task_state(env_cpu(env)),
8192         .fd = fd,
8193         .smaps = smaps
8194     };
8195 
8196     mmap_lock();
8197     d.host_maps = read_self_maps();
8198     if (d.host_maps) {
8199         walk_memory_regions(&d, open_self_maps_2);
8200         free_self_maps(d.host_maps);
8201     } else {
8202         walk_memory_regions(&d, open_self_maps_3);
8203     }
8204     mmap_unlock();
8205     return 0;
8206 }
8207 
8208 static int open_self_maps(CPUArchState *cpu_env, int fd)
8209 {
8210     return open_self_maps_1(cpu_env, fd, false);
8211 }
8212 
8213 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8214 {
8215     return open_self_maps_1(cpu_env, fd, true);
8216 }
8217 
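/*
 * Synthesize /proc/self/stat for the guest: real values are provided for
 * the pid, command name, state, ppid, thread count, start time and stack
 * address; all remaining fields read as zero.
 */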
8218 static int open_self_stat(CPUArchState *cpu_env, int fd)
8219 {
8220     CPUState *cpu = env_cpu(cpu_env);
8221     TaskState *ts = get_task_state(cpu);
8222     g_autoptr(GString) buf = g_string_new(NULL);
8223     int i;
8224 
8225     for (i = 0; i < 44; i++) {
8226         if (i == 0) {
8227             /* pid */
8228             g_string_printf(buf, FMT_pid " ", getpid());
8229         } else if (i == 1) {
8230             /* app name */
8231             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8232             bin = bin ? bin + 1 : ts->bprm->argv[0];
8233             g_string_printf(buf, "(%.15s) ", bin);
8234         } else if (i == 2) {
8235             /* task state */
8236             g_string_assign(buf, "R "); /* we are running right now */
8237         } else if (i == 3) {
8238             /* ppid */
8239             g_string_printf(buf, FMT_pid " ", getppid());
8240         } else if (i == 19) {
8241             /* num_threads */
8242             int cpus = 0;
8243             WITH_RCU_READ_LOCK_GUARD() {
8244                 CPUState *cpu_iter;
8245                 CPU_FOREACH(cpu_iter) {
8246                     cpus++;
8247                 }
8248             }
8249             g_string_printf(buf, "%d ", cpus);
8250         } else if (i == 21) {
8251             /* starttime */
8252             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8253         } else if (i == 27) {
8254             /* stack bottom */
8255             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8256         } else {
8257             /* for the rest, there is MasterCard */
8258             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8259         }
8260 
8261         if (write(fd, buf->str, buf->len) != buf->len) {
8262             return -1;
8263         }
8264     }
8265 
8266     return 0;
8267 }
8268 
8269 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8270 {
8271     CPUState *cpu = env_cpu(cpu_env);
8272     TaskState *ts = get_task_state(cpu);
8273     abi_ulong auxv = ts->info->saved_auxv;
8274     abi_ulong len = ts->info->auxv_len;
8275     char *ptr;
8276 
8277     /*
8278      * The auxiliary vector is stored on the target process's stack.
8279      * Read the whole auxv vector and copy it to the file.
8280      */
8281     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8282     if (ptr != NULL) {
8283         while (len > 0) {
8284             ssize_t r;
8285             r = write(fd, ptr, len);
8286             if (r <= 0) {
8287                 break;
8288             }
8289             len -= r;
8290             ptr += r;
8291         }
8292         lseek(fd, 0, SEEK_SET);
8293         unlock_user(ptr, auxv, len);
8294     }
8295 
8296     return 0;
8297 }
8298 
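/*
 * Return nonzero if filename refers to the given entry under /proc/self/
 * or /proc/<pid>/ for our own pid.
 */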
8299 static int is_proc_myself(const char *filename, const char *entry)
8300 {
8301     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8302         filename += strlen("/proc/");
8303         if (!strncmp(filename, "self/", strlen("self/"))) {
8304             filename += strlen("self/");
8305         } else if (*filename >= '1' && *filename <= '9') {
8306             char myself[80];
8307             snprintf(myself, sizeof(myself), "%d/", getpid());
8308             if (!strncmp(filename, myself, strlen(myself))) {
8309                 filename += strlen(myself);
8310             } else {
8311                 return 0;
8312             }
8313         } else {
8314             return 0;
8315         }
8316         if (!strcmp(filename, entry)) {
8317             return 1;
8318         }
8319     }
8320     return 0;
8321 }
8322 
8323 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8324                       const char *fmt, int code)
8325 {
8326     if (logfile) {
8327         CPUState *cs = env_cpu(env);
8328 
8329         fprintf(logfile, fmt, code);
8330         fprintf(logfile, "Failing executable: %s\n", exec_path);
8331         cpu_dump_state(cs, logfile, 0);
8332         open_self_maps(env, fileno(logfile));
8333     }
8334 }
8335 
8336 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8337 {
8338     /* dump to console */
8339     excp_dump_file(stderr, env, fmt, code);
8340 
8341     /* dump to log file */
8342     if (qemu_log_separate()) {
8343         FILE *logfile = qemu_log_trylock();
8344 
8345         excp_dump_file(logfile, env, fmt, code);
8346         qemu_log_unlock(logfile);
8347     }
8348 }
8349 
8350 #include "target_proc.h"
8351 
8352 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8353     defined(HAVE_ARCH_PROC_CPUINFO) || \
8354     defined(HAVE_ARCH_PROC_HARDWARE)
8355 static int is_proc(const char *filename, const char *entry)
8356 {
8357     return strcmp(filename, entry) == 0;
8358 }
8359 #endif
8360 
8361 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8362 static int open_net_route(CPUArchState *cpu_env, int fd)
8363 {
8364     FILE *fp;
8365     char *line = NULL;
8366     size_t len = 0;
8367     ssize_t read;
8368 
8369     fp = fopen("/proc/net/route", "r");
8370     if (fp == NULL) {
8371         return -1;
8372     }
8373 
8374     /* read header */
8375 
8376     read = getline(&line, &len, fp);
8377     dprintf(fd, "%s", line);
8378 
8379     /* read routes */
8380 
8381     while ((read = getline(&line, &len, fp)) != -1) {
8382         char iface[16];
8383         uint32_t dest, gw, mask;
8384         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8385         int fields;
8386 
8387         fields = sscanf(line,
8388                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8389                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8390                         &mask, &mtu, &window, &irtt);
8391         if (fields != 11) {
8392             continue;
8393         }
8394         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8395                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8396                 metric, tswap32(mask), mtu, window, irtt);
8397     }
8398 
8399     free(line);
8400     fclose(fp);
8401 
8402     return 0;
8403 }
8404 #endif
8405 
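/*
 * Intercept opens of /proc files that QEMU must emulate for the guest
 * (maps, smaps, stat, auxv, cmdline and a few other entries).  The
 * synthesized contents are written to a memfd or temporary file and that
 * descriptor is returned; /proc/self/exe is redirected to the guest
 * binary.  Returns -2 if the path needs no special handling.
 */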
8406 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8407                               const char *fname, int flags, mode_t mode,
8408                               int openat2_resolve, bool safe)
8409 {
8410     g_autofree char *proc_name = NULL;
8411     const char *pathname;
8412     struct fake_open {
8413         const char *filename;
8414         int (*fill)(CPUArchState *cpu_env, int fd);
8415         int (*cmp)(const char *s1, const char *s2);
8416     };
8417     const struct fake_open *fake_open;
8418     static const struct fake_open fakes[] = {
8419         { "maps", open_self_maps, is_proc_myself },
8420         { "smaps", open_self_smaps, is_proc_myself },
8421         { "stat", open_self_stat, is_proc_myself },
8422         { "auxv", open_self_auxv, is_proc_myself },
8423         { "cmdline", open_self_cmdline, is_proc_myself },
8424 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8425         { "/proc/net/route", open_net_route, is_proc },
8426 #endif
8427 #if defined(HAVE_ARCH_PROC_CPUINFO)
8428         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8429 #endif
8430 #if defined(HAVE_ARCH_PROC_HARDWARE)
8431         { "/proc/hardware", open_hardware, is_proc },
8432 #endif
8433         { NULL, NULL, NULL }
8434     };
8435 
8436     /* If this is a file from the /proc/ filesystem, expand it to the full name. */
8437     proc_name = realpath(fname, NULL);
8438     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8439         pathname = proc_name;
8440     } else {
8441         pathname = fname;
8442     }
8443 
8444     if (is_proc_myself(pathname, "exe")) {
8445         /* Honor openat2 resolve flags */
8446         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8447             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8448             errno = ELOOP;
8449             return -1;
8450         }
8451         if (safe) {
8452             return safe_openat(dirfd, exec_path, flags, mode);
8453         } else {
8454             return openat(dirfd, exec_path, flags, mode);
8455         }
8456     }
8457 
8458     for (fake_open = fakes; fake_open->filename; fake_open++) {
8459         if (fake_open->cmp(pathname, fake_open->filename)) {
8460             break;
8461         }
8462     }
8463 
8464     if (fake_open->filename) {
8465         const char *tmpdir;
8466         char filename[PATH_MAX];
8467         int fd, r;
8468 
8469         fd = memfd_create("qemu-open", 0);
8470         if (fd < 0) {
8471             if (errno != ENOSYS) {
8472                 return fd;
8473             }
8474             /* create temporary file to map stat to */
8475             tmpdir = getenv("TMPDIR");
8476             if (!tmpdir)
8477                 tmpdir = "/tmp";
8478             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8479             fd = mkstemp(filename);
8480             if (fd < 0) {
8481                 return fd;
8482             }
8483             unlink(filename);
8484         }
8485 
8486         if ((r = fake_open->fill(cpu_env, fd))) {
8487             int e = errno;
8488             close(fd);
8489             errno = e;
8490             return r;
8491         }
8492         lseek(fd, 0, SEEK_SET);
8493 
8494         return fd;
8495     }
8496 
8497     return -2;
8498 }
8499 
8500 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8501                     int flags, mode_t mode, bool safe)
8502 {
8503     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8504     if (fd > -2) {
8505         return fd;
8506     }
8507 
8508     if (safe) {
8509         return safe_openat(dirfd, path(pathname), flags, mode);
8510     } else {
8511         return openat(dirfd, path(pathname), flags, mode);
8512     }
8513 }
8514 
8515 
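/*
 * Implement openat2(): copy struct open_how from the guest, byte-swap its
 * fields, honour the fake /proc opens above, and otherwise forward the
 * request to the host.
 */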
8516 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8517                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8518                       abi_ulong guest_size)
8519 {
8520     struct open_how_ver0 how = {0};
8521     char *pathname;
8522     int ret;
8523 
8524     if (guest_size < sizeof(struct target_open_how_ver0)) {
8525         return -TARGET_EINVAL;
8526     }
8527     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8528     if (ret) {
8529         if (ret == -TARGET_E2BIG) {
8530             qemu_log_mask(LOG_UNIMP,
8531                           "Unimplemented openat2 open_how size: "
8532                           TARGET_ABI_FMT_lu "\n", guest_size);
8533         }
8534         return ret;
8535     }
8536     pathname = lock_user_string(guest_pathname);
8537     if (!pathname) {
8538         return -TARGET_EFAULT;
8539     }
8540 
8541     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8542     how.mode = tswap64(how.mode);
8543     how.resolve = tswap64(how.resolve);
8544     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8545                                 how.resolve, true);
8546     if (fd > -2) {
8547         ret = get_errno(fd);
8548     } else {
8549         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8550                                      sizeof(struct open_how_ver0)));
8551     }
8552 
8553     fd_trans_unregister(ret);
8554     unlock_user(pathname, guest_pathname, 0);
8555     return ret;
8556 }
8557 
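/*
 * Common helper for readlink()-style syscalls which intercepts
 * /proc/self/exe so the guest sees the path of the emulated binary
 * rather than that of QEMU.
 */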
8558 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8559 {
8560     ssize_t ret;
8561 
8562     if (!pathname || !buf) {
8563         errno = EFAULT;
8564         return -1;
8565     }
8566 
8567     if (!bufsiz) {
8568         /* Short circuit this for the magic exe check. */
8569         errno = EINVAL;
8570         return -1;
8571     }
8572 
8573     if (is_proc_myself((const char *)pathname, "exe")) {
8574         /*
8575          * Don't worry about sign mismatch as earlier mapping
8576          * logic would have thrown a bad address error.
8577          */
8578         ret = MIN(strlen(exec_path), bufsiz);
8579         /* We cannot NUL terminate the string. */
8580         memcpy(buf, exec_path, ret);
8581     } else {
8582         ret = readlink(path(pathname), buf, bufsiz);
8583     }
8584 
8585     return ret;
8586 }
8587 
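/*
 * Common implementation of execve() and execveat(): gather the argument
 * and environment vectors from guest memory, redirect /proc/self/exe to
 * the emulated binary, and invoke the host syscall through the safe
 * wrapper (see the comment below).
 */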
8588 static int do_execv(CPUArchState *cpu_env, int dirfd,
8589                     abi_long pathname, abi_long guest_argp,
8590                     abi_long guest_envp, int flags, bool is_execveat)
8591 {
8592     int ret;
8593     char **argp, **envp;
8594     int argc, envc;
8595     abi_ulong gp;
8596     abi_ulong addr;
8597     char **q;
8598     void *p;
8599 
8600     argc = 0;
8601 
8602     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8603         if (get_user_ual(addr, gp)) {
8604             return -TARGET_EFAULT;
8605         }
8606         if (!addr) {
8607             break;
8608         }
8609         argc++;
8610     }
8611     envc = 0;
8612     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8613         if (get_user_ual(addr, gp)) {
8614             return -TARGET_EFAULT;
8615         }
8616         if (!addr) {
8617             break;
8618         }
8619         envc++;
8620     }
8621 
8622     argp = g_new0(char *, argc + 1);
8623     envp = g_new0(char *, envc + 1);
8624 
8625     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8626         if (get_user_ual(addr, gp)) {
8627             goto execve_efault;
8628         }
8629         if (!addr) {
8630             break;
8631         }
8632         *q = lock_user_string(addr);
8633         if (!*q) {
8634             goto execve_efault;
8635         }
8636     }
8637     *q = NULL;
8638 
8639     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8640         if (get_user_ual(addr, gp)) {
8641             goto execve_efault;
8642         }
8643         if (!addr) {
8644             break;
8645         }
8646         *q = lock_user_string(addr);
8647         if (!*q) {
8648             goto execve_efault;
8649         }
8650     }
8651     *q = NULL;
8652 
8653     /*
8654      * Although execve() is not an interruptible syscall it is
8655      * a special case where we must use the safe_syscall wrapper:
8656      * if we allow a signal to happen before we make the host
8657      * syscall then we will 'lose' it, because at the point of
8658      * execve the process leaves QEMU's control. So we use the
8659      * safe syscall wrapper to ensure that we either take the
8660      * signal as a guest signal, or else it does not happen
8661      * before the execve completes and makes it the other
8662      * program's problem.
8663      */
8664     p = lock_user_string(pathname);
8665     if (!p) {
8666         goto execve_efault;
8667     }
8668 
8669     const char *exe = p;
8670     if (is_proc_myself(p, "exe")) {
8671         exe = exec_path;
8672     }
8673     ret = is_execveat
8674         ? safe_execveat(dirfd, exe, argp, envp, flags)
8675         : safe_execve(exe, argp, envp);
8676     ret = get_errno(ret);
8677 
8678     unlock_user(p, pathname, 0);
8679 
8680     goto execve_end;
8681 
8682 execve_efault:
8683     ret = -TARGET_EFAULT;
8684 
8685 execve_end:
8686     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8687         if (get_user_ual(addr, gp) || !addr) {
8688             break;
8689         }
8690         unlock_user(*q, addr, 0);
8691     }
8692     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8693         if (get_user_ual(addr, gp) || !addr) {
8694             break;
8695         }
8696         unlock_user(*q, addr, 0);
8697     }
8698 
8699     g_free(argp);
8700     g_free(envp);
8701     return ret;
8702 }
8703 
8704 #define TIMER_MAGIC 0x0caf0000
8705 #define TIMER_MAGIC_MASK 0xffff0000
8706 
8707 /* Convert QEMU provided timer ID back to internal 16bit index format */
8708 static target_timer_t get_timer_id(abi_long arg)
8709 {
8710     target_timer_t timerid = arg;
8711 
8712     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8713         return -TARGET_EINVAL;
8714     }
8715 
8716     timerid &= 0xffff;
8717 
8718     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8719         return -TARGET_EINVAL;
8720     }
8721 
8722     return timerid;
8723 }
8724 
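/*
 * Convert a guest CPU affinity bitmap (an array of abi_ulong) into the
 * host's array-of-unsigned-long layout, bit by bit.
 */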
8725 static int target_to_host_cpu_mask(unsigned long *host_mask,
8726                                    size_t host_size,
8727                                    abi_ulong target_addr,
8728                                    size_t target_size)
8729 {
8730     unsigned target_bits = sizeof(abi_ulong) * 8;
8731     unsigned host_bits = sizeof(*host_mask) * 8;
8732     abi_ulong *target_mask;
8733     unsigned i, j;
8734 
8735     assert(host_size >= target_size);
8736 
8737     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8738     if (!target_mask) {
8739         return -TARGET_EFAULT;
8740     }
8741     memset(host_mask, 0, host_size);
8742 
8743     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8744         unsigned bit = i * target_bits;
8745         abi_ulong val;
8746 
8747         __get_user(val, &target_mask[i]);
8748         for (j = 0; j < target_bits; j++, bit++) {
8749             if (val & (1UL << j)) {
8750                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8751             }
8752         }
8753     }
8754 
8755     unlock_user(target_mask, target_addr, 0);
8756     return 0;
8757 }
8758 
8759 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8760                                    size_t host_size,
8761                                    abi_ulong target_addr,
8762                                    size_t target_size)
8763 {
8764     unsigned target_bits = sizeof(abi_ulong) * 8;
8765     unsigned host_bits = sizeof(*host_mask) * 8;
8766     abi_ulong *target_mask;
8767     unsigned i, j;
8768 
8769     assert(host_size >= target_size);
8770 
8771     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8772     if (!target_mask) {
8773         return -TARGET_EFAULT;
8774     }
8775 
8776     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8777         unsigned bit = i * target_bits;
8778         abi_ulong val = 0;
8779 
8780         for (j = 0; j < target_bits; j++, bit++) {
8781             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8782                 val |= 1UL << j;
8783             }
8784         }
8785         __put_user(val, &target_mask[i]);
8786     }
8787 
8788     unlock_user(target_mask, target_addr, target_size);
8789     return 0;
8790 }
8791 
8792 #ifdef TARGET_NR_getdents
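/*
 * Implement getdents(): read host directory entries into a bounce buffer
 * and repack them record by record into the guest's target_dirent layout,
 * rewinding the directory offset if the guest buffer fills up early.
 */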
8793 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8794 {
8795     g_autofree void *hdirp = NULL;
8796     void *tdirp;
8797     int hlen, hoff, toff;
8798     int hreclen, treclen;
8799     off_t prev_diroff = 0;
8800 
8801     hdirp = g_try_malloc(count);
8802     if (!hdirp) {
8803         return -TARGET_ENOMEM;
8804     }
8805 
8806 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8807     hlen = sys_getdents(dirfd, hdirp, count);
8808 #else
8809     hlen = sys_getdents64(dirfd, hdirp, count);
8810 #endif
8811 
8812     hlen = get_errno(hlen);
8813     if (is_error(hlen)) {
8814         return hlen;
8815     }
8816 
8817     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8818     if (!tdirp) {
8819         return -TARGET_EFAULT;
8820     }
8821 
8822     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8823 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8824         struct linux_dirent *hde = hdirp + hoff;
8825 #else
8826         struct linux_dirent64 *hde = hdirp + hoff;
8827 #endif
8828         struct target_dirent *tde = tdirp + toff;
8829         int namelen;
8830         uint8_t type;
8831 
8832         namelen = strlen(hde->d_name);
8833         hreclen = hde->d_reclen;
8834         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8835         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8836 
8837         if (toff + treclen > count) {
8838             /*
8839              * If the host struct is smaller than the target struct, or
8840              * requires less alignment and thus packs into less space,
8841              * then the host can return more entries than we can pass
8842              * on to the guest.
8843              */
8844             if (toff == 0) {
8845                 toff = -TARGET_EINVAL; /* result buffer is too small */
8846                 break;
8847             }
8848             /*
8849              * Return what we have, resetting the file pointer to the
8850              * location of the first record not returned.
8851              */
8852             lseek(dirfd, prev_diroff, SEEK_SET);
8853             break;
8854         }
8855 
8856         prev_diroff = hde->d_off;
8857         tde->d_ino = tswapal(hde->d_ino);
8858         tde->d_off = tswapal(hde->d_off);
8859         tde->d_reclen = tswap16(treclen);
8860         memcpy(tde->d_name, hde->d_name, namelen + 1);
8861 
8862         /*
8863          * The getdents type is in what was formerly a padding byte at the
8864          * end of the structure.
8865          */
8866 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8867         type = *((uint8_t *)hde + hreclen - 1);
8868 #else
8869         type = hde->d_type;
8870 #endif
8871         *((uint8_t *)tde + treclen - 1) = type;
8872     }
8873 
8874     unlock_user(tdirp, arg2, toff);
8875     return toff;
8876 }
8877 #endif /* TARGET_NR_getdents */
8878 
8879 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8880 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8881 {
8882     g_autofree void *hdirp = NULL;
8883     void *tdirp;
8884     int hlen, hoff, toff;
8885     int hreclen, treclen;
8886     off_t prev_diroff = 0;
8887 
8888     hdirp = g_try_malloc(count);
8889     if (!hdirp) {
8890         return -TARGET_ENOMEM;
8891     }
8892 
8893     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8894     if (is_error(hlen)) {
8895         return hlen;
8896     }
8897 
8898     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8899     if (!tdirp) {
8900         return -TARGET_EFAULT;
8901     }
8902 
8903     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8904         struct linux_dirent64 *hde = hdirp + hoff;
8905         struct target_dirent64 *tde = tdirp + toff;
8906         int namelen;
8907 
8908         namelen = strlen(hde->d_name) + 1;
8909         hreclen = hde->d_reclen;
8910         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8911         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8912 
8913         if (toff + treclen > count) {
8914             /*
8915              * If the host struct is smaller than the target struct, or
8916              * requires less alignment and thus packs into less space,
8917              * then the host can return more entries than we can pass
8918              * on to the guest.
8919              */
8920             if (toff == 0) {
8921                 toff = -TARGET_EINVAL; /* result buffer is too small */
8922                 break;
8923             }
8924             /*
8925              * Return what we have, resetting the file pointer to the
8926              * location of the first record not returned.
8927              */
8928             lseek(dirfd, prev_diroff, SEEK_SET);
8929             break;
8930         }
8931 
8932         prev_diroff = hde->d_off;
8933         tde->d_ino = tswap64(hde->d_ino);
8934         tde->d_off = tswap64(hde->d_off);
8935         tde->d_reclen = tswap16(treclen);
8936         tde->d_type = hde->d_type;
8937         memcpy(tde->d_name, hde->d_name, namelen);
8938     }
8939 
8940     unlock_user(tdirp, arg2, toff);
8941     return toff;
8942 }
8943 #endif /* TARGET_NR_getdents64 */
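/*
 * Illustrative guest-side sketch (hypothetical code, not part of QEMU):
 * callers walk the buffer returned by getdents64 by following d_reclen,
 * which is why do_getdents64() above must keep each target record's
 * d_reclen consistent with the space the record actually occupies.
 *
 *     struct linux_dirent64 {
 *         uint64_t d_ino;
 *         int64_t  d_off;
 *         uint16_t d_reclen;
 *         uint8_t  d_type;
 *         char     d_name[];
 *     };
 *
 *     char buf[4096];
 *     long n = syscall(SYS_getdents64, fd, buf, sizeof(buf));
 *     for (long off = 0; off < n; ) {
 *         struct linux_dirent64 *d = (void *)(buf + off);
 *         printf("%s\n", d->d_name);
 *         off += d->d_reclen;
 *     }
 */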
8944 
8945 #if defined(TARGET_NR_riscv_hwprobe)
8946 
8947 #define RISCV_HWPROBE_KEY_MVENDORID     0
8948 #define RISCV_HWPROBE_KEY_MARCHID       1
8949 #define RISCV_HWPROBE_KEY_MIMPID        2
8950 
8951 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8952 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8953 
8954 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8955 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8956 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8957 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8958 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8959 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8960 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8961 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8962 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8963 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8964 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8965 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8966 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8967 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8968 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8969 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8970 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8971 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8972 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8973 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8974 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8975 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8976 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8977 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8978 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8979 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8980 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8981 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8982 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8983 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8984 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8985 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8986 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8987 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8988 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8989 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8990 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8991 
8992 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8993 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8994 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8995 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8996 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8997 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8998 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8999 
9000 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9001 
9002 struct riscv_hwprobe {
9003     abi_llong  key;
9004     abi_ullong value;
9005 };
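/*
 * Illustrative guest-side usage (hypothetical code, not part of QEMU): the
 * guest passes an array of key/value pairs; keys the emulation understands
 * get their value filled in, while unknown keys have their key overwritten
 * with -1 by risc_hwprobe_fill_pairs() below.
 *
 *     struct riscv_hwprobe pairs[2] = {
 *         { .key = RISCV_HWPROBE_KEY_MVENDORID },
 *         { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *     };
 *     // riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags)
 *     long err = syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 *     if (!err && (pairs[1].value & RISCV_HWPROBE_IMA_V)) {
 *         // the V extension is advertised
 *     }
 */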
9006 
9007 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9008                                     struct riscv_hwprobe *pair,
9009                                     size_t pair_count)
9010 {
9011     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9012 
9013     for (; pair_count > 0; pair_count--, pair++) {
9014         abi_llong key;
9015         abi_ullong value;
9016         __put_user(0, &pair->value);
9017         __get_user(key, &pair->key);
9018         switch (key) {
9019         case RISCV_HWPROBE_KEY_MVENDORID:
9020             __put_user(cfg->mvendorid, &pair->value);
9021             break;
9022         case RISCV_HWPROBE_KEY_MARCHID:
9023             __put_user(cfg->marchid, &pair->value);
9024             break;
9025         case RISCV_HWPROBE_KEY_MIMPID:
9026             __put_user(cfg->mimpid, &pair->value);
9027             break;
9028         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9029             value = riscv_has_ext(env, RVI) &&
9030                     riscv_has_ext(env, RVM) &&
9031                     riscv_has_ext(env, RVA) ?
9032                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9033             __put_user(value, &pair->value);
9034             break;
9035         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9036             value = riscv_has_ext(env, RVF) &&
9037                     riscv_has_ext(env, RVD) ?
9038                     RISCV_HWPROBE_IMA_FD : 0;
9039             value |= riscv_has_ext(env, RVC) ?
9040                      RISCV_HWPROBE_IMA_C : 0;
9041             value |= riscv_has_ext(env, RVV) ?
9042                      RISCV_HWPROBE_IMA_V : 0;
9043             value |= cfg->ext_zba ?
9044                      RISCV_HWPROBE_EXT_ZBA : 0;
9045             value |= cfg->ext_zbb ?
9046                      RISCV_HWPROBE_EXT_ZBB : 0;
9047             value |= cfg->ext_zbs ?
9048                      RISCV_HWPROBE_EXT_ZBS : 0;
9049             value |= cfg->ext_zicboz ?
9050                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9051             value |= cfg->ext_zbc ?
9052                      RISCV_HWPROBE_EXT_ZBC : 0;
9053             value |= cfg->ext_zbkb ?
9054                      RISCV_HWPROBE_EXT_ZBKB : 0;
9055             value |= cfg->ext_zbkc ?
9056                      RISCV_HWPROBE_EXT_ZBKC : 0;
9057             value |= cfg->ext_zbkx ?
9058                      RISCV_HWPROBE_EXT_ZBKX : 0;
9059             value |= cfg->ext_zknd ?
9060                      RISCV_HWPROBE_EXT_ZKND : 0;
9061             value |= cfg->ext_zkne ?
9062                      RISCV_HWPROBE_EXT_ZKNE : 0;
9063             value |= cfg->ext_zknh ?
9064                      RISCV_HWPROBE_EXT_ZKNH : 0;
9065             value |= cfg->ext_zksed ?
9066                      RISCV_HWPROBE_EXT_ZKSED : 0;
9067             value |= cfg->ext_zksh ?
9068                      RISCV_HWPROBE_EXT_ZKSH : 0;
9069             value |= cfg->ext_zkt ?
9070                      RISCV_HWPROBE_EXT_ZKT : 0;
9071             value |= cfg->ext_zvbb ?
9072                      RISCV_HWPROBE_EXT_ZVBB : 0;
9073             value |= cfg->ext_zvbc ?
9074                      RISCV_HWPROBE_EXT_ZVBC : 0;
9075             value |= cfg->ext_zvkb ?
9076                      RISCV_HWPROBE_EXT_ZVKB : 0;
9077             value |= cfg->ext_zvkg ?
9078                      RISCV_HWPROBE_EXT_ZVKG : 0;
9079             value |= cfg->ext_zvkned ?
9080                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9081             value |= cfg->ext_zvknha ?
9082                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9083             value |= cfg->ext_zvknhb ?
9084                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9085             value |= cfg->ext_zvksed ?
9086                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9087             value |= cfg->ext_zvksh ?
9088                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9089             value |= cfg->ext_zvkt ?
9090                      RISCV_HWPROBE_EXT_ZVKT : 0;
9091             value |= cfg->ext_zfh ?
9092                      RISCV_HWPROBE_EXT_ZFH : 0;
9093             value |= cfg->ext_zfhmin ?
9094                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9095             value |= cfg->ext_zihintntl ?
9096                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9097             value |= cfg->ext_zvfh ?
9098                      RISCV_HWPROBE_EXT_ZVFH : 0;
9099             value |= cfg->ext_zvfhmin ?
9100                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9101             value |= cfg->ext_zfa ?
9102                      RISCV_HWPROBE_EXT_ZFA : 0;
9103             value |= cfg->ext_ztso ?
9104                      RISCV_HWPROBE_EXT_ZTSO : 0;
9105             value |= cfg->ext_zacas ?
9106                      RISCV_HWPROBE_EXT_ZACAS : 0;
9107             value |= cfg->ext_zicond ?
9108                      RISCV_HWPROBE_EXT_ZICOND : 0;
9109             __put_user(value, &pair->value);
9110             break;
9111         case RISCV_HWPROBE_KEY_CPUPERF_0:
9112             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9113             break;
9114         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9115             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9116             __put_user(value, &pair->value);
9117             break;
9118         default:
9119             __put_user(-1, &pair->key);
9120             break;
9121         }
9122     }
9123 }
9124 
9125 /*
9126  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9127  * If the cpumask_t has no bits set: -EINVAL.
9128  * Otherwise the cpumask_t contains some bit set: 0.
9129  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9130  * nor bound the search by cpumask_size().
9131  */
9132 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9133 {
9134     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9135     int ret = -TARGET_EFAULT;
9136 
9137     if (p) {
9138         ret = -TARGET_EINVAL;
9139         /*
9140          * Since we only care about the empty/non-empty state of the cpumask_t,
9141          * not the individual bits, we do not need to repartition the bits
9142          * from target abi_ulong to host unsigned long.
9143          *
9144          * Note that the kernel does not round up cpusetsize to a multiple of
9145          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9146          * it copies exactly cpusetsize bytes into a zeroed buffer.
9147          */
9148         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9149             if (p[i]) {
9150                 ret = 0;
9151                 break;
9152             }
9153         }
9154         unlock_user(p, target_cpus, 0);
9155     }
9156     return ret;
9157 }
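/*
 * Worked example (hypothetical values): with cpusetsize = 16 and a buffer of
 * all-zero bytes the function returns -TARGET_EINVAL; if any single byte in
 * the buffer is non-zero it returns 0, independent of how many CPUs the host
 * actually has.
 */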
9158 
9159 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9160                                  abi_long arg2, abi_long arg3,
9161                                  abi_long arg4, abi_long arg5)
9162 {
9163     int ret;
9164     struct riscv_hwprobe *host_pairs;
9165 
9166     /* flags must be 0 */
9167     if (arg5 != 0) {
9168         return -TARGET_EINVAL;
9169     }
9170 
9171     /* check cpu_set */
9172     if (arg3 != 0) {
9173         ret = nonempty_cpu_set(arg3, arg4);
9174         if (ret != 0) {
9175             return ret;
9176         }
9177     } else if (arg4 != 0) {
9178         return -TARGET_EINVAL;
9179     }
9180 
9181     /* no pairs */
9182     if (arg2 == 0) {
9183         return 0;
9184     }
9185 
9186     host_pairs = lock_user(VERIFY_WRITE, arg1,
9187                            sizeof(*host_pairs) * (size_t)arg2, 0);
9188     if (host_pairs == NULL) {
9189         return -TARGET_EFAULT;
9190     }
9191     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9192     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9193     return 0;
9194 }
9195 #endif /* TARGET_NR_riscv_hwprobe */
9196 
9197 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9198 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9199 #endif
9200 
9201 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9202 #define __NR_sys_open_tree __NR_open_tree
9203 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9204           unsigned int, __flags)
9205 #endif
9206 
9207 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9208 #define __NR_sys_move_mount __NR_move_mount
9209 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9210            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9211 #endif
9212 
9213 /* This is an internal helper for do_syscall that provides a single
9214  * return point, so that actions such as logging of syscall results
9215  * can be performed.
9216  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9217  */
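/*
 * Sketch of that convention (descriptive only, assuming the usual get_errno()
 * helper used throughout this file): a failing host openat() sets errno,
 * get_errno() turns it into a negative target errno via host_to_target_errno(),
 * and do_syscall1() simply returns that value, e.g. -TARGET_ENOENT, for the
 * caller to hand back to the guest.
 */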
9218 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9219                             abi_long arg2, abi_long arg3, abi_long arg4,
9220                             abi_long arg5, abi_long arg6, abi_long arg7,
9221                             abi_long arg8)
9222 {
9223     CPUState *cpu = env_cpu(cpu_env);
9224     abi_long ret;
9225 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9226     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9227     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9228     || defined(TARGET_NR_statx)
9229     struct stat st;
9230 #endif
9231 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9232     || defined(TARGET_NR_fstatfs)
9233     struct statfs stfs;
9234 #endif
9235     void *p;
9236 
9237     switch(num) {
9238     case TARGET_NR_exit:
9239         /* In old applications this may be used to implement _exit(2).
9240            However, in threaded applications it is used for thread termination,
9241            and _exit_group is used for application termination.
9242            Do thread termination if we have more than one thread.  */
9243 
9244         if (block_signals()) {
9245             return -QEMU_ERESTARTSYS;
9246         }
9247 
9248         pthread_mutex_lock(&clone_lock);
9249 
9250         if (CPU_NEXT(first_cpu)) {
9251             TaskState *ts = get_task_state(cpu);
9252 
9253             if (ts->child_tidptr) {
9254                 put_user_u32(0, ts->child_tidptr);
9255                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9256                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9257             }
9258 
9259             object_unparent(OBJECT(cpu));
9260             object_unref(OBJECT(cpu));
9261             /*
9262              * At this point the CPU should be unrealized and removed
9263              * from cpu lists. We can clean-up the rest of the thread
9264              * data without the lock held.
9265              */
9266 
9267             pthread_mutex_unlock(&clone_lock);
9268 
9269             thread_cpu = NULL;
9270             g_free(ts);
9271             rcu_unregister_thread();
9272             pthread_exit(NULL);
9273         }
9274 
9275         pthread_mutex_unlock(&clone_lock);
9276         preexit_cleanup(cpu_env, arg1);
9277         _exit(arg1);
9278         return 0; /* avoid warning */
9279     case TARGET_NR_read:
9280         if (arg2 == 0 && arg3 == 0) {
9281             return get_errno(safe_read(arg1, 0, 0));
9282         } else {
9283             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9284                 return -TARGET_EFAULT;
9285             ret = get_errno(safe_read(arg1, p, arg3));
9286             if (ret >= 0 &&
9287                 fd_trans_host_to_target_data(arg1)) {
9288                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9289             }
9290             unlock_user(p, arg2, ret);
9291         }
9292         return ret;
9293     case TARGET_NR_write:
9294         if (arg2 == 0 && arg3 == 0) {
9295             return get_errno(safe_write(arg1, 0, 0));
9296         }
9297         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9298             return -TARGET_EFAULT;
9299         if (fd_trans_target_to_host_data(arg1)) {
9300             void *copy = g_malloc(arg3);
9301             memcpy(copy, p, arg3);
9302             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9303             if (ret >= 0) {
9304                 ret = get_errno(safe_write(arg1, copy, ret));
9305             }
9306             g_free(copy);
9307         } else {
9308             ret = get_errno(safe_write(arg1, p, arg3));
9309         }
9310         unlock_user(p, arg2, 0);
9311         return ret;
9312 
9313 #ifdef TARGET_NR_open
9314     case TARGET_NR_open:
9315         if (!(p = lock_user_string(arg1)))
9316             return -TARGET_EFAULT;
9317         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9318                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9319                                   arg3, true));
9320         fd_trans_unregister(ret);
9321         unlock_user(p, arg1, 0);
9322         return ret;
9323 #endif
9324     case TARGET_NR_openat:
9325         if (!(p = lock_user_string(arg2)))
9326             return -TARGET_EFAULT;
9327         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9328                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9329                                   arg4, true));
9330         fd_trans_unregister(ret);
9331         unlock_user(p, arg2, 0);
9332         return ret;
9333     case TARGET_NR_openat2:
9334         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9335         return ret;
9336 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9337     case TARGET_NR_name_to_handle_at:
9338         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9339         return ret;
9340 #endif
9341 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9342     case TARGET_NR_open_by_handle_at:
9343         ret = do_open_by_handle_at(arg1, arg2, arg3);
9344         fd_trans_unregister(ret);
9345         return ret;
9346 #endif
9347 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9348     case TARGET_NR_pidfd_open:
9349         return get_errno(pidfd_open(arg1, arg2));
9350 #endif
9351 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9352     case TARGET_NR_pidfd_send_signal:
9353         {
9354             siginfo_t uinfo, *puinfo;
9355 
9356             if (arg3) {
9357                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9358                 if (!p) {
9359                     return -TARGET_EFAULT;
9360                  }
9361                  target_to_host_siginfo(&uinfo, p);
9362                  unlock_user(p, arg3, 0);
9363                  puinfo = &uinfo;
9364             } else {
9365                  puinfo = NULL;
9366             }
9367             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9368                                               puinfo, arg4));
9369         }
9370         return ret;
9371 #endif
9372 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9373     case TARGET_NR_pidfd_getfd:
9374         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9375 #endif
9376     case TARGET_NR_close:
9377         fd_trans_unregister(arg1);
9378         return get_errno(close(arg1));
9379 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9380     case TARGET_NR_close_range:
9381         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9382         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9383             abi_long fd, maxfd;
9384             maxfd = MIN(arg2, target_fd_max);
9385             for (fd = arg1; fd < maxfd; fd++) {
9386                 fd_trans_unregister(fd);
9387             }
9388         }
9389         return ret;
9390 #endif
9391 
9392     case TARGET_NR_brk:
9393         return do_brk(arg1);
9394 #ifdef TARGET_NR_fork
9395     case TARGET_NR_fork:
9396         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9397 #endif
9398 #ifdef TARGET_NR_waitpid
9399     case TARGET_NR_waitpid:
9400         {
9401             int status;
9402             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9403             if (!is_error(ret) && arg2 && ret
9404                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9405                 return -TARGET_EFAULT;
9406         }
9407         return ret;
9408 #endif
9409 #ifdef TARGET_NR_waitid
9410     case TARGET_NR_waitid:
9411         {
9412             struct rusage ru;
9413             siginfo_t info;
9414 
9415             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9416                                         arg4, (arg5 ? &ru : NULL)));
9417             if (!is_error(ret)) {
9418                 if (arg3) {
9419                     p = lock_user(VERIFY_WRITE, arg3,
9420                                   sizeof(target_siginfo_t), 0);
9421                     if (!p) {
9422                         return -TARGET_EFAULT;
9423                     }
9424                     host_to_target_siginfo(p, &info);
9425                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9426                 }
9427                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9428                     return -TARGET_EFAULT;
9429                 }
9430             }
9431         }
9432         return ret;
9433 #endif
9434 #ifdef TARGET_NR_creat /* not on alpha */
9435     case TARGET_NR_creat:
9436         if (!(p = lock_user_string(arg1)))
9437             return -TARGET_EFAULT;
9438         ret = get_errno(creat(p, arg2));
9439         fd_trans_unregister(ret);
9440         unlock_user(p, arg1, 0);
9441         return ret;
9442 #endif
9443 #ifdef TARGET_NR_link
9444     case TARGET_NR_link:
9445         {
9446             void * p2;
9447             p = lock_user_string(arg1);
9448             p2 = lock_user_string(arg2);
9449             if (!p || !p2)
9450                 ret = -TARGET_EFAULT;
9451             else
9452                 ret = get_errno(link(p, p2));
9453             unlock_user(p2, arg2, 0);
9454             unlock_user(p, arg1, 0);
9455         }
9456         return ret;
9457 #endif
9458 #if defined(TARGET_NR_linkat)
9459     case TARGET_NR_linkat:
9460         {
9461             void * p2 = NULL;
9462             if (!arg2 || !arg4)
9463                 return -TARGET_EFAULT;
9464             p  = lock_user_string(arg2);
9465             p2 = lock_user_string(arg4);
9466             if (!p || !p2)
9467                 ret = -TARGET_EFAULT;
9468             else
9469                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9470             unlock_user(p, arg2, 0);
9471             unlock_user(p2, arg4, 0);
9472         }
9473         return ret;
9474 #endif
9475 #ifdef TARGET_NR_unlink
9476     case TARGET_NR_unlink:
9477         if (!(p = lock_user_string(arg1)))
9478             return -TARGET_EFAULT;
9479         ret = get_errno(unlink(p));
9480         unlock_user(p, arg1, 0);
9481         return ret;
9482 #endif
9483 #if defined(TARGET_NR_unlinkat)
9484     case TARGET_NR_unlinkat:
9485         if (!(p = lock_user_string(arg2)))
9486             return -TARGET_EFAULT;
9487         ret = get_errno(unlinkat(arg1, p, arg3));
9488         unlock_user(p, arg2, 0);
9489         return ret;
9490 #endif
9491     case TARGET_NR_execveat:
9492         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9493     case TARGET_NR_execve:
9494         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9495     case TARGET_NR_chdir:
9496         if (!(p = lock_user_string(arg1)))
9497             return -TARGET_EFAULT;
9498         ret = get_errno(chdir(p));
9499         unlock_user(p, arg1, 0);
9500         return ret;
9501 #ifdef TARGET_NR_time
9502     case TARGET_NR_time:
9503         {
9504             time_t host_time;
9505             ret = get_errno(time(&host_time));
9506             if (!is_error(ret)
9507                 && arg1
9508                 && put_user_sal(host_time, arg1))
9509                 return -TARGET_EFAULT;
9510         }
9511         return ret;
9512 #endif
9513 #ifdef TARGET_NR_mknod
9514     case TARGET_NR_mknod:
9515         if (!(p = lock_user_string(arg1)))
9516             return -TARGET_EFAULT;
9517         ret = get_errno(mknod(p, arg2, arg3));
9518         unlock_user(p, arg1, 0);
9519         return ret;
9520 #endif
9521 #if defined(TARGET_NR_mknodat)
9522     case TARGET_NR_mknodat:
9523         if (!(p = lock_user_string(arg2)))
9524             return -TARGET_EFAULT;
9525         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9526         unlock_user(p, arg2, 0);
9527         return ret;
9528 #endif
9529 #ifdef TARGET_NR_chmod
9530     case TARGET_NR_chmod:
9531         if (!(p = lock_user_string(arg1)))
9532             return -TARGET_EFAULT;
9533         ret = get_errno(chmod(p, arg2));
9534         unlock_user(p, arg1, 0);
9535         return ret;
9536 #endif
9537 #ifdef TARGET_NR_lseek
9538     case TARGET_NR_lseek:
9539         return get_errno(lseek(arg1, arg2, arg3));
9540 #endif
9541 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9542     /* Alpha specific */
9543     case TARGET_NR_getxpid:
9544         cpu_env->ir[IR_A4] = getppid();
9545         return get_errno(getpid());
9546 #endif
9547 #ifdef TARGET_NR_getpid
9548     case TARGET_NR_getpid:
9549         return get_errno(getpid());
9550 #endif
9551     case TARGET_NR_mount:
9552         {
9553             /* need to look at the data field */
9554             void *p2, *p3;
9555 
9556             if (arg1) {
9557                 p = lock_user_string(arg1);
9558                 if (!p) {
9559                     return -TARGET_EFAULT;
9560                 }
9561             } else {
9562                 p = NULL;
9563             }
9564 
9565             p2 = lock_user_string(arg2);
9566             if (!p2) {
9567                 if (arg1) {
9568                     unlock_user(p, arg1, 0);
9569                 }
9570                 return -TARGET_EFAULT;
9571             }
9572 
9573             if (arg3) {
9574                 p3 = lock_user_string(arg3);
9575                 if (!p3) {
9576                     if (arg1) {
9577                         unlock_user(p, arg1, 0);
9578                     }
9579                     unlock_user(p2, arg2, 0);
9580                     return -TARGET_EFAULT;
9581                 }
9582             } else {
9583                 p3 = NULL;
9584             }
9585 
9586             /* FIXME - arg5 should be locked, but it isn't clear how to
9587              * do that since it's not guaranteed to be a NULL-terminated
9588              * string.
9589              */
9590             if (!arg5) {
9591                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9592             } else {
9593                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9594             }
9595             ret = get_errno(ret);
9596 
9597             if (arg1) {
9598                 unlock_user(p, arg1, 0);
9599             }
9600             unlock_user(p2, arg2, 0);
9601             if (arg3) {
9602                 unlock_user(p3, arg3, 0);
9603             }
9604         }
9605         return ret;
9606 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9607 #if defined(TARGET_NR_umount)
9608     case TARGET_NR_umount:
9609 #endif
9610 #if defined(TARGET_NR_oldumount)
9611     case TARGET_NR_oldumount:
9612 #endif
9613         if (!(p = lock_user_string(arg1)))
9614             return -TARGET_EFAULT;
9615         ret = get_errno(umount(p));
9616         unlock_user(p, arg1, 0);
9617         return ret;
9618 #endif
9619 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9620     case TARGET_NR_move_mount:
9621         {
9622             void *p2, *p4;
9623 
9624             if (!arg2 || !arg4) {
9625                 return -TARGET_EFAULT;
9626             }
9627 
9628             p2 = lock_user_string(arg2);
9629             if (!p2) {
9630                 return -TARGET_EFAULT;
9631             }
9632 
9633             p4 = lock_user_string(arg4);
9634             if (!p4) {
9635                 unlock_user(p2, arg2, 0);
9636                 return -TARGET_EFAULT;
9637             }
9638             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9639 
9640             unlock_user(p2, arg2, 0);
9641             unlock_user(p4, arg4, 0);
9642 
9643             return ret;
9644         }
9645 #endif
9646 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9647     case TARGET_NR_open_tree:
9648         {
9649             void *p2;
9650             int host_flags;
9651 
9652             if (!arg2) {
9653                 return -TARGET_EFAULT;
9654             }
9655 
9656             p2 = lock_user_string(arg2);
9657             if (!p2) {
9658                 return -TARGET_EFAULT;
9659             }
9660 
9661             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9662             if (arg3 & TARGET_O_CLOEXEC) {
9663                 host_flags |= O_CLOEXEC;
9664             }
9665 
9666             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9667 
9668             unlock_user(p2, arg2, 0);
9669 
9670             return ret;
9671         }
9672 #endif
9673 #ifdef TARGET_NR_stime /* not on alpha */
9674     case TARGET_NR_stime:
9675         {
9676             struct timespec ts;
9677             ts.tv_nsec = 0;
9678             if (get_user_sal(ts.tv_sec, arg1)) {
9679                 return -TARGET_EFAULT;
9680             }
9681             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9682         }
9683 #endif
9684 #ifdef TARGET_NR_alarm /* not on alpha */
9685     case TARGET_NR_alarm:
9686         return alarm(arg1);
9687 #endif
9688 #ifdef TARGET_NR_pause /* not on alpha */
9689     case TARGET_NR_pause:
9690         if (!block_signals()) {
9691             sigsuspend(&get_task_state(cpu)->signal_mask);
9692         }
9693         return -TARGET_EINTR;
9694 #endif
9695 #ifdef TARGET_NR_utime
9696     case TARGET_NR_utime:
9697         {
9698             struct utimbuf tbuf, *host_tbuf;
9699             struct target_utimbuf *target_tbuf;
9700             if (arg2) {
9701                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9702                     return -TARGET_EFAULT;
9703                 tbuf.actime = tswapal(target_tbuf->actime);
9704                 tbuf.modtime = tswapal(target_tbuf->modtime);
9705                 unlock_user_struct(target_tbuf, arg2, 0);
9706                 host_tbuf = &tbuf;
9707             } else {
9708                 host_tbuf = NULL;
9709             }
9710             if (!(p = lock_user_string(arg1)))
9711                 return -TARGET_EFAULT;
9712             ret = get_errno(utime(p, host_tbuf));
9713             unlock_user(p, arg1, 0);
9714         }
9715         return ret;
9716 #endif
9717 #ifdef TARGET_NR_utimes
9718     case TARGET_NR_utimes:
9719         {
9720             struct timeval *tvp, tv[2];
9721             if (arg2) {
9722                 if (copy_from_user_timeval(&tv[0], arg2)
9723                     || copy_from_user_timeval(&tv[1],
9724                                               arg2 + sizeof(struct target_timeval)))
9725                     return -TARGET_EFAULT;
9726                 tvp = tv;
9727             } else {
9728                 tvp = NULL;
9729             }
9730             if (!(p = lock_user_string(arg1)))
9731                 return -TARGET_EFAULT;
9732             ret = get_errno(utimes(p, tvp));
9733             unlock_user(p, arg1, 0);
9734         }
9735         return ret;
9736 #endif
9737 #if defined(TARGET_NR_futimesat)
9738     case TARGET_NR_futimesat:
9739         {
9740             struct timeval *tvp, tv[2];
9741             if (arg3) {
9742                 if (copy_from_user_timeval(&tv[0], arg3)
9743                     || copy_from_user_timeval(&tv[1],
9744                                               arg3 + sizeof(struct target_timeval)))
9745                     return -TARGET_EFAULT;
9746                 tvp = tv;
9747             } else {
9748                 tvp = NULL;
9749             }
9750             if (!(p = lock_user_string(arg2))) {
9751                 return -TARGET_EFAULT;
9752             }
9753             ret = get_errno(futimesat(arg1, path(p), tvp));
9754             unlock_user(p, arg2, 0);
9755         }
9756         return ret;
9757 #endif
9758 #ifdef TARGET_NR_access
9759     case TARGET_NR_access:
9760         if (!(p = lock_user_string(arg1))) {
9761             return -TARGET_EFAULT;
9762         }
9763         ret = get_errno(access(path(p), arg2));
9764         unlock_user(p, arg1, 0);
9765         return ret;
9766 #endif
9767 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9768     case TARGET_NR_faccessat:
9769         if (!(p = lock_user_string(arg2))) {
9770             return -TARGET_EFAULT;
9771         }
9772         ret = get_errno(faccessat(arg1, p, arg3, 0));
9773         unlock_user(p, arg2, 0);
9774         return ret;
9775 #endif
9776 #if defined(TARGET_NR_faccessat2)
9777     case TARGET_NR_faccessat2:
9778         if (!(p = lock_user_string(arg2))) {
9779             return -TARGET_EFAULT;
9780         }
9781         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9782         unlock_user(p, arg2, 0);
9783         return ret;
9784 #endif
9785 #ifdef TARGET_NR_nice /* not on alpha */
9786     case TARGET_NR_nice:
9787         return get_errno(nice(arg1));
9788 #endif
9789     case TARGET_NR_sync:
9790         sync();
9791         return 0;
9792 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9793     case TARGET_NR_syncfs:
9794         return get_errno(syncfs(arg1));
9795 #endif
9796     case TARGET_NR_kill:
9797         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9798 #ifdef TARGET_NR_rename
9799     case TARGET_NR_rename:
9800         {
9801             void *p2;
9802             p = lock_user_string(arg1);
9803             p2 = lock_user_string(arg2);
9804             if (!p || !p2)
9805                 ret = -TARGET_EFAULT;
9806             else
9807                 ret = get_errno(rename(p, p2));
9808             unlock_user(p2, arg2, 0);
9809             unlock_user(p, arg1, 0);
9810         }
9811         return ret;
9812 #endif
9813 #if defined(TARGET_NR_renameat)
9814     case TARGET_NR_renameat:
9815         {
9816             void *p2;
9817             p  = lock_user_string(arg2);
9818             p2 = lock_user_string(arg4);
9819             if (!p || !p2)
9820                 ret = -TARGET_EFAULT;
9821             else
9822                 ret = get_errno(renameat(arg1, p, arg3, p2));
9823             unlock_user(p2, arg4, 0);
9824             unlock_user(p, arg2, 0);
9825         }
9826         return ret;
9827 #endif
9828 #if defined(TARGET_NR_renameat2)
9829     case TARGET_NR_renameat2:
9830         {
9831             void *p2;
9832             p  = lock_user_string(arg2);
9833             p2 = lock_user_string(arg4);
9834             if (!p || !p2) {
9835                 ret = -TARGET_EFAULT;
9836             } else {
9837                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9838             }
9839             unlock_user(p2, arg4, 0);
9840             unlock_user(p, arg2, 0);
9841         }
9842         return ret;
9843 #endif
9844 #ifdef TARGET_NR_mkdir
9845     case TARGET_NR_mkdir:
9846         if (!(p = lock_user_string(arg1)))
9847             return -TARGET_EFAULT;
9848         ret = get_errno(mkdir(p, arg2));
9849         unlock_user(p, arg1, 0);
9850         return ret;
9851 #endif
9852 #if defined(TARGET_NR_mkdirat)
9853     case TARGET_NR_mkdirat:
9854         if (!(p = lock_user_string(arg2)))
9855             return -TARGET_EFAULT;
9856         ret = get_errno(mkdirat(arg1, p, arg3));
9857         unlock_user(p, arg2, 0);
9858         return ret;
9859 #endif
9860 #ifdef TARGET_NR_rmdir
9861     case TARGET_NR_rmdir:
9862         if (!(p = lock_user_string(arg1)))
9863             return -TARGET_EFAULT;
9864         ret = get_errno(rmdir(p));
9865         unlock_user(p, arg1, 0);
9866         return ret;
9867 #endif
9868     case TARGET_NR_dup:
9869         ret = get_errno(dup(arg1));
9870         if (ret >= 0) {
9871             fd_trans_dup(arg1, ret);
9872         }
9873         return ret;
9874 #ifdef TARGET_NR_pipe
9875     case TARGET_NR_pipe:
9876         return do_pipe(cpu_env, arg1, 0, 0);
9877 #endif
9878 #ifdef TARGET_NR_pipe2
9879     case TARGET_NR_pipe2:
9880         return do_pipe(cpu_env, arg1,
9881                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9882 #endif
9883     case TARGET_NR_times:
9884         {
9885             struct target_tms *tmsp;
9886             struct tms tms;
9887             ret = get_errno(times(&tms));
9888             if (arg1) {
9889                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9890                 if (!tmsp)
9891                     return -TARGET_EFAULT;
9892                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9893                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9894                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9895                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9896             }
9897             if (!is_error(ret))
9898                 ret = host_to_target_clock_t(ret);
9899         }
9900         return ret;
9901     case TARGET_NR_acct:
9902         if (arg1 == 0) {
9903             ret = get_errno(acct(NULL));
9904         } else {
9905             if (!(p = lock_user_string(arg1))) {
9906                 return -TARGET_EFAULT;
9907             }
9908             ret = get_errno(acct(path(p)));
9909             unlock_user(p, arg1, 0);
9910         }
9911         return ret;
9912 #ifdef TARGET_NR_umount2
9913     case TARGET_NR_umount2:
9914         if (!(p = lock_user_string(arg1)))
9915             return -TARGET_EFAULT;
9916         ret = get_errno(umount2(p, arg2));
9917         unlock_user(p, arg1, 0);
9918         return ret;
9919 #endif
9920     case TARGET_NR_ioctl:
9921         return do_ioctl(arg1, arg2, arg3);
9922 #ifdef TARGET_NR_fcntl
9923     case TARGET_NR_fcntl:
9924         return do_fcntl(arg1, arg2, arg3);
9925 #endif
9926     case TARGET_NR_setpgid:
9927         return get_errno(setpgid(arg1, arg2));
9928     case TARGET_NR_umask:
9929         return get_errno(umask(arg1));
9930     case TARGET_NR_chroot:
9931         if (!(p = lock_user_string(arg1)))
9932             return -TARGET_EFAULT;
9933         ret = get_errno(chroot(p));
9934         unlock_user(p, arg1, 0);
9935         return ret;
9936 #ifdef TARGET_NR_dup2
9937     case TARGET_NR_dup2:
9938         ret = get_errno(dup2(arg1, arg2));
9939         if (ret >= 0) {
9940             fd_trans_dup(arg1, arg2);
9941         }
9942         return ret;
9943 #endif
9944 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9945     case TARGET_NR_dup3:
9946     {
9947         int host_flags;
9948 
9949         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9950             return -TARGET_EINVAL;
9951         }
9952         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9953         ret = get_errno(dup3(arg1, arg2, host_flags));
9954         if (ret >= 0) {
9955             fd_trans_dup(arg1, arg2);
9956         }
9957         return ret;
9958     }
9959 #endif
9960 #ifdef TARGET_NR_getppid /* not on alpha */
9961     case TARGET_NR_getppid:
9962         return get_errno(getppid());
9963 #endif
9964 #ifdef TARGET_NR_getpgrp
9965     case TARGET_NR_getpgrp:
9966         return get_errno(getpgrp());
9967 #endif
9968     case TARGET_NR_setsid:
9969         return get_errno(setsid());
9970 #ifdef TARGET_NR_sigaction
9971     case TARGET_NR_sigaction:
9972         {
9973 #if defined(TARGET_MIPS)
9974             struct target_sigaction act, oact, *pact, *old_act;
9975 
9976             if (arg2) {
9977                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9978                     return -TARGET_EFAULT;
9979                 act._sa_handler = old_act->_sa_handler;
9980                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9981                 act.sa_flags = old_act->sa_flags;
9982                 unlock_user_struct(old_act, arg2, 0);
9983                 pact = &act;
9984             } else {
9985                 pact = NULL;
9986             }
9987 
9988             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9989 
9990             if (!is_error(ret) && arg3) {
9991                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9992                     return -TARGET_EFAULT;
9993                 old_act->_sa_handler = oact._sa_handler;
9994                 old_act->sa_flags = oact.sa_flags;
9995                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9996                 old_act->sa_mask.sig[1] = 0;
9997                 old_act->sa_mask.sig[2] = 0;
9998                 old_act->sa_mask.sig[3] = 0;
9999                 unlock_user_struct(old_act, arg3, 1);
10000             }
10001 #else
10002             struct target_old_sigaction *old_act;
10003             struct target_sigaction act, oact, *pact;
10004             if (arg2) {
10005                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10006                     return -TARGET_EFAULT;
10007                 act._sa_handler = old_act->_sa_handler;
10008                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10009                 act.sa_flags = old_act->sa_flags;
10010 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10011                 act.sa_restorer = old_act->sa_restorer;
10012 #endif
10013                 unlock_user_struct(old_act, arg2, 0);
10014                 pact = &act;
10015             } else {
10016                 pact = NULL;
10017             }
10018             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10019             if (!is_error(ret) && arg3) {
10020                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10021                     return -TARGET_EFAULT;
10022                 old_act->_sa_handler = oact._sa_handler;
10023                 old_act->sa_mask = oact.sa_mask.sig[0];
10024                 old_act->sa_flags = oact.sa_flags;
10025 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10026                 old_act->sa_restorer = oact.sa_restorer;
10027 #endif
10028                 unlock_user_struct(old_act, arg3, 1);
10029             }
10030 #endif
10031         }
10032         return ret;
10033 #endif
10034     case TARGET_NR_rt_sigaction:
10035         {
10036             /*
10037              * For Alpha and SPARC this is a 5 argument syscall, with
10038              * a 'restorer' parameter which must be copied into the
10039              * sa_restorer field of the sigaction struct.
10040              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10041              * and arg5 is the sigsetsize.
10042              */
10043 #if defined(TARGET_ALPHA)
10044             target_ulong sigsetsize = arg4;
10045             target_ulong restorer = arg5;
10046 #elif defined(TARGET_SPARC)
10047             target_ulong restorer = arg4;
10048             target_ulong sigsetsize = arg5;
10049 #else
10050             target_ulong sigsetsize = arg4;
10051             target_ulong restorer = 0;
10052 #endif
10053             struct target_sigaction *act = NULL;
10054             struct target_sigaction *oact = NULL;
10055 
10056             if (sigsetsize != sizeof(target_sigset_t)) {
10057                 return -TARGET_EINVAL;
10058             }
10059             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10060                 return -TARGET_EFAULT;
10061             }
10062             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10063                 ret = -TARGET_EFAULT;
10064             } else {
10065                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10066                 if (oact) {
10067                     unlock_user_struct(oact, arg3, 1);
10068                 }
10069             }
10070             if (act) {
10071                 unlock_user_struct(act, arg2, 0);
10072             }
10073         }
10074         return ret;
10075 #ifdef TARGET_NR_sgetmask /* not on alpha */
10076     case TARGET_NR_sgetmask:
10077         {
10078             sigset_t cur_set;
10079             abi_ulong target_set;
10080             ret = do_sigprocmask(0, NULL, &cur_set);
10081             if (!ret) {
10082                 host_to_target_old_sigset(&target_set, &cur_set);
10083                 ret = target_set;
10084             }
10085         }
10086         return ret;
10087 #endif
10088 #ifdef TARGET_NR_ssetmask /* not on alpha */
10089     case TARGET_NR_ssetmask:
10090         {
10091             sigset_t set, oset;
10092             abi_ulong target_set = arg1;
10093             target_to_host_old_sigset(&set, &target_set);
10094             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10095             if (!ret) {
10096                 host_to_target_old_sigset(&target_set, &oset);
10097                 ret = target_set;
10098             }
10099         }
10100         return ret;
10101 #endif
10102 #ifdef TARGET_NR_sigprocmask
10103     case TARGET_NR_sigprocmask:
10104         {
10105 #if defined(TARGET_ALPHA)
10106             sigset_t set, oldset;
10107             abi_ulong mask;
10108             int how;
10109 
10110             switch (arg1) {
10111             case TARGET_SIG_BLOCK:
10112                 how = SIG_BLOCK;
10113                 break;
10114             case TARGET_SIG_UNBLOCK:
10115                 how = SIG_UNBLOCK;
10116                 break;
10117             case TARGET_SIG_SETMASK:
10118                 how = SIG_SETMASK;
10119                 break;
10120             default:
10121                 return -TARGET_EINVAL;
10122             }
10123             mask = arg2;
10124             target_to_host_old_sigset(&set, &mask);
10125 
10126             ret = do_sigprocmask(how, &set, &oldset);
10127             if (!is_error(ret)) {
10128                 host_to_target_old_sigset(&mask, &oldset);
10129                 ret = mask;
10130                 cpu_env->ir[IR_V0] = 0; /* force no error */
10131             }
10132 #else
10133             sigset_t set, oldset, *set_ptr;
10134             int how;
10135 
10136             if (arg2) {
10137                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10138                 if (!p) {
10139                     return -TARGET_EFAULT;
10140                 }
10141                 target_to_host_old_sigset(&set, p);
10142                 unlock_user(p, arg2, 0);
10143                 set_ptr = &set;
10144                 switch (arg1) {
10145                 case TARGET_SIG_BLOCK:
10146                     how = SIG_BLOCK;
10147                     break;
10148                 case TARGET_SIG_UNBLOCK:
10149                     how = SIG_UNBLOCK;
10150                     break;
10151                 case TARGET_SIG_SETMASK:
10152                     how = SIG_SETMASK;
10153                     break;
10154                 default:
10155                     return -TARGET_EINVAL;
10156                 }
10157             } else {
10158                 how = 0;
10159                 set_ptr = NULL;
10160             }
10161             ret = do_sigprocmask(how, set_ptr, &oldset);
10162             if (!is_error(ret) && arg3) {
10163                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10164                     return -TARGET_EFAULT;
10165                 host_to_target_old_sigset(p, &oldset);
10166                 unlock_user(p, arg3, sizeof(target_sigset_t));
10167             }
10168 #endif
10169         }
10170         return ret;
10171 #endif
10172     case TARGET_NR_rt_sigprocmask:
10173         {
10174             int how = arg1;
10175             sigset_t set, oldset, *set_ptr;
10176 
10177             if (arg4 != sizeof(target_sigset_t)) {
10178                 return -TARGET_EINVAL;
10179             }
10180 
10181             if (arg2) {
10182                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10183                 if (!p) {
10184                     return -TARGET_EFAULT;
10185                 }
10186                 target_to_host_sigset(&set, p);
10187                 unlock_user(p, arg2, 0);
10188                 set_ptr = &set;
10189                 switch(how) {
10190                 case TARGET_SIG_BLOCK:
10191                     how = SIG_BLOCK;
10192                     break;
10193                 case TARGET_SIG_UNBLOCK:
10194                     how = SIG_UNBLOCK;
10195                     break;
10196                 case TARGET_SIG_SETMASK:
10197                     how = SIG_SETMASK;
10198                     break;
10199                 default:
10200                     return -TARGET_EINVAL;
10201                 }
10202             } else {
10203                 how = 0;
10204                 set_ptr = NULL;
10205             }
10206             ret = do_sigprocmask(how, set_ptr, &oldset);
10207             if (!is_error(ret) && arg3) {
10208                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10209                     return -TARGET_EFAULT;
10210                 host_to_target_sigset(p, &oldset);
10211                 unlock_user(p, arg3, sizeof(target_sigset_t));
10212             }
10213         }
10214         return ret;
10215 #ifdef TARGET_NR_sigpending
10216     case TARGET_NR_sigpending:
10217         {
10218             sigset_t set;
10219             ret = get_errno(sigpending(&set));
10220             if (!is_error(ret)) {
10221                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10222                     return -TARGET_EFAULT;
10223                 host_to_target_old_sigset(p, &set);
10224                 unlock_user(p, arg1, sizeof(target_sigset_t));
10225             }
10226         }
10227         return ret;
10228 #endif
10229     case TARGET_NR_rt_sigpending:
10230         {
10231             sigset_t set;
10232 
10233             /* Yes, this check is >, not != like most. We follow the kernel's
10234              * logic and it does it like this because it implements
10235              * NR_sigpending through the same code path, and in that case
10236              * the old_sigset_t is smaller in size.
10237              */
10238             if (arg2 > sizeof(target_sigset_t)) {
10239                 return -TARGET_EINVAL;
10240             }
10241 
10242             ret = get_errno(sigpending(&set));
10243             if (!is_error(ret)) {
10244                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10245                     return -TARGET_EFAULT;
10246                 host_to_target_sigset(p, &set);
10247                 unlock_user(p, arg1, sizeof(target_sigset_t));
10248             }
10249         }
10250         return ret;
10251 #ifdef TARGET_NR_sigsuspend
10252     case TARGET_NR_sigsuspend:
10253         {
10254             sigset_t *set;
10255 
10256 #if defined(TARGET_ALPHA)
10257             TaskState *ts = get_task_state(cpu);
10258             /* target_to_host_old_sigset will bswap back */
10259             abi_ulong mask = tswapal(arg1);
10260             set = &ts->sigsuspend_mask;
10261             target_to_host_old_sigset(set, &mask);
10262 #else
10263             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10264             if (ret != 0) {
10265                 return ret;
10266             }
10267 #endif
10268             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10269             finish_sigsuspend_mask(ret);
10270         }
10271         return ret;
10272 #endif
10273     case TARGET_NR_rt_sigsuspend:
10274         {
10275             sigset_t *set;
10276 
10277             ret = process_sigsuspend_mask(&set, arg1, arg2);
10278             if (ret != 0) {
10279                 return ret;
10280             }
10281             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10282             finish_sigsuspend_mask(ret);
10283         }
10284         return ret;
10285 #ifdef TARGET_NR_rt_sigtimedwait
10286     case TARGET_NR_rt_sigtimedwait:
10287         {
10288             sigset_t set;
10289             struct timespec uts, *puts;
10290             siginfo_t uinfo;
10291 
10292             if (arg4 != sizeof(target_sigset_t)) {
10293                 return -TARGET_EINVAL;
10294             }
10295 
10296             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10297                 return -TARGET_EFAULT;
10298             target_to_host_sigset(&set, p);
10299             unlock_user(p, arg1, 0);
10300             if (arg3) {
10301                 puts = &uts;
10302                 if (target_to_host_timespec(puts, arg3)) {
10303                     return -TARGET_EFAULT;
10304                 }
10305             } else {
10306                 puts = NULL;
10307             }
10308             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10309                                                  SIGSET_T_SIZE));
10310             if (!is_error(ret)) {
10311                 if (arg2) {
10312                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10313                                   0);
10314                     if (!p) {
10315                         return -TARGET_EFAULT;
10316                     }
10317                     host_to_target_siginfo(p, &uinfo);
10318                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10319                 }
10320                 ret = host_to_target_signal(ret);
10321             }
10322         }
10323         return ret;
10324 #endif
10325 #ifdef TARGET_NR_rt_sigtimedwait_time64
10326     case TARGET_NR_rt_sigtimedwait_time64:
10327         {
10328             sigset_t set;
10329             struct timespec uts, *puts;
10330             siginfo_t uinfo;
10331 
10332             if (arg4 != sizeof(target_sigset_t)) {
10333                 return -TARGET_EINVAL;
10334             }
10335 
10336             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10337             if (!p) {
10338                 return -TARGET_EFAULT;
10339             }
10340             target_to_host_sigset(&set, p);
10341             unlock_user(p, arg1, 0);
10342             if (arg3) {
10343                 puts = &uts;
10344                 if (target_to_host_timespec64(puts, arg3)) {
10345                     return -TARGET_EFAULT;
10346                 }
10347             } else {
10348                 puts = NULL;
10349             }
10350             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10351                                                  SIGSET_T_SIZE));
10352             if (!is_error(ret)) {
10353                 if (arg2) {
10354                     p = lock_user(VERIFY_WRITE, arg2,
10355                                   sizeof(target_siginfo_t), 0);
10356                     if (!p) {
10357                         return -TARGET_EFAULT;
10358                     }
10359                     host_to_target_siginfo(p, &uinfo);
10360                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10361                 }
10362                 ret = host_to_target_signal(ret);
10363             }
10364         }
10365         return ret;
10366 #endif
10367     case TARGET_NR_rt_sigqueueinfo:
10368         {
10369             siginfo_t uinfo;
10370 
10371             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10372             if (!p) {
10373                 return -TARGET_EFAULT;
10374             }
10375             target_to_host_siginfo(&uinfo, p);
10376             unlock_user(p, arg3, 0);
10377             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10378         }
10379         return ret;
10380     case TARGET_NR_rt_tgsigqueueinfo:
10381         {
10382             siginfo_t uinfo;
10383 
10384             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10385             if (!p) {
10386                 return -TARGET_EFAULT;
10387             }
10388             target_to_host_siginfo(&uinfo, p);
10389             unlock_user(p, arg4, 0);
10390             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10391         }
10392         return ret;
10393 #ifdef TARGET_NR_sigreturn
10394     case TARGET_NR_sigreturn:
10395         if (block_signals()) {
10396             return -QEMU_ERESTARTSYS;
10397         }
10398         return do_sigreturn(cpu_env);
10399 #endif
10400     case TARGET_NR_rt_sigreturn:
10401         if (block_signals()) {
10402             return -QEMU_ERESTARTSYS;
10403         }
10404         return do_rt_sigreturn(cpu_env);
10405     case TARGET_NR_sethostname:
10406         if (!(p = lock_user_string(arg1)))
10407             return -TARGET_EFAULT;
10408         ret = get_errno(sethostname(p, arg2));
10409         unlock_user(p, arg1, 0);
10410         return ret;
10411 #ifdef TARGET_NR_setrlimit
10412     case TARGET_NR_setrlimit:
10413         {
10414             int resource = target_to_host_resource(arg1);
10415             struct target_rlimit *target_rlim;
10416             struct rlimit rlim;
10417             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10418                 return -TARGET_EFAULT;
10419             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10420             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10421             unlock_user_struct(target_rlim, arg2, 0);
10422             /*
10423              * If we just passed through resource limit settings for memory then
10424              * they would also apply to QEMU's own allocations, and QEMU will
10425              * crash or hang or die if its allocations fail. Ideally we would
10426              * track the guest allocations in QEMU and apply the limits ourselves.
10427              * For now, just tell the guest the call succeeded but don't actually
10428              * limit anything.
10429              */
10430             if (resource != RLIMIT_AS &&
10431                 resource != RLIMIT_DATA &&
10432                 resource != RLIMIT_STACK) {
10433                 return get_errno(setrlimit(resource, &rlim));
10434             } else {
10435                 return 0;
10436             }
10437         }
10438 #endif
10439 #ifdef TARGET_NR_getrlimit
10440     case TARGET_NR_getrlimit:
10441         {
10442             int resource = target_to_host_resource(arg1);
10443             struct target_rlimit *target_rlim;
10444             struct rlimit rlim;
10445 
10446             ret = get_errno(getrlimit(resource, &rlim));
10447             if (!is_error(ret)) {
10448                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10449                     return -TARGET_EFAULT;
10450                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10451                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10452                 unlock_user_struct(target_rlim, arg2, 1);
10453             }
10454         }
10455         return ret;
10456 #endif
10457     case TARGET_NR_getrusage:
10458         {
10459             struct rusage rusage;
10460             ret = get_errno(getrusage(arg1, &rusage));
10461             if (!is_error(ret)) {
10462                 ret = host_to_target_rusage(arg2, &rusage);
10463             }
10464         }
10465         return ret;
10466 #if defined(TARGET_NR_gettimeofday)
10467     case TARGET_NR_gettimeofday:
10468         {
10469             struct timeval tv;
10470             struct timezone tz;
10471 
10472             ret = get_errno(gettimeofday(&tv, &tz));
10473             if (!is_error(ret)) {
10474                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10475                     return -TARGET_EFAULT;
10476                 }
10477                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10478                     return -TARGET_EFAULT;
10479                 }
10480             }
10481         }
10482         return ret;
10483 #endif
10484 #if defined(TARGET_NR_settimeofday)
10485     case TARGET_NR_settimeofday:
10486         {
10487             struct timeval tv, *ptv = NULL;
10488             struct timezone tz, *ptz = NULL;
10489 
10490             if (arg1) {
10491                 if (copy_from_user_timeval(&tv, arg1)) {
10492                     return -TARGET_EFAULT;
10493                 }
10494                 ptv = &tv;
10495             }
10496 
10497             if (arg2) {
10498                 if (copy_from_user_timezone(&tz, arg2)) {
10499                     return -TARGET_EFAULT;
10500                 }
10501                 ptz = &tz;
10502             }
10503 
10504             return get_errno(settimeofday(ptv, ptz));
10505         }
10506 #endif
10507 #if defined(TARGET_NR_select)
10508     case TARGET_NR_select:
10509 #if defined(TARGET_WANT_NI_OLD_SELECT)
10510         /* Some architectures used to provide old_select here
10511          * but now return ENOSYS for it.
10512          */
10513         ret = -TARGET_ENOSYS;
10514 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10515         ret = do_old_select(arg1);
10516 #else
10517         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10518 #endif
10519         return ret;
10520 #endif
10521 #ifdef TARGET_NR_pselect6
10522     case TARGET_NR_pselect6:
10523         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10524 #endif
10525 #ifdef TARGET_NR_pselect6_time64
10526     case TARGET_NR_pselect6_time64:
10527         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10528 #endif
10529 #ifdef TARGET_NR_symlink
10530     case TARGET_NR_symlink:
10531         {
10532             void *p2;
10533             p = lock_user_string(arg1);
10534             p2 = lock_user_string(arg2);
10535             if (!p || !p2)
10536                 ret = -TARGET_EFAULT;
10537             else
10538                 ret = get_errno(symlink(p, p2));
10539             unlock_user(p2, arg2, 0);
10540             unlock_user(p, arg1, 0);
10541         }
10542         return ret;
10543 #endif
10544 #if defined(TARGET_NR_symlinkat)
10545     case TARGET_NR_symlinkat:
10546         {
10547             void *p2;
10548             p  = lock_user_string(arg1);
10549             p2 = lock_user_string(arg3);
10550             if (!p || !p2)
10551                 ret = -TARGET_EFAULT;
10552             else
10553                 ret = get_errno(symlinkat(p, arg2, p2));
10554             unlock_user(p2, arg3, 0);
10555             unlock_user(p, arg1, 0);
10556         }
10557         return ret;
10558 #endif
10559 #ifdef TARGET_NR_readlink
10560     case TARGET_NR_readlink:
10561         {
10562             void *p2;
10563             p = lock_user_string(arg1);
10564             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10565             ret = get_errno(do_guest_readlink(p, p2, arg3));
10566             unlock_user(p2, arg2, ret);
10567             unlock_user(p, arg1, 0);
10568         }
10569         return ret;
10570 #endif
10571 #if defined(TARGET_NR_readlinkat)
10572     case TARGET_NR_readlinkat:
10573         {
10574             void *p2;
10575             p  = lock_user_string(arg2);
10576             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10577             if (!p || !p2) {
10578                 ret = -TARGET_EFAULT;
10579             } else if (!arg4) {
10580                 /* Short circuit this for the magic exe check. */
10581                 ret = -TARGET_EINVAL;
10582             } else if (is_proc_myself((const char *)p, "exe")) {
10583                 /*
10584                  * Don't worry about sign mismatch as earlier mapping
10585                  * logic would have thrown a bad address error.
10586                  */
10587                 ret = MIN(strlen(exec_path), arg4);
10588                 /* We cannot NUL terminate the string. */
10589                 memcpy(p2, exec_path, ret);
10590             } else {
10591                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10592             }
10593             unlock_user(p2, arg3, ret);
10594             unlock_user(p, arg2, 0);
10595         }
10596         return ret;
10597 #endif
10598 #ifdef TARGET_NR_swapon
10599     case TARGET_NR_swapon:
10600         if (!(p = lock_user_string(arg1)))
10601             return -TARGET_EFAULT;
10602         ret = get_errno(swapon(p, arg2));
10603         unlock_user(p, arg1, 0);
10604         return ret;
10605 #endif
10606     case TARGET_NR_reboot:
10607         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10608             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2. */
10609             p = lock_user_string(arg4);
10610             if (!p) {
10611                 return -TARGET_EFAULT;
10612             }
10613             ret = get_errno(reboot(arg1, arg2, arg3, p));
10614             unlock_user(p, arg4, 0);
10615         } else {
10616             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10617         }
10618         return ret;
10619 #ifdef TARGET_NR_mmap
10620     case TARGET_NR_mmap:
10621 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
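        /* Old-style mmap: the guest passes a pointer to a block of six
         * arguments rather than passing them in registers.
         */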
10622         {
10623             abi_ulong *v;
10624             abi_ulong v1, v2, v3, v4, v5, v6;
10625             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10626                 return -TARGET_EFAULT;
10627             v1 = tswapal(v[0]);
10628             v2 = tswapal(v[1]);
10629             v3 = tswapal(v[2]);
10630             v4 = tswapal(v[3]);
10631             v5 = tswapal(v[4]);
10632             v6 = tswapal(v[5]);
10633             unlock_user(v, arg1, 0);
10634             return do_mmap(v1, v2, v3, v4, v5, v6);
10635         }
10636 #else
10637         /* mmap pointers are always untagged */
10638         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10639 #endif
10640 #endif
10641 #ifdef TARGET_NR_mmap2
10642     case TARGET_NR_mmap2:
10643 #ifndef MMAP_SHIFT
10644 #define MMAP_SHIFT 12
10645 #endif
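        /* The mmap2 offset argument is in units of 1 << MMAP_SHIFT bytes
         * (4096 unless the port overrides it); scale it to a byte offset.
         */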
10646         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10647                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10648 #endif
10649     case TARGET_NR_munmap:
10650         arg1 = cpu_untagged_addr(cpu, arg1);
10651         return get_errno(target_munmap(arg1, arg2));
10652     case TARGET_NR_mprotect:
10653         arg1 = cpu_untagged_addr(cpu, arg1);
10654         {
10655             TaskState *ts = get_task_state(cpu);
10656             /* Special hack to detect libc making the stack executable.  */
10657             if ((arg3 & PROT_GROWSDOWN)
10658                 && arg1 >= ts->info->stack_limit
10659                 && arg1 <= ts->info->start_stack) {
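                /* Emulate PROT_GROWSDOWN by extending the change down to
                 * the stack limit ourselves; the flag itself is not passed
                 * on to the host.
                 */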
10660                 arg3 &= ~PROT_GROWSDOWN;
10661                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10662                 arg1 = ts->info->stack_limit;
10663             }
10664         }
10665         return get_errno(target_mprotect(arg1, arg2, arg3));
10666 #ifdef TARGET_NR_mremap
10667     case TARGET_NR_mremap:
10668         arg1 = cpu_untagged_addr(cpu, arg1);
10669         /* mremap new_addr (arg5) is always untagged */
10670         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10671 #endif
10672         /* ??? msync/mlock/munlock are broken for softmmu.  */
10673 #ifdef TARGET_NR_msync
10674     case TARGET_NR_msync:
10675         return get_errno(msync(g2h(cpu, arg1), arg2,
10676                                target_to_host_msync_arg(arg3)));
10677 #endif
10678 #ifdef TARGET_NR_mlock
10679     case TARGET_NR_mlock:
10680         return get_errno(mlock(g2h(cpu, arg1), arg2));
10681 #endif
10682 #ifdef TARGET_NR_munlock
10683     case TARGET_NR_munlock:
10684         return get_errno(munlock(g2h(cpu, arg1), arg2));
10685 #endif
10686 #ifdef TARGET_NR_mlockall
10687     case TARGET_NR_mlockall:
10688         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10689 #endif
10690 #ifdef TARGET_NR_munlockall
10691     case TARGET_NR_munlockall:
10692         return get_errno(munlockall());
10693 #endif
10694 #ifdef TARGET_NR_truncate
10695     case TARGET_NR_truncate:
10696         if (!(p = lock_user_string(arg1)))
10697             return -TARGET_EFAULT;
10698         ret = get_errno(truncate(p, arg2));
10699         unlock_user(p, arg1, 0);
10700         return ret;
10701 #endif
10702 #ifdef TARGET_NR_ftruncate
10703     case TARGET_NR_ftruncate:
10704         return get_errno(ftruncate(arg1, arg2));
10705 #endif
10706     case TARGET_NR_fchmod:
10707         return get_errno(fchmod(arg1, arg2));
10708 #if defined(TARGET_NR_fchmodat)
10709     case TARGET_NR_fchmodat:
10710         if (!(p = lock_user_string(arg2)))
10711             return -TARGET_EFAULT;
10712         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10713         unlock_user(p, arg2, 0);
10714         return ret;
10715 #endif
10716 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
10717     case TARGET_NR_fchmodat2:
10718         if (!(p = lock_user_string(arg2))) {
10719             return -TARGET_EFAULT;
10720         }
10721         ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4));
10722         unlock_user(p, arg2, 0);
10723         return ret;
10724 #endif
10725     case TARGET_NR_getpriority:
10726         /* Note that negative values are valid for getpriority, so we must
10727            differentiate based on errno settings.  */
10728         errno = 0;
10729         ret = getpriority(arg1, arg2);
10730         if (ret == -1 && errno != 0) {
10731             return -host_to_target_errno(errno);
10732         }
10733 #ifdef TARGET_ALPHA
10734         /* Return value is the unbiased priority.  Signal no error.  */
10735         cpu_env->ir[IR_V0] = 0;
10736 #else
10737         /* Return value is a biased priority to avoid negative numbers.  */
10738         ret = 20 - ret;
10739 #endif
10740         return ret;
10741     case TARGET_NR_setpriority:
10742         return get_errno(setpriority(arg1, arg2, arg3));
10743 #ifdef TARGET_NR_statfs
10744     case TARGET_NR_statfs:
10745         if (!(p = lock_user_string(arg1))) {
10746             return -TARGET_EFAULT;
10747         }
10748         ret = get_errno(statfs(path(p), &stfs));
10749         unlock_user(p, arg1, 0);
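    /* Also reached via goto from TARGET_NR_fstatfs once stfs is filled. */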
10750     convert_statfs:
10751         if (!is_error(ret)) {
10752             struct target_statfs *target_stfs;
10753 
10754             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10755                 return -TARGET_EFAULT;
10756             __put_user(stfs.f_type, &target_stfs->f_type);
10757             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10758             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10759             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10760             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10761             __put_user(stfs.f_files, &target_stfs->f_files);
10762             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10763             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10764             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10765             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10766             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10767 #ifdef _STATFS_F_FLAGS
10768             __put_user(stfs.f_flags, &target_stfs->f_flags);
10769 #else
10770             __put_user(0, &target_stfs->f_flags);
10771 #endif
10772             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10773             unlock_user_struct(target_stfs, arg2, 1);
10774         }
10775         return ret;
10776 #endif
10777 #ifdef TARGET_NR_fstatfs
10778     case TARGET_NR_fstatfs:
10779         ret = get_errno(fstatfs(arg1, &stfs));
10780         goto convert_statfs;
10781 #endif
10782 #ifdef TARGET_NR_statfs64
10783     case TARGET_NR_statfs64:
10784         if (!(p = lock_user_string(arg1))) {
10785             return -TARGET_EFAULT;
10786         }
10787         ret = get_errno(statfs(path(p), &stfs));
10788         unlock_user(p, arg1, 0);
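    /* Also reached via goto from TARGET_NR_fstatfs64 below. */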
10789     convert_statfs64:
10790         if (!is_error(ret)) {
10791             struct target_statfs64 *target_stfs;
10792 
10793             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10794                 return -TARGET_EFAULT;
10795             __put_user(stfs.f_type, &target_stfs->f_type);
10796             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10797             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10798             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10799             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10800             __put_user(stfs.f_files, &target_stfs->f_files);
10801             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10802             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10803             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10804             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10805             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10806 #ifdef _STATFS_F_FLAGS
10807             __put_user(stfs.f_flags, &target_stfs->f_flags);
10808 #else
10809             __put_user(0, &target_stfs->f_flags);
10810 #endif
10811             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10812             unlock_user_struct(target_stfs, arg3, 1);
10813         }
10814         return ret;
10815     case TARGET_NR_fstatfs64:
10816         ret = get_errno(fstatfs(arg1, &stfs));
10817         goto convert_statfs64;
10818 #endif
10819 #ifdef TARGET_NR_socketcall
10820     case TARGET_NR_socketcall:
10821         return do_socketcall(arg1, arg2);
10822 #endif
10823 #ifdef TARGET_NR_accept
10824     case TARGET_NR_accept:
10825         return do_accept4(arg1, arg2, arg3, 0);
10826 #endif
10827 #ifdef TARGET_NR_accept4
10828     case TARGET_NR_accept4:
10829         return do_accept4(arg1, arg2, arg3, arg4);
10830 #endif
10831 #ifdef TARGET_NR_bind
10832     case TARGET_NR_bind:
10833         return do_bind(arg1, arg2, arg3);
10834 #endif
10835 #ifdef TARGET_NR_connect
10836     case TARGET_NR_connect:
10837         return do_connect(arg1, arg2, arg3);
10838 #endif
10839 #ifdef TARGET_NR_getpeername
10840     case TARGET_NR_getpeername:
10841         return do_getpeername(arg1, arg2, arg3);
10842 #endif
10843 #ifdef TARGET_NR_getsockname
10844     case TARGET_NR_getsockname:
10845         return do_getsockname(arg1, arg2, arg3);
10846 #endif
10847 #ifdef TARGET_NR_getsockopt
10848     case TARGET_NR_getsockopt:
10849         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10850 #endif
10851 #ifdef TARGET_NR_listen
10852     case TARGET_NR_listen:
10853         return get_errno(listen(arg1, arg2));
10854 #endif
10855 #ifdef TARGET_NR_recv
10856     case TARGET_NR_recv:
10857         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10858 #endif
10859 #ifdef TARGET_NR_recvfrom
10860     case TARGET_NR_recvfrom:
10861         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10862 #endif
10863 #ifdef TARGET_NR_recvmsg
10864     case TARGET_NR_recvmsg:
10865         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10866 #endif
10867 #ifdef TARGET_NR_send
10868     case TARGET_NR_send:
10869         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10870 #endif
10871 #ifdef TARGET_NR_sendmsg
10872     case TARGET_NR_sendmsg:
10873         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10874 #endif
10875 #ifdef TARGET_NR_sendmmsg
10876     case TARGET_NR_sendmmsg:
10877         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10878 #endif
10879 #ifdef TARGET_NR_recvmmsg
10880     case TARGET_NR_recvmmsg:
10881         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10882 #endif
10883 #ifdef TARGET_NR_sendto
10884     case TARGET_NR_sendto:
10885         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10886 #endif
10887 #ifdef TARGET_NR_shutdown
10888     case TARGET_NR_shutdown:
10889         return get_errno(shutdown(arg1, arg2));
10890 #endif
10891 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10892     case TARGET_NR_getrandom:
10893         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10894         if (!p) {
10895             return -TARGET_EFAULT;
10896         }
10897         ret = get_errno(getrandom(p, arg2, arg3));
10898         unlock_user(p, arg1, ret);
10899         return ret;
10900 #endif
10901 #ifdef TARGET_NR_socket
10902     case TARGET_NR_socket:
10903         return do_socket(arg1, arg2, arg3);
10904 #endif
10905 #ifdef TARGET_NR_socketpair
10906     case TARGET_NR_socketpair:
10907         return do_socketpair(arg1, arg2, arg3, arg4);
10908 #endif
10909 #ifdef TARGET_NR_setsockopt
10910     case TARGET_NR_setsockopt:
10911         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10912 #endif
10913 #if defined(TARGET_NR_syslog)
10914     case TARGET_NR_syslog:
10915         {
10916             int len = arg3;
10917 
10918             switch (arg1) {
10919             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10920             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10921             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10922             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10923             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10924             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10925             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10926             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10927                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10928             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10929             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10930             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10931                 {
10932                     if (len < 0) {
10933                         return -TARGET_EINVAL;
10934                     }
10935                     if (len == 0) {
10936                         return 0;
10937                     }
10938                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10939                     if (!p) {
10940                         return -TARGET_EFAULT;
10941                     }
10942                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10943                     unlock_user(p, arg2, arg3);
10944                 }
10945                 return ret;
10946             default:
10947                 return -TARGET_EINVAL;
10948             }
10949         }
10950         break;
10951 #endif
10952     case TARGET_NR_setitimer:
10953         {
10954             struct itimerval value, ovalue, *pvalue;
10955 
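            /* A target itimerval is two target_timevals (it_interval then
             * it_value) laid out back to back, so copy each half separately.
             */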
10956             if (arg2) {
10957                 pvalue = &value;
10958                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10959                     || copy_from_user_timeval(&pvalue->it_value,
10960                                               arg2 + sizeof(struct target_timeval)))
10961                     return -TARGET_EFAULT;
10962             } else {
10963                 pvalue = NULL;
10964             }
10965             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10966             if (!is_error(ret) && arg3) {
10967                 if (copy_to_user_timeval(arg3,
10968                                          &ovalue.it_interval)
10969                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10970                                             &ovalue.it_value))
10971                     return -TARGET_EFAULT;
10972             }
10973         }
10974         return ret;
10975     case TARGET_NR_getitimer:
10976         {
10977             struct itimerval value;
10978 
10979             ret = get_errno(getitimer(arg1, &value));
10980             if (!is_error(ret) && arg2) {
10981                 if (copy_to_user_timeval(arg2,
10982                                          &value.it_interval)
10983                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10984                                             &value.it_value))
10985                     return -TARGET_EFAULT;
10986             }
10987         }
10988         return ret;
10989 #ifdef TARGET_NR_stat
10990     case TARGET_NR_stat:
10991         if (!(p = lock_user_string(arg1))) {
10992             return -TARGET_EFAULT;
10993         }
10994         ret = get_errno(stat(path(p), &st));
10995         unlock_user(p, arg1, 0);
10996         goto do_stat;
10997 #endif
10998 #ifdef TARGET_NR_lstat
10999     case TARGET_NR_lstat:
11000         if (!(p = lock_user_string(arg1))) {
11001             return -TARGET_EFAULT;
11002         }
11003         ret = get_errno(lstat(path(p), &st));
11004         unlock_user(p, arg1, 0);
11005         goto do_stat;
11006 #endif
11007 #ifdef TARGET_NR_fstat
11008     case TARGET_NR_fstat:
11009         {
11010             ret = get_errno(fstat(arg1, &st));
11011 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
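        /* TARGET_NR_stat and TARGET_NR_lstat jump here once st is filled. */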
11012         do_stat:
11013 #endif
11014             if (!is_error(ret)) {
11015                 struct target_stat *target_st;
11016 
11017                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11018                     return -TARGET_EFAULT;
11019                 memset(target_st, 0, sizeof(*target_st));
11020                 __put_user(st.st_dev, &target_st->st_dev);
11021                 __put_user(st.st_ino, &target_st->st_ino);
11022                 __put_user(st.st_mode, &target_st->st_mode);
11023                 __put_user(st.st_uid, &target_st->st_uid);
11024                 __put_user(st.st_gid, &target_st->st_gid);
11025                 __put_user(st.st_nlink, &target_st->st_nlink);
11026                 __put_user(st.st_rdev, &target_st->st_rdev);
11027                 __put_user(st.st_size, &target_st->st_size);
11028                 __put_user(st.st_blksize, &target_st->st_blksize);
11029                 __put_user(st.st_blocks, &target_st->st_blocks);
11030                 __put_user(st.st_atime, &target_st->target_st_atime);
11031                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11032                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11033 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11034                 __put_user(st.st_atim.tv_nsec,
11035                            &target_st->target_st_atime_nsec);
11036                 __put_user(st.st_mtim.tv_nsec,
11037                            &target_st->target_st_mtime_nsec);
11038                 __put_user(st.st_ctim.tv_nsec,
11039                            &target_st->target_st_ctime_nsec);
11040 #endif
11041                 unlock_user_struct(target_st, arg2, 1);
11042             }
11043         }
11044         return ret;
11045 #endif
11046     case TARGET_NR_vhangup:
11047         return get_errno(vhangup());
11048 #ifdef TARGET_NR_syscall
11049     case TARGET_NR_syscall:
11050         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11051                           arg6, arg7, arg8, 0);
11052 #endif
11053 #if defined(TARGET_NR_wait4)
11054     case TARGET_NR_wait4:
11055         {
11056             int status;
11057             abi_long status_ptr = arg2;
11058             struct rusage rusage, *rusage_ptr;
11059             abi_ulong target_rusage = arg4;
11060             abi_long rusage_err;
11061             if (target_rusage)
11062                 rusage_ptr = &rusage;
11063             else
11064                 rusage_ptr = NULL;
11065             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11066             if (!is_error(ret)) {
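                /* Only copy status back if a child was actually reaped. */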
11067                 if (status_ptr && ret) {
11068                     status = host_to_target_waitstatus(status);
11069                     if (put_user_s32(status, status_ptr))
11070                         return -TARGET_EFAULT;
11071                 }
11072                 if (target_rusage) {
11073                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11074                     if (rusage_err) {
11075                         ret = rusage_err;
11076                     }
11077                 }
11078             }
11079         }
11080         return ret;
11081 #endif
11082 #ifdef TARGET_NR_swapoff
11083     case TARGET_NR_swapoff:
11084         if (!(p = lock_user_string(arg1)))
11085             return -TARGET_EFAULT;
11086         ret = get_errno(swapoff(p));
11087         unlock_user(p, arg1, 0);
11088         return ret;
11089 #endif
11090     case TARGET_NR_sysinfo:
11091         {
11092             struct target_sysinfo *target_value;
11093             struct sysinfo value;
11094             ret = get_errno(sysinfo(&value));
11095             if (!is_error(ret) && arg1)
11096             {
11097                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11098                     return -TARGET_EFAULT;
11099                 __put_user(value.uptime, &target_value->uptime);
11100                 __put_user(value.loads[0], &target_value->loads[0]);
11101                 __put_user(value.loads[1], &target_value->loads[1]);
11102                 __put_user(value.loads[2], &target_value->loads[2]);
11103                 __put_user(value.totalram, &target_value->totalram);
11104                 __put_user(value.freeram, &target_value->freeram);
11105                 __put_user(value.sharedram, &target_value->sharedram);
11106                 __put_user(value.bufferram, &target_value->bufferram);
11107                 __put_user(value.totalswap, &target_value->totalswap);
11108                 __put_user(value.freeswap, &target_value->freeswap);
11109                 __put_user(value.procs, &target_value->procs);
11110                 __put_user(value.totalhigh, &target_value->totalhigh);
11111                 __put_user(value.freehigh, &target_value->freehigh);
11112                 __put_user(value.mem_unit, &target_value->mem_unit);
11113                 unlock_user_struct(target_value, arg1, 1);
11114             }
11115         }
11116         return ret;
11117 #ifdef TARGET_NR_ipc
11118     case TARGET_NR_ipc:
11119         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11120 #endif
11121 #ifdef TARGET_NR_semget
11122     case TARGET_NR_semget:
11123         return get_errno(semget(arg1, arg2, arg3));
11124 #endif
11125 #ifdef TARGET_NR_semop
11126     case TARGET_NR_semop:
11127         return do_semtimedop(arg1, arg2, arg3, 0, false);
11128 #endif
11129 #ifdef TARGET_NR_semtimedop
11130     case TARGET_NR_semtimedop:
11131         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11132 #endif
11133 #ifdef TARGET_NR_semtimedop_time64
11134     case TARGET_NR_semtimedop_time64:
11135         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11136 #endif
11137 #ifdef TARGET_NR_semctl
11138     case TARGET_NR_semctl:
11139         return do_semctl(arg1, arg2, arg3, arg4);
11140 #endif
11141 #ifdef TARGET_NR_msgctl
11142     case TARGET_NR_msgctl:
11143         return do_msgctl(arg1, arg2, arg3);
11144 #endif
11145 #ifdef TARGET_NR_msgget
11146     case TARGET_NR_msgget:
11147         return get_errno(msgget(arg1, arg2));
11148 #endif
11149 #ifdef TARGET_NR_msgrcv
11150     case TARGET_NR_msgrcv:
11151         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11152 #endif
11153 #ifdef TARGET_NR_msgsnd
11154     case TARGET_NR_msgsnd:
11155         return do_msgsnd(arg1, arg2, arg3, arg4);
11156 #endif
11157 #ifdef TARGET_NR_shmget
11158     case TARGET_NR_shmget:
11159         return get_errno(shmget(arg1, arg2, arg3));
11160 #endif
11161 #ifdef TARGET_NR_shmctl
11162     case TARGET_NR_shmctl:
11163         return do_shmctl(arg1, arg2, arg3);
11164 #endif
11165 #ifdef TARGET_NR_shmat
11166     case TARGET_NR_shmat:
11167         return target_shmat(cpu_env, arg1, arg2, arg3);
11168 #endif
11169 #ifdef TARGET_NR_shmdt
11170     case TARGET_NR_shmdt:
11171         return target_shmdt(arg1);
11172 #endif
11173     case TARGET_NR_fsync:
11174         return get_errno(fsync(arg1));
11175     case TARGET_NR_clone:
11176         /* Linux manages to have three different orderings for its
11177          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11178          * match the kernel's CONFIG_CLONE_* settings.
11179          * Microblaze is further special in that it uses a sixth
11180          * implicit argument to clone for the TLS pointer.
11181          */
11182 #if defined(TARGET_MICROBLAZE)
11183         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11184 #elif defined(TARGET_CLONE_BACKWARDS)
11185         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11186 #elif defined(TARGET_CLONE_BACKWARDS2)
11187         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11188 #else
11189         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11190 #endif
11191         return ret;
11192 #ifdef __NR_exit_group
11193         /* new thread calls */
11194     case TARGET_NR_exit_group:
11195         preexit_cleanup(cpu_env, arg1);
11196         return get_errno(exit_group(arg1));
11197 #endif
11198     case TARGET_NR_setdomainname:
11199         if (!(p = lock_user_string(arg1)))
11200             return -TARGET_EFAULT;
11201         ret = get_errno(setdomainname(p, arg2));
11202         unlock_user(p, arg1, 0);
11203         return ret;
11204     case TARGET_NR_uname:
11205         /* no need to transcode because we use the linux syscall */
11206         {
11207             struct new_utsname * buf;
11208 
11209             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11210                 return -TARGET_EFAULT;
11211             ret = get_errno(sys_uname(buf));
11212             if (!is_error(ret)) {
11213                 /* Overwrite the native machine name with whatever is being
11214                    emulated. */
11215                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11216                           sizeof(buf->machine));
11217                 /* Allow the user to override the reported release.  */
11218                 if (qemu_uname_release && *qemu_uname_release) {
11219                     g_strlcpy(buf->release, qemu_uname_release,
11220                               sizeof(buf->release));
11221                 }
11222             }
11223             unlock_user_struct(buf, arg1, 1);
11224         }
11225         return ret;
11226 #ifdef TARGET_I386
11227     case TARGET_NR_modify_ldt:
11228         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11229 #if !defined(TARGET_X86_64)
11230     case TARGET_NR_vm86:
11231         return do_vm86(cpu_env, arg1, arg2);
11232 #endif
11233 #endif
11234 #if defined(TARGET_NR_adjtimex)
11235     case TARGET_NR_adjtimex:
11236         {
11237             struct timex host_buf;
11238 
11239             if (target_to_host_timex(&host_buf, arg1) != 0) {
11240                 return -TARGET_EFAULT;
11241             }
11242             ret = get_errno(adjtimex(&host_buf));
11243             if (!is_error(ret)) {
11244                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11245                     return -TARGET_EFAULT;
11246                 }
11247             }
11248         }
11249         return ret;
11250 #endif
11251 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11252     case TARGET_NR_clock_adjtime:
11253         {
11254             struct timex htx;
11255 
11256             if (target_to_host_timex(&htx, arg2) != 0) {
11257                 return -TARGET_EFAULT;
11258             }
11259             ret = get_errno(clock_adjtime(arg1, &htx));
11260             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11261                 return -TARGET_EFAULT;
11262             }
11263         }
11264         return ret;
11265 #endif
11266 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11267     case TARGET_NR_clock_adjtime64:
11268         {
11269             struct timex htx;
11270 
11271             if (target_to_host_timex64(&htx, arg2) != 0) {
11272                 return -TARGET_EFAULT;
11273             }
11274             ret = get_errno(clock_adjtime(arg1, &htx));
11275             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11276                 return -TARGET_EFAULT;
11277             }
11278         }
11279         return ret;
11280 #endif
11281     case TARGET_NR_getpgid:
11282         return get_errno(getpgid(arg1));
11283     case TARGET_NR_fchdir:
11284         return get_errno(fchdir(arg1));
11285     case TARGET_NR_personality:
11286         return get_errno(personality(arg1));
11287 #ifdef TARGET_NR__llseek /* Not on alpha */
11288     case TARGET_NR__llseek:
11289         {
11290             int64_t res;
11291 #if !defined(__NR_llseek)
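            /* Hosts without __NR_llseek (64-bit hosts) can combine the two
             * halves and use lseek() directly.
             */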
11292             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11293             if (res == -1) {
11294                 ret = get_errno(res);
11295             } else {
11296                 ret = 0;
11297             }
11298 #else
11299             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11300 #endif
11301             if ((ret == 0) && put_user_s64(res, arg4)) {
11302                 return -TARGET_EFAULT;
11303             }
11304         }
11305         return ret;
11306 #endif
11307 #ifdef TARGET_NR_getdents
11308     case TARGET_NR_getdents:
11309         return do_getdents(arg1, arg2, arg3);
11310 #endif /* TARGET_NR_getdents */
11311 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11312     case TARGET_NR_getdents64:
11313         return do_getdents64(arg1, arg2, arg3);
11314 #endif /* TARGET_NR_getdents64 */
11315 #if defined(TARGET_NR__newselect)
11316     case TARGET_NR__newselect:
11317         return do_select(arg1, arg2, arg3, arg4, arg5);
11318 #endif
11319 #ifdef TARGET_NR_poll
11320     case TARGET_NR_poll:
11321         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11322 #endif
11323 #ifdef TARGET_NR_ppoll
11324     case TARGET_NR_ppoll:
11325         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11326 #endif
11327 #ifdef TARGET_NR_ppoll_time64
11328     case TARGET_NR_ppoll_time64:
11329         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11330 #endif
11331     case TARGET_NR_flock:
11332         /* NOTE: the flock constant seems to be the same for every
11333            Linux platform */
11334         return get_errno(safe_flock(arg1, arg2));
11335     case TARGET_NR_readv:
11336         {
11337             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11338             if (vec != NULL) {
11339                 ret = get_errno(safe_readv(arg1, vec, arg3));
11340                 unlock_iovec(vec, arg2, arg3, 1);
11341             } else {
11342                 ret = -host_to_target_errno(errno);
11343             }
11344         }
11345         return ret;
11346     case TARGET_NR_writev:
11347         {
11348             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11349             if (vec != NULL) {
11350                 ret = get_errno(safe_writev(arg1, vec, arg3));
11351                 unlock_iovec(vec, arg2, arg3, 0);
11352             } else {
11353                 ret = -host_to_target_errno(errno);
11354             }
11355         }
11356         return ret;
11357 #if defined(TARGET_NR_preadv)
11358     case TARGET_NR_preadv:
11359         {
11360             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11361             if (vec != NULL) {
11362                 unsigned long low, high;
11363 
11364                 target_to_host_low_high(arg4, arg5, &low, &high);
11365                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11366                 unlock_iovec(vec, arg2, arg3, 1);
11367             } else {
11368                 ret = -host_to_target_errno(errno);
11369             }
11370         }
11371         return ret;
11372 #endif
11373 #if defined(TARGET_NR_pwritev)
11374     case TARGET_NR_pwritev:
11375         {
11376             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11377             if (vec != NULL) {
11378                 unsigned long low, high;
11379 
11380                 target_to_host_low_high(arg4, arg5, &low, &high);
11381                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11382                 unlock_iovec(vec, arg2, arg3, 0);
11383             } else {
11384                 ret = -host_to_target_errno(errno);
11385             }
11386         }
11387         return ret;
11388 #endif
11389     case TARGET_NR_getsid:
11390         return get_errno(getsid(arg1));
11391 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11392     case TARGET_NR_fdatasync:
11393         return get_errno(fdatasync(arg1));
11394 #endif
11395     case TARGET_NR_sched_getaffinity:
11396         {
11397             unsigned int mask_size;
11398             unsigned long *mask;
11399 
11400             /*
11401              * sched_getaffinity needs multiples of ulong, so we need to
11402              * take care of mismatches between target ulong and host ulong sizes.
11403              */
11404             if (arg2 & (sizeof(abi_ulong) - 1)) {
11405                 return -TARGET_EINVAL;
11406             }
11407             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11408 
11409             mask = alloca(mask_size);
11410             memset(mask, 0, mask_size);
11411             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11412 
11413             if (!is_error(ret)) {
11414                 if (ret > arg2) {
11415                     /* More data was returned than the caller's buffer can hold.
11416                      * This only happens if sizeof(abi_long) < sizeof(long)
11417                      * and the caller passed us a buffer holding an odd number
11418                      * of abi_longs. If the host kernel is actually using the
11419                      * extra 4 bytes then fail EINVAL; otherwise we can just
11420                      * ignore them and only copy the interesting part.
11421                      */
11422                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11423                     if (numcpus > arg2 * 8) {
11424                         return -TARGET_EINVAL;
11425                     }
11426                     ret = arg2;
11427                 }
11428 
11429                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11430                     return -TARGET_EFAULT;
11431                 }
11432             }
11433         }
11434         return ret;
11435     case TARGET_NR_sched_setaffinity:
11436         {
11437             unsigned int mask_size;
11438             unsigned long *mask;
11439 
11440             /*
11441              * sched_setaffinity needs multiples of ulong, so we need to
11442              * take care of mismatches between target ulong and host ulong sizes.
11443              */
11444             if (arg2 & (sizeof(abi_ulong) - 1)) {
11445                 return -TARGET_EINVAL;
11446             }
11447             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11448             mask = alloca(mask_size);
11449 
11450             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11451             if (ret) {
11452                 return ret;
11453             }
11454 
11455             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11456         }
11457     case TARGET_NR_getcpu:
11458         {
11459             unsigned cpuid, node;
11460             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11461                                        arg2 ? &node : NULL,
11462                                        NULL));
11463             if (is_error(ret)) {
11464                 return ret;
11465             }
11466             if (arg1 && put_user_u32(cpuid, arg1)) {
11467                 return -TARGET_EFAULT;
11468             }
11469             if (arg2 && put_user_u32(node, arg2)) {
11470                 return -TARGET_EFAULT;
11471             }
11472         }
11473         return ret;
11474     case TARGET_NR_sched_setparam:
11475         {
11476             struct target_sched_param *target_schp;
11477             struct sched_param schp;
11478 
11479             if (arg2 == 0) {
11480                 return -TARGET_EINVAL;
11481             }
11482             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11483                 return -TARGET_EFAULT;
11484             }
11485             schp.sched_priority = tswap32(target_schp->sched_priority);
11486             unlock_user_struct(target_schp, arg2, 0);
11487             return get_errno(sys_sched_setparam(arg1, &schp));
11488         }
11489     case TARGET_NR_sched_getparam:
11490         {
11491             struct target_sched_param *target_schp;
11492             struct sched_param schp;
11493 
11494             if (arg2 == 0) {
11495                 return -TARGET_EINVAL;
11496             }
11497             ret = get_errno(sys_sched_getparam(arg1, &schp));
11498             if (!is_error(ret)) {
11499                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11500                     return -TARGET_EFAULT;
11501                 }
11502                 target_schp->sched_priority = tswap32(schp.sched_priority);
11503                 unlock_user_struct(target_schp, arg2, 1);
11504             }
11505         }
11506         return ret;
11507     case TARGET_NR_sched_setscheduler:
11508         {
11509             struct target_sched_param *target_schp;
11510             struct sched_param schp;
11511             if (arg3 == 0) {
11512                 return -TARGET_EINVAL;
11513             }
11514             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11515                 return -TARGET_EFAULT;
11516             }
11517             schp.sched_priority = tswap32(target_schp->sched_priority);
11518             unlock_user_struct(target_schp, arg3, 0);
11519             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11520         }
11521     case TARGET_NR_sched_getscheduler:
11522         return get_errno(sys_sched_getscheduler(arg1));
11523     case TARGET_NR_sched_getattr:
11524         {
11525             struct target_sched_attr *target_scha;
11526             struct sched_attr scha;
11527             if (arg2 == 0) {
11528                 return -TARGET_EINVAL;
11529             }
11530             if (arg3 > sizeof(scha)) {
11531                 arg3 = sizeof(scha);
11532             }
11533             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11534             if (!is_error(ret)) {
11535                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11536                 if (!target_scha) {
11537                     return -TARGET_EFAULT;
11538                 }
11539                 target_scha->size = tswap32(scha.size);
11540                 target_scha->sched_policy = tswap32(scha.sched_policy);
11541                 target_scha->sched_flags = tswap64(scha.sched_flags);
11542                 target_scha->sched_nice = tswap32(scha.sched_nice);
11543                 target_scha->sched_priority = tswap32(scha.sched_priority);
11544                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11545                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11546                 target_scha->sched_period = tswap64(scha.sched_period);
11547                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11548                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11549                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11550                 }
11551                 unlock_user(target_scha, arg2, arg3);
11552             }
11553             return ret;
11554         }
11555     case TARGET_NR_sched_setattr:
11556         {
11557             struct target_sched_attr *target_scha;
11558             struct sched_attr scha;
11559             uint32_t size;
11560             int zeroed;
11561             if (arg2 == 0) {
11562                 return -TARGET_EINVAL;
11563             }
11564             if (get_user_u32(size, arg2)) {
11565                 return -TARGET_EFAULT;
11566             }
11567             if (!size) {
11568                 size = offsetof(struct target_sched_attr, sched_util_min);
11569             }
11570             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11571                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11572                     return -TARGET_EFAULT;
11573                 }
11574                 return -TARGET_E2BIG;
11575             }
11576 
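            /* Bytes beyond the fields we understand must be zero;
             * otherwise the sched_setattr ABI calls for E2BIG.
             */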
11577             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11578             if (zeroed < 0) {
11579                 return zeroed;
11580             } else if (zeroed == 0) {
11581                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11582                     return -TARGET_EFAULT;
11583                 }
11584                 return -TARGET_E2BIG;
11585             }
11586             if (size > sizeof(struct target_sched_attr)) {
11587                 size = sizeof(struct target_sched_attr);
11588             }
11589 
11590             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11591             if (!target_scha) {
11592                 return -TARGET_EFAULT;
11593             }
11594             scha.size = size;
11595             scha.sched_policy = tswap32(target_scha->sched_policy);
11596             scha.sched_flags = tswap64(target_scha->sched_flags);
11597             scha.sched_nice = tswap32(target_scha->sched_nice);
11598             scha.sched_priority = tswap32(target_scha->sched_priority);
11599             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11600             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11601             scha.sched_period = tswap64(target_scha->sched_period);
11602             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11603                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11604                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11605             }
11606             unlock_user(target_scha, arg2, 0);
11607             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11608         }
11609     case TARGET_NR_sched_yield:
11610         return get_errno(sched_yield());
11611     case TARGET_NR_sched_get_priority_max:
11612         return get_errno(sched_get_priority_max(arg1));
11613     case TARGET_NR_sched_get_priority_min:
11614         return get_errno(sched_get_priority_min(arg1));
11615 #ifdef TARGET_NR_sched_rr_get_interval
11616     case TARGET_NR_sched_rr_get_interval:
11617         {
11618             struct timespec ts;
11619             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11620             if (!is_error(ret)) {
11621                 ret = host_to_target_timespec(arg2, &ts);
11622             }
11623         }
11624         return ret;
11625 #endif
11626 #ifdef TARGET_NR_sched_rr_get_interval_time64
11627     case TARGET_NR_sched_rr_get_interval_time64:
11628         {
11629             struct timespec ts;
11630             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11631             if (!is_error(ret)) {
11632                 ret = host_to_target_timespec64(arg2, &ts);
11633             }
11634         }
11635         return ret;
11636 #endif
11637 #if defined(TARGET_NR_nanosleep)
11638     case TARGET_NR_nanosleep:
11639         {
11640             struct timespec req, rem;
11641             if (target_to_host_timespec(&req, arg1)) {
11642                 return -TARGET_EFAULT;
11643             }
11644             ret = get_errno(safe_nanosleep(&req, &rem));
11645             if (is_error(ret) && arg2) {
11646                 if (host_to_target_timespec(arg2, &rem)) {
11647                     return -TARGET_EFAULT;
11648                 }
11649             }
11650         }
11651         return ret;
11652 #endif
11653     case TARGET_NR_prctl:
11654         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11656 #ifdef TARGET_NR_arch_prctl
11657     case TARGET_NR_arch_prctl:
11658         return do_arch_prctl(cpu_env, arg1, arg2);
11659 #endif
11660 #ifdef TARGET_NR_pread64
11661     case TARGET_NR_pread64:
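        /* On ABIs that want 64-bit values in aligned register pairs, the
         * offset's low/high halves arrive one argument slot later.
         */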
11662         if (regpairs_aligned(cpu_env, num)) {
11663             arg4 = arg5;
11664             arg5 = arg6;
11665         }
11666         if (arg2 == 0 && arg3 == 0) {
11667             /* Special-case NULL buffer and zero length, which should succeed */
11668             p = 0;
11669         } else {
11670             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11671             if (!p) {
11672                 return -TARGET_EFAULT;
11673             }
11674         }
11675         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11676         unlock_user(p, arg2, ret);
11677         return ret;
11678     case TARGET_NR_pwrite64:
11679         if (regpairs_aligned(cpu_env, num)) {
11680             arg4 = arg5;
11681             arg5 = arg6;
11682         }
11683         if (arg2 == 0 && arg3 == 0) {
11684             /* Special-case NULL buffer and zero length, which should succeed */
11685             p = 0;
11686         } else {
11687             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11688             if (!p) {
11689                 return -TARGET_EFAULT;
11690             }
11691         }
11692         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11693         unlock_user(p, arg2, 0);
11694         return ret;
11695 #endif
11696     case TARGET_NR_getcwd:
11697         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11698             return -TARGET_EFAULT;
11699         ret = get_errno(sys_getcwd1(p, arg2));
11700         unlock_user(p, arg1, ret);
11701         return ret;
11702     case TARGET_NR_capget:
11703     case TARGET_NR_capset:
11704     {
11705         struct target_user_cap_header *target_header;
11706         struct target_user_cap_data *target_data = NULL;
11707         struct __user_cap_header_struct header;
11708         struct __user_cap_data_struct data[2];
11709         struct __user_cap_data_struct *dataptr = NULL;
11710         int i, target_datalen;
11711         int data_items = 1;
11712 
11713         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11714             return -TARGET_EFAULT;
11715         }
11716         header.version = tswap32(target_header->version);
11717         header.pid = tswap32(target_header->pid);
11718 
11719         if (header.version != _LINUX_CAPABILITY_VERSION) {
11720             /* Version 2 and up takes pointer to two user_data structs */
11721             data_items = 2;
11722         }
11723 
11724         target_datalen = sizeof(*target_data) * data_items;
11725 
11726         if (arg2) {
11727             if (num == TARGET_NR_capget) {
11728                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11729             } else {
11730                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11731             }
11732             if (!target_data) {
11733                 unlock_user_struct(target_header, arg1, 0);
11734                 return -TARGET_EFAULT;
11735             }
11736 
11737             if (num == TARGET_NR_capset) {
11738                 for (i = 0; i < data_items; i++) {
11739                     data[i].effective = tswap32(target_data[i].effective);
11740                     data[i].permitted = tswap32(target_data[i].permitted);
11741                     data[i].inheritable = tswap32(target_data[i].inheritable);
11742                 }
11743             }
11744 
11745             dataptr = data;
11746         }
11747 
11748         if (num == TARGET_NR_capget) {
11749             ret = get_errno(capget(&header, dataptr));
11750         } else {
11751             ret = get_errno(capset(&header, dataptr));
11752         }
11753 
11754         /* The kernel always updates version for both capget and capset */
11755         target_header->version = tswap32(header.version);
11756         unlock_user_struct(target_header, arg1, 1);
11757 
11758         if (arg2) {
11759             if (num == TARGET_NR_capget) {
11760                 for (i = 0; i < data_items; i++) {
11761                     target_data[i].effective = tswap32(data[i].effective);
11762                     target_data[i].permitted = tswap32(data[i].permitted);
11763                     target_data[i].inheritable = tswap32(data[i].inheritable);
11764                 }
11765                 unlock_user(target_data, arg2, target_datalen);
11766             } else {
11767                 unlock_user(target_data, arg2, 0);
11768             }
11769         }
11770         return ret;
11771     }
11772     case TARGET_NR_sigaltstack:
11773         return do_sigaltstack(arg1, arg2, cpu_env);
11774 
11775 #ifdef CONFIG_SENDFILE
11776 #ifdef TARGET_NR_sendfile
11777     case TARGET_NR_sendfile:
11778     {
11779         off_t *offp = NULL;
11780         off_t off;
11781         if (arg3) {
11782             ret = get_user_sal(off, arg3);
11783             if (is_error(ret)) {
11784                 return ret;
11785             }
11786             offp = &off;
11787         }
11788         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11789         if (!is_error(ret) && arg3) {
11790             abi_long ret2 = put_user_sal(off, arg3);
11791             if (is_error(ret2)) {
11792                 ret = ret2;
11793             }
11794         }
11795         return ret;
11796     }
11797 #endif
11798 #ifdef TARGET_NR_sendfile64
11799     case TARGET_NR_sendfile64:
11800     {
11801         off_t *offp = NULL;
11802         off_t off;
11803         if (arg3) {
11804             ret = get_user_s64(off, arg3);
11805             if (is_error(ret)) {
11806                 return ret;
11807             }
11808             offp = &off;
11809         }
11810         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11811         if (!is_error(ret) && arg3) {
11812             abi_long ret2 = put_user_s64(off, arg3);
11813             if (is_error(ret2)) {
11814                 ret = ret2;
11815             }
11816         }
11817         return ret;
11818     }
11819 #endif
11820 #endif
11821 #ifdef TARGET_NR_vfork
11822     case TARGET_NR_vfork:
11823         return get_errno(do_fork(cpu_env,
11824                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11825                          0, 0, 0, 0));
11826 #endif
11827 #ifdef TARGET_NR_ugetrlimit
11828     case TARGET_NR_ugetrlimit:
11829     {
11830         struct rlimit rlim;
11831         int resource = target_to_host_resource(arg1);
11832         ret = get_errno(getrlimit(resource, &rlim));
11833         if (!is_error(ret)) {
11834             struct target_rlimit *target_rlim;
11835             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11836                 return -TARGET_EFAULT;
11837             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11838             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11839             unlock_user_struct(target_rlim, arg2, 1);
11840         }
11841         return ret;
11842     }
11843 #endif
11844 #ifdef TARGET_NR_truncate64
11845     case TARGET_NR_truncate64:
11846         if (!(p = lock_user_string(arg1)))
11847             return -TARGET_EFAULT;
11848         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11849         unlock_user(p, arg1, 0);
11850         return ret;
11851 #endif
11852 #ifdef TARGET_NR_ftruncate64
11853     case TARGET_NR_ftruncate64:
11854         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11855 #endif
11856 #ifdef TARGET_NR_stat64
11857     case TARGET_NR_stat64:
11858         if (!(p = lock_user_string(arg1))) {
11859             return -TARGET_EFAULT;
11860         }
11861         ret = get_errno(stat(path(p), &st));
11862         unlock_user(p, arg1, 0);
11863         if (!is_error(ret))
11864             ret = host_to_target_stat64(cpu_env, arg2, &st);
11865         return ret;
11866 #endif
11867 #ifdef TARGET_NR_lstat64
11868     case TARGET_NR_lstat64:
11869         if (!(p = lock_user_string(arg1))) {
11870             return -TARGET_EFAULT;
11871         }
11872         ret = get_errno(lstat(path(p), &st));
11873         unlock_user(p, arg1, 0);
11874         if (!is_error(ret))
11875             ret = host_to_target_stat64(cpu_env, arg2, &st);
11876         return ret;
11877 #endif
11878 #ifdef TARGET_NR_fstat64
11879     case TARGET_NR_fstat64:
11880         ret = get_errno(fstat(arg1, &st));
11881         if (!is_error(ret))
11882             ret = host_to_target_stat64(cpu_env, arg2, &st);
11883         return ret;
11884 #endif
11885 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11886 #ifdef TARGET_NR_fstatat64
11887     case TARGET_NR_fstatat64:
11888 #endif
11889 #ifdef TARGET_NR_newfstatat
11890     case TARGET_NR_newfstatat:
11891 #endif
11892         if (!(p = lock_user_string(arg2))) {
11893             return -TARGET_EFAULT;
11894         }
11895         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11896         unlock_user(p, arg2, 0);
11897         if (!is_error(ret))
11898             ret = host_to_target_stat64(cpu_env, arg3, &st);
11899         return ret;
11900 #endif
11901 #if defined(TARGET_NR_statx)
11902     case TARGET_NR_statx:
11903         {
11904             struct target_statx *target_stx;
11905             int dirfd = arg1;
11906             int flags = arg3;
11907 
11908             p = lock_user_string(arg2);
11909             if (p == NULL) {
11910                 return -TARGET_EFAULT;
11911             }
11912 #if defined(__NR_statx)
11913             {
11914                 /*
11915                  * It is assumed that struct statx is architecture independent.
11916                  */
11917                 struct target_statx host_stx;
11918                 int mask = arg4;
11919 
11920                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11921                 if (!is_error(ret)) {
11922                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11923                         unlock_user(p, arg2, 0);
11924                         return -TARGET_EFAULT;
11925                     }
11926                 }
11927 
11928                 if (ret != -TARGET_ENOSYS) {
11929                     unlock_user(p, arg2, 0);
11930                     return ret;
11931                 }
11932             }
11933 #endif
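            /*
             * Either the host has no statx() syscall or it returned ENOSYS:
             * fall back to fstatat() and fill in the subset of statx fields
             * that can be derived from a plain struct stat.
             */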
11934             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11935             unlock_user(p, arg2, 0);
11936 
11937             if (!is_error(ret)) {
11938                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11939                     return -TARGET_EFAULT;
11940                 }
11941                 memset(target_stx, 0, sizeof(*target_stx));
11942                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11943                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11944                 __put_user(st.st_ino, &target_stx->stx_ino);
11945                 __put_user(st.st_mode, &target_stx->stx_mode);
11946                 __put_user(st.st_uid, &target_stx->stx_uid);
11947                 __put_user(st.st_gid, &target_stx->stx_gid);
11948                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11949                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11950                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11951                 __put_user(st.st_size, &target_stx->stx_size);
11952                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11953                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11954                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11955                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11956                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11957                 unlock_user_struct(target_stx, arg5, 1);
11958             }
11959         }
11960         return ret;
11961 #endif
11962 #ifdef TARGET_NR_lchown
11963     case TARGET_NR_lchown:
11964         if (!(p = lock_user_string(arg1)))
11965             return -TARGET_EFAULT;
11966         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11967         unlock_user(p, arg1, 0);
11968         return ret;
11969 #endif
11970 #ifdef TARGET_NR_getuid
11971     case TARGET_NR_getuid:
11972         return get_errno(high2lowuid(getuid()));
11973 #endif
11974 #ifdef TARGET_NR_getgid
11975     case TARGET_NR_getgid:
11976         return get_errno(high2lowgid(getgid()));
11977 #endif
11978 #ifdef TARGET_NR_geteuid
11979     case TARGET_NR_geteuid:
11980         return get_errno(high2lowuid(geteuid()));
11981 #endif
11982 #ifdef TARGET_NR_getegid
11983     case TARGET_NR_getegid:
11984         return get_errno(high2lowgid(getegid()));
11985 #endif
11986     case TARGET_NR_setreuid:
11987         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11988     case TARGET_NR_setregid:
11989         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11990     case TARGET_NR_getgroups:
11991         { /* the same code as for TARGET_NR_getgroups32 */
11992             int gidsetsize = arg1;
11993             target_id *target_grouplist;
11994             g_autofree gid_t *grouplist = NULL;
11995             int i;
11996 
11997             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11998                 return -TARGET_EINVAL;
11999             }
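            /*
             * gidsetsize == 0 is a valid query for the number of
             * supplementary groups, so a buffer is only needed when
             * gidsetsize > 0.
             */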
12000             if (gidsetsize > 0) {
12001                 grouplist = g_try_new(gid_t, gidsetsize);
12002                 if (!grouplist) {
12003                     return -TARGET_ENOMEM;
12004                 }
12005             }
12006             ret = get_errno(getgroups(gidsetsize, grouplist));
12007             if (!is_error(ret) && gidsetsize > 0) {
12008                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12009                                              gidsetsize * sizeof(target_id), 0);
12010                 if (!target_grouplist) {
12011                     return -TARGET_EFAULT;
12012                 }
12013                 for (i = 0; i < ret; i++) {
12014                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12015                 }
12016                 unlock_user(target_grouplist, arg2,
12017                             gidsetsize * sizeof(target_id));
12018             }
12019             return ret;
12020         }
12021     case TARGET_NR_setgroups:
12022         { /* the same code as for TARGET_NR_setgroups32 */
12023             int gidsetsize = arg1;
12024             target_id *target_grouplist;
12025             g_autofree gid_t *grouplist = NULL;
12026             int i;
12027 
12028             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12029                 return -TARGET_EINVAL;
12030             }
12031             if (gidsetsize > 0) {
12032                 grouplist = g_try_new(gid_t, gidsetsize);
12033                 if (!grouplist) {
12034                     return -TARGET_ENOMEM;
12035                 }
12036                 target_grouplist = lock_user(VERIFY_READ, arg2,
12037                                              gidsetsize * sizeof(target_id), 1);
12038                 if (!target_grouplist) {
12039                     return -TARGET_EFAULT;
12040                 }
12041                 for (i = 0; i < gidsetsize; i++) {
12042                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12043                 }
12044                 unlock_user(target_grouplist, arg2,
12045                             gidsetsize * sizeof(target_id));
12046             }
12047             return get_errno(sys_setgroups(gidsetsize, grouplist));
12048         }
12049     case TARGET_NR_fchown:
12050         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12051 #if defined(TARGET_NR_fchownat)
12052     case TARGET_NR_fchownat:
12053         if (!(p = lock_user_string(arg2)))
12054             return -TARGET_EFAULT;
12055         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12056                                  low2highgid(arg4), arg5));
12057         unlock_user(p, arg2, 0);
12058         return ret;
12059 #endif
12060 #ifdef TARGET_NR_setresuid
12061     case TARGET_NR_setresuid:
12062         return get_errno(sys_setresuid(low2highuid(arg1),
12063                                        low2highuid(arg2),
12064                                        low2highuid(arg3)));
12065 #endif
12066 #ifdef TARGET_NR_getresuid
12067     case TARGET_NR_getresuid:
12068         {
12069             uid_t ruid, euid, suid;
12070             ret = get_errno(getresuid(&ruid, &euid, &suid));
12071             if (!is_error(ret)) {
12072                 if (put_user_id(high2lowuid(ruid), arg1)
12073                     || put_user_id(high2lowuid(euid), arg2)
12074                     || put_user_id(high2lowuid(suid), arg3))
12075                     return -TARGET_EFAULT;
12076             }
12077         }
12078         return ret;
12079 #endif
12080 #ifdef TARGET_NR_setresgid
12081     case TARGET_NR_setresgid:
12082         return get_errno(sys_setresgid(low2highgid(arg1),
12083                                        low2highgid(arg2),
12084                                        low2highgid(arg3)));
12085 #endif
12086 #ifdef TARGET_NR_getresgid
12087     case TARGET_NR_getresgid:
12088         {
12089             gid_t rgid, egid, sgid;
12090             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12091             if (!is_error(ret)) {
12092                 if (put_user_id(high2lowgid(rgid), arg1)
12093                     || put_user_id(high2lowgid(egid), arg2)
12094                     || put_user_id(high2lowgid(sgid), arg3))
12095                     return -TARGET_EFAULT;
12096             }
12097         }
12098         return ret;
12099 #endif
12100 #ifdef TARGET_NR_chown
12101     case TARGET_NR_chown:
12102         if (!(p = lock_user_string(arg1)))
12103             return -TARGET_EFAULT;
12104         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12105         unlock_user(p, arg1, 0);
12106         return ret;
12107 #endif
12108     case TARGET_NR_setuid:
12109         return get_errno(sys_setuid(low2highuid(arg1)));
12110     case TARGET_NR_setgid:
12111         return get_errno(sys_setgid(low2highgid(arg1)));
12112     case TARGET_NR_setfsuid:
12113         return get_errno(setfsuid(arg1));
12114     case TARGET_NR_setfsgid:
12115         return get_errno(setfsgid(arg1));
12116 
12117 #ifdef TARGET_NR_lchown32
12118     case TARGET_NR_lchown32:
12119         if (!(p = lock_user_string(arg1)))
12120             return -TARGET_EFAULT;
12121         ret = get_errno(lchown(p, arg2, arg3));
12122         unlock_user(p, arg1, 0);
12123         return ret;
12124 #endif
12125 #ifdef TARGET_NR_getuid32
12126     case TARGET_NR_getuid32:
12127         return get_errno(getuid());
12128 #endif
12129 
12130 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12131     /* Alpha specific */
12132     case TARGET_NR_getxuid:
12133         {
12134             uid_t euid;
12135             euid = geteuid();
12136             cpu_env->ir[IR_A4] = euid;
12137         }
12138         return get_errno(getuid());
12139 #endif
12140 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12141     /* Alpha specific */
12142     case TARGET_NR_getxgid:
12143         {
12144             gid_t egid;
12145             egid = getegid();
12146             cpu_env->ir[IR_A4] = egid;
12147         }
12148         return get_errno(getgid());
12149 #endif
12150 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12151     /* Alpha specific */
12152     case TARGET_NR_osf_getsysinfo:
12153         ret = -TARGET_EOPNOTSUPP;
12154         switch (arg1) {
12155           case TARGET_GSI_IEEE_FP_CONTROL:
12156             {
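                /*
                 * Build the software FP control word from the trap-enable
                 * and mapping bits kept in env->swcr plus the live exception
                 * status bits held in the hardware FPCR.
                 */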
12157                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12158                 uint64_t swcr = cpu_env->swcr;
12159 
12160                 swcr &= ~SWCR_STATUS_MASK;
12161                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12162 
12163                 if (put_user_u64(swcr, arg2))
12164                     return -TARGET_EFAULT;
12165                 ret = 0;
12166             }
12167             break;
12168 
12169           /* case GSI_IEEE_STATE_AT_SIGNAL:
12170              -- Not implemented in linux kernel.
12171              case GSI_UACPROC:
12172              -- Retrieves current unaligned access state; not much used.
12173              case GSI_PROC_TYPE:
12174              -- Retrieves implver information; surely not used.
12175              case GSI_GET_HWRPB:
12176              -- Grabs a copy of the HWRPB; surely not used.
12177           */
12178         }
12179         return ret;
12180 #endif
12181 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12182     /* Alpha specific */
12183     case TARGET_NR_osf_setsysinfo:
12184         ret = -TARGET_EOPNOTSUPP;
12185         switch (arg1) {
12186           case TARGET_SSI_IEEE_FP_CONTROL:
12187             {
12188                 uint64_t swcr, fpcr;
12189 
12190                 if (get_user_u64(swcr, arg2)) {
12191                     return -TARGET_EFAULT;
12192                 }
12193 
12194                 /*
12195                  * The kernel calls swcr_update_status to update the
12196                  * status bits from the fpcr at every point that it
12197                  * could be queried.  Therefore, we store the status
12198                  * bits only in FPCR.
12199                  */
12200                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12201 
12202                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12203                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12204                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12205                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12206                 ret = 0;
12207             }
12208             break;
12209 
12210           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12211             {
12212                 uint64_t exc, fpcr, fex;
12213 
12214                 if (get_user_u64(exc, arg2)) {
12215                     return -TARGET_EFAULT;
12216                 }
12217                 exc &= SWCR_STATUS_MASK;
12218                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12219 
12220                 /* Old exceptions are not signaled.  */
12221                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12222                 fex = exc & ~fex;
12223                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12224                 fex &= (cpu_env)->swcr;
12225 
12226                 /* Update the hardware fpcr.  */
12227                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12228                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12229 
12230                 if (fex) {
12231                     int si_code = TARGET_FPE_FLTUNK;
12232                     target_siginfo_t info;
12233 
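                    /*
                     * Choose an si_code for the SIGFPE; the checks below run
                     * in order, so if several exception bits are set the
                     * last matching check wins (FLTINV has highest priority).
                     */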
12234                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12235                         si_code = TARGET_FPE_FLTUND;
12236                     }
12237                     if (fex & SWCR_TRAP_ENABLE_INE) {
12238                         si_code = TARGET_FPE_FLTRES;
12239                     }
12240                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12241                         si_code = TARGET_FPE_FLTUND;
12242                     }
12243                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12244                         si_code = TARGET_FPE_FLTOVF;
12245                     }
12246                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12247                         si_code = TARGET_FPE_FLTDIV;
12248                     }
12249                     if (fex & SWCR_TRAP_ENABLE_INV) {
12250                         si_code = TARGET_FPE_FLTINV;
12251                     }
12252 
12253                     info.si_signo = SIGFPE;
12254                     info.si_errno = 0;
12255                     info.si_code = si_code;
12256                     info._sifields._sigfault._addr = (cpu_env)->pc;
12257                     queue_signal(cpu_env, info.si_signo,
12258                                  QEMU_SI_FAULT, &info);
12259                 }
12260                 ret = 0;
12261             }
12262             break;
12263 
12264           /* case SSI_NVPAIRS:
12265              -- Used with SSIN_UACPROC to enable unaligned accesses.
12266              case SSI_IEEE_STATE_AT_SIGNAL:
12267              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12268              -- Not implemented in linux kernel
12269           */
12270         }
12271         return ret;
12272 #endif
12273 #ifdef TARGET_NR_osf_sigprocmask
12274     /* Alpha specific.  */
12275     case TARGET_NR_osf_sigprocmask:
12276         {
12277             abi_ulong mask;
12278             int how;
12279             sigset_t set, oldset;
12280 
12281             switch (arg1) {
12282             case TARGET_SIG_BLOCK:
12283                 how = SIG_BLOCK;
12284                 break;
12285             case TARGET_SIG_UNBLOCK:
12286                 how = SIG_UNBLOCK;
12287                 break;
12288             case TARGET_SIG_SETMASK:
12289                 how = SIG_SETMASK;
12290                 break;
12291             default:
12292                 return -TARGET_EINVAL;
12293             }
12294             mask = arg2;
12295             target_to_host_old_sigset(&set, &mask);
12296             ret = do_sigprocmask(how, &set, &oldset);
12297             if (!ret) {
12298                 host_to_target_old_sigset(&mask, &oldset);
12299                 ret = mask;
12300             }
12301         }
12302         return ret;
12303 #endif
12304 
12305 #ifdef TARGET_NR_getgid32
12306     case TARGET_NR_getgid32:
12307         return get_errno(getgid());
12308 #endif
12309 #ifdef TARGET_NR_geteuid32
12310     case TARGET_NR_geteuid32:
12311         return get_errno(geteuid());
12312 #endif
12313 #ifdef TARGET_NR_getegid32
12314     case TARGET_NR_getegid32:
12315         return get_errno(getegid());
12316 #endif
12317 #ifdef TARGET_NR_setreuid32
12318     case TARGET_NR_setreuid32:
12319         return get_errno(sys_setreuid(arg1, arg2));
12320 #endif
12321 #ifdef TARGET_NR_setregid32
12322     case TARGET_NR_setregid32:
12323         return get_errno(sys_setregid(arg1, arg2));
12324 #endif
12325 #ifdef TARGET_NR_getgroups32
12326     case TARGET_NR_getgroups32:
12327         { /* the same code as for TARGET_NR_getgroups */
12328             int gidsetsize = arg1;
12329             uint32_t *target_grouplist;
12330             g_autofree gid_t *grouplist = NULL;
12331             int i;
12332 
12333             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12334                 return -TARGET_EINVAL;
12335             }
12336             if (gidsetsize > 0) {
12337                 grouplist = g_try_new(gid_t, gidsetsize);
12338                 if (!grouplist) {
12339                     return -TARGET_ENOMEM;
12340                 }
12341             }
12342             ret = get_errno(getgroups(gidsetsize, grouplist));
12343             if (!is_error(ret) && gidsetsize > 0) {
12344                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12345                                              gidsetsize * 4, 0);
12346                 if (!target_grouplist) {
12347                     return -TARGET_EFAULT;
12348                 }
12349                 for (i = 0; i < ret; i++) {
12350                     target_grouplist[i] = tswap32(grouplist[i]);
12351                 }
12352                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12353             }
12354             return ret;
12355         }
12356 #endif
12357 #ifdef TARGET_NR_setgroups32
12358     case TARGET_NR_setgroups32:
12359         { /* the same code as for TARGET_NR_setgroups */
12360             int gidsetsize = arg1;
12361             uint32_t *target_grouplist;
12362             g_autofree gid_t *grouplist = NULL;
12363             int i;
12364 
12365             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12366                 return -TARGET_EINVAL;
12367             }
12368             if (gidsetsize > 0) {
12369                 grouplist = g_try_new(gid_t, gidsetsize);
12370                 if (!grouplist) {
12371                     return -TARGET_ENOMEM;
12372                 }
12373                 target_grouplist = lock_user(VERIFY_READ, arg2,
12374                                              gidsetsize * 4, 1);
12375                 if (!target_grouplist) {
12376                     return -TARGET_EFAULT;
12377                 }
12378                 for (i = 0; i < gidsetsize; i++) {
12379                     grouplist[i] = tswap32(target_grouplist[i]);
12380                 }
12381                 unlock_user(target_grouplist, arg2, 0);
12382             }
12383             return get_errno(sys_setgroups(gidsetsize, grouplist));
12384         }
12385 #endif
12386 #ifdef TARGET_NR_fchown32
12387     case TARGET_NR_fchown32:
12388         return get_errno(fchown(arg1, arg2, arg3));
12389 #endif
12390 #ifdef TARGET_NR_setresuid32
12391     case TARGET_NR_setresuid32:
12392         return get_errno(sys_setresuid(arg1, arg2, arg3));
12393 #endif
12394 #ifdef TARGET_NR_getresuid32
12395     case TARGET_NR_getresuid32:
12396         {
12397             uid_t ruid, euid, suid;
12398             ret = get_errno(getresuid(&ruid, &euid, &suid));
12399             if (!is_error(ret)) {
12400                 if (put_user_u32(ruid, arg1)
12401                     || put_user_u32(euid, arg2)
12402                     || put_user_u32(suid, arg3))
12403                     return -TARGET_EFAULT;
12404             }
12405         }
12406         return ret;
12407 #endif
12408 #ifdef TARGET_NR_setresgid32
12409     case TARGET_NR_setresgid32:
12410         return get_errno(sys_setresgid(arg1, arg2, arg3));
12411 #endif
12412 #ifdef TARGET_NR_getresgid32
12413     case TARGET_NR_getresgid32:
12414         {
12415             gid_t rgid, egid, sgid;
12416             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12417             if (!is_error(ret)) {
12418                 if (put_user_u32(rgid, arg1)
12419                     || put_user_u32(egid, arg2)
12420                     || put_user_u32(sgid, arg3))
12421                     return -TARGET_EFAULT;
12422             }
12423         }
12424         return ret;
12425 #endif
12426 #ifdef TARGET_NR_chown32
12427     case TARGET_NR_chown32:
12428         if (!(p = lock_user_string(arg1)))
12429             return -TARGET_EFAULT;
12430         ret = get_errno(chown(p, arg2, arg3));
12431         unlock_user(p, arg1, 0);
12432         return ret;
12433 #endif
12434 #ifdef TARGET_NR_setuid32
12435     case TARGET_NR_setuid32:
12436         return get_errno(sys_setuid(arg1));
12437 #endif
12438 #ifdef TARGET_NR_setgid32
12439     case TARGET_NR_setgid32:
12440         return get_errno(sys_setgid(arg1));
12441 #endif
12442 #ifdef TARGET_NR_setfsuid32
12443     case TARGET_NR_setfsuid32:
12444         return get_errno(setfsuid(arg1));
12445 #endif
12446 #ifdef TARGET_NR_setfsgid32
12447     case TARGET_NR_setfsgid32:
12448         return get_errno(setfsgid(arg1));
12449 #endif
12450 #ifdef TARGET_NR_mincore
12451     case TARGET_NR_mincore:
12452         {
12453             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12454             if (!a) {
12455                 return -TARGET_ENOMEM;
12456             }
12457             p = lock_user_string(arg3);
12458             if (!p) {
12459                 ret = -TARGET_EFAULT;
12460             } else {
12461                 ret = get_errno(mincore(a, arg2, p));
12462                 unlock_user(p, arg3, ret);
12463             }
12464             unlock_user(a, arg1, 0);
12465         }
12466         return ret;
12467 #endif
12468 #ifdef TARGET_NR_arm_fadvise64_64
12469     case TARGET_NR_arm_fadvise64_64:
12470         /* arm_fadvise64_64 looks like fadvise64_64 but
12471          * with different argument order: fd, advice, offset, len
12472          * rather than the usual fd, offset, len, advice.
12473          * Note that offset and len are both 64-bit so appear as
12474          * pairs of 32-bit registers.
12475          */
12476         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12477                             target_offset64(arg5, arg6), arg2);
12478         return -host_to_target_errno(ret);
12479 #endif
12480 
12481 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12482 
12483 #ifdef TARGET_NR_fadvise64_64
12484     case TARGET_NR_fadvise64_64:
12485 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12486         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12487         ret = arg2;
12488         arg2 = arg3;
12489         arg3 = arg4;
12490         arg4 = arg5;
12491         arg5 = arg6;
12492         arg6 = ret;
12493 #else
12494         /* 6 args: fd, offset (high, low), len (high, low), advice */
12495         if (regpairs_aligned(cpu_env, num)) {
12496             /* offset is in (3,4), len in (5,6) and advice in 7 */
12497             arg2 = arg3;
12498             arg3 = arg4;
12499             arg4 = arg5;
12500             arg5 = arg6;
12501             arg6 = arg7;
12502         }
12503 #endif
12504         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12505                             target_offset64(arg4, arg5), arg6);
12506         return -host_to_target_errno(ret);
12507 #endif
12508 
12509 #ifdef TARGET_NR_fadvise64
12510     case TARGET_NR_fadvise64:
12511         /* 5 args: fd, offset (high, low), len, advice */
12512         if (regpairs_aligned(cpu_env, num)) {
12513             /* offset is in (3,4), len in 5 and advice in 6 */
12514             arg2 = arg3;
12515             arg3 = arg4;
12516             arg4 = arg5;
12517             arg5 = arg6;
12518         }
12519         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12520         return -host_to_target_errno(ret);
12521 #endif
12522 
12523 #else /* not a 32-bit ABI */
12524 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12525 #ifdef TARGET_NR_fadvise64_64
12526     case TARGET_NR_fadvise64_64:
12527 #endif
12528 #ifdef TARGET_NR_fadvise64
12529     case TARGET_NR_fadvise64:
12530 #endif
12531 #ifdef TARGET_S390X
12532         switch (arg4) {
12533         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12534         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12535         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12536         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12537         default: break;
12538         }
12539 #endif
12540         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12541 #endif
12542 #endif /* end of 64-bit ABI fadvise handling */
12543 
12544 #ifdef TARGET_NR_madvise
12545     case TARGET_NR_madvise:
12546         return target_madvise(arg1, arg2, arg3);
12547 #endif
12548 #ifdef TARGET_NR_fcntl64
12549     case TARGET_NR_fcntl64:
12550     {
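        /*
         * fcntl64 lets 32-bit guests use 64-bit file locks: the F_*LK64
         * commands need the guest flock64 layout converted to the host
         * struct flock, everything else is forwarded to do_fcntl().
         */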
12551         int cmd;
12552         struct flock fl;
12553         from_flock64_fn *copyfrom = copy_from_user_flock64;
12554         to_flock64_fn *copyto = copy_to_user_flock64;
12555 
12556 #ifdef TARGET_ARM
12557         if (!cpu_env->eabi) {
12558             copyfrom = copy_from_user_oabi_flock64;
12559             copyto = copy_to_user_oabi_flock64;
12560         }
12561 #endif
12562 
12563         cmd = target_to_host_fcntl_cmd(arg2);
12564         if (cmd == -TARGET_EINVAL) {
12565             return cmd;
12566         }
12567 
12568         switch (arg2) {
12569         case TARGET_F_GETLK64:
12570             ret = copyfrom(&fl, arg3);
12571             if (ret) {
12572                 break;
12573             }
12574             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12575             if (ret == 0) {
12576                 ret = copyto(arg3, &fl);
12577             }
12578             break;
12579 
12580         case TARGET_F_SETLK64:
12581         case TARGET_F_SETLKW64:
12582             ret = copyfrom(&fl, arg3);
12583             if (ret) {
12584                 break;
12585             }
12586             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12587             break;
12588         default:
12589             ret = do_fcntl(arg1, arg2, arg3);
12590             break;
12591         }
12592         return ret;
12593     }
12594 #endif
12595 #ifdef TARGET_NR_cacheflush
12596     case TARGET_NR_cacheflush:
12597         /* self-modifying code is handled automatically, so nothing needed */
12598         return 0;
12599 #endif
12600 #ifdef TARGET_NR_getpagesize
12601     case TARGET_NR_getpagesize:
12602         return TARGET_PAGE_SIZE;
12603 #endif
12604     case TARGET_NR_gettid:
12605         return get_errno(sys_gettid());
12606 #ifdef TARGET_NR_readahead
12607     case TARGET_NR_readahead:
12608 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
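        /*
         * The 64-bit offset is passed in a pair of 32-bit registers; targets
         * whose ABI aligns register pairs skip one argument slot first, so
         * shift the arguments down in that case.
         */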
12609         if (regpairs_aligned(cpu_env, num)) {
12610             arg2 = arg3;
12611             arg3 = arg4;
12612             arg4 = arg5;
12613         }
12614         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12615 #else
12616         ret = get_errno(readahead(arg1, arg2, arg3));
12617 #endif
12618         return ret;
12619 #endif
12620 #ifdef CONFIG_ATTR
12621 #ifdef TARGET_NR_setxattr
12622     case TARGET_NR_listxattr:
12623     case TARGET_NR_llistxattr:
12624     {
12625         void *b = 0;
12626         if (arg2) {
12627             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12628             if (!b) {
12629                 return -TARGET_EFAULT;
12630             }
12631         }
12632         p = lock_user_string(arg1);
12633         if (p) {
12634             if (num == TARGET_NR_listxattr) {
12635                 ret = get_errno(listxattr(p, b, arg3));
12636             } else {
12637                 ret = get_errno(llistxattr(p, b, arg3));
12638             }
12639         } else {
12640             ret = -TARGET_EFAULT;
12641         }
12642         unlock_user(p, arg1, 0);
12643         unlock_user(b, arg2, arg3);
12644         return ret;
12645     }
12646     case TARGET_NR_flistxattr:
12647     {
12648         void *b = 0;
12649         if (arg2) {
12650             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12651             if (!b) {
12652                 return -TARGET_EFAULT;
12653             }
12654         }
12655         ret = get_errno(flistxattr(arg1, b, arg3));
12656         unlock_user(b, arg2, arg3);
12657         return ret;
12658     }
12659     case TARGET_NR_setxattr:
12660     case TARGET_NR_lsetxattr:
12661         {
12662             void *n, *v = 0;
12663             if (arg3) {
12664                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12665                 if (!v) {
12666                     return -TARGET_EFAULT;
12667                 }
12668             }
12669             p = lock_user_string(arg1);
12670             n = lock_user_string(arg2);
12671             if (p && n) {
12672                 if (num == TARGET_NR_setxattr) {
12673                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12674                 } else {
12675                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12676                 }
12677             } else {
12678                 ret = -TARGET_EFAULT;
12679             }
12680             unlock_user(p, arg1, 0);
12681             unlock_user(n, arg2, 0);
12682             unlock_user(v, arg3, 0);
12683         }
12684         return ret;
12685     case TARGET_NR_fsetxattr:
12686         {
12687             void *n, *v = 0;
12688             if (arg3) {
12689                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12690                 if (!v) {
12691                     return -TARGET_EFAULT;
12692                 }
12693             }
12694             n = lock_user_string(arg2);
12695             if (n) {
12696                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12697             } else {
12698                 ret = -TARGET_EFAULT;
12699             }
12700             unlock_user(n, arg2, 0);
12701             unlock_user(v, arg3, 0);
12702         }
12703         return ret;
12704     case TARGET_NR_getxattr:
12705     case TARGET_NR_lgetxattr:
12706         {
12707             void *n, *v = 0;
12708             if (arg3) {
12709                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12710                 if (!v) {
12711                     return -TARGET_EFAULT;
12712                 }
12713             }
12714             p = lock_user_string(arg1);
12715             n = lock_user_string(arg2);
12716             if (p && n) {
12717                 if (num == TARGET_NR_getxattr) {
12718                     ret = get_errno(getxattr(p, n, v, arg4));
12719                 } else {
12720                     ret = get_errno(lgetxattr(p, n, v, arg4));
12721                 }
12722             } else {
12723                 ret = -TARGET_EFAULT;
12724             }
12725             unlock_user(p, arg1, 0);
12726             unlock_user(n, arg2, 0);
12727             unlock_user(v, arg3, arg4);
12728         }
12729         return ret;
12730     case TARGET_NR_fgetxattr:
12731         {
12732             void *n, *v = 0;
12733             if (arg3) {
12734                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12735                 if (!v) {
12736                     return -TARGET_EFAULT;
12737                 }
12738             }
12739             n = lock_user_string(arg2);
12740             if (n) {
12741                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12742             } else {
12743                 ret = -TARGET_EFAULT;
12744             }
12745             unlock_user(n, arg2, 0);
12746             unlock_user(v, arg3, arg4);
12747         }
12748         return ret;
12749     case TARGET_NR_removexattr:
12750     case TARGET_NR_lremovexattr:
12751         {
12752             void *n;
12753             p = lock_user_string(arg1);
12754             n = lock_user_string(arg2);
12755             if (p && n) {
12756                 if (num == TARGET_NR_removexattr) {
12757                     ret = get_errno(removexattr(p, n));
12758                 } else {
12759                     ret = get_errno(lremovexattr(p, n));
12760                 }
12761             } else {
12762                 ret = -TARGET_EFAULT;
12763             }
12764             unlock_user(p, arg1, 0);
12765             unlock_user(n, arg2, 0);
12766         }
12767         return ret;
12768     case TARGET_NR_fremovexattr:
12769         {
12770             void *n;
12771             n = lock_user_string(arg2);
12772             if (n) {
12773                 ret = get_errno(fremovexattr(arg1, n));
12774             } else {
12775                 ret = -TARGET_EFAULT;
12776             }
12777             unlock_user(n, arg2, 0);
12778         }
12779         return ret;
12780 #endif
12781 #endif /* CONFIG_ATTR */
12782 #ifdef TARGET_NR_set_thread_area
12783     case TARGET_NR_set_thread_area:
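        /*
         * Where the TLS pointer is stored is target specific: MIPS keeps it
         * in CP0 UserLocal, 32-bit x86 goes through do_set_thread_area(),
         * and m68k records it in the per-thread TaskState.
         */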
12784 #if defined(TARGET_MIPS)
12785       cpu_env->active_tc.CP0_UserLocal = arg1;
12786         cpu_env->active_tc.CP0_UserLocal = arg1;
12787         return 0;
12788 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12789         return do_set_thread_area(cpu_env, arg1);
12790 #elif defined(TARGET_M68K)
12791         {
12792             TaskState *ts = get_task_state(cpu);
12793             ts->tp_value = arg1;
12794             return 0;
12795         }
12796 #else
12797         return -TARGET_ENOSYS;
12798 #endif
12799 #ifdef TARGET_NR_get_thread_area
12800     case TARGET_NR_get_thread_area:
12801 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12802         return do_get_thread_area(cpu_env, arg1);
12803 #elif defined(TARGET_M68K)
12804         {
12805             TaskState *ts = get_task_state(cpu);
12806             return ts->tp_value;
12807         }
12808 #else
12809         return -TARGET_ENOSYS;
12810 #endif
12811 #endif
12812 #ifdef TARGET_NR_getdomainname
12813     case TARGET_NR_getdomainname:
12814         return -TARGET_ENOSYS;
12815 #endif
12816 
12817 #ifdef TARGET_NR_clock_settime
12818     case TARGET_NR_clock_settime:
12819     {
12820         struct timespec ts;
12821 
12822         ret = target_to_host_timespec(&ts, arg2);
12823         if (!is_error(ret)) {
12824             ret = get_errno(clock_settime(arg1, &ts));
12825         }
12826         return ret;
12827     }
12828 #endif
12829 #ifdef TARGET_NR_clock_settime64
12830     case TARGET_NR_clock_settime64:
12831     {
12832         struct timespec ts;
12833 
12834         ret = target_to_host_timespec64(&ts, arg2);
12835         if (!is_error(ret)) {
12836             ret = get_errno(clock_settime(arg1, &ts));
12837         }
12838         return ret;
12839     }
12840 #endif
12841 #ifdef TARGET_NR_clock_gettime
12842     case TARGET_NR_clock_gettime:
12843     {
12844         struct timespec ts;
12845         ret = get_errno(clock_gettime(arg1, &ts));
12846         if (!is_error(ret)) {
12847             ret = host_to_target_timespec(arg2, &ts);
12848         }
12849         return ret;
12850     }
12851 #endif
12852 #ifdef TARGET_NR_clock_gettime64
12853     case TARGET_NR_clock_gettime64:
12854     {
12855         struct timespec ts;
12856         ret = get_errno(clock_gettime(arg1, &ts));
12857         if (!is_error(ret)) {
12858             ret = host_to_target_timespec64(arg2, &ts);
12859         }
12860         return ret;
12861     }
12862 #endif
12863 #ifdef TARGET_NR_clock_getres
12864     case TARGET_NR_clock_getres:
12865     {
12866         struct timespec ts;
12867         ret = get_errno(clock_getres(arg1, &ts));
12868         if (!is_error(ret)) {
12869             host_to_target_timespec(arg2, &ts);
12870         }
12871         return ret;
12872     }
12873 #endif
12874 #ifdef TARGET_NR_clock_getres_time64
12875     case TARGET_NR_clock_getres_time64:
12876     {
12877         struct timespec ts;
12878         ret = get_errno(clock_getres(arg1, &ts));
12879         if (!is_error(ret)) {
12880             host_to_target_timespec64(arg2, &ts);
12881         }
12882         return ret;
12883     }
12884 #endif
12885 #ifdef TARGET_NR_clock_nanosleep
12886     case TARGET_NR_clock_nanosleep:
12887     {
12888         struct timespec ts;
12889         if (target_to_host_timespec(&ts, arg3)) {
12890             return -TARGET_EFAULT;
12891         }
12892         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12893                                              &ts, arg4 ? &ts : NULL));
12894         /*
12895          * If the call is interrupted by a signal handler it fails with
12896          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12897          * TIMER_ABSTIME, the remaining unslept time is reported in arg4.
12898          */
12899         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12900             host_to_target_timespec(arg4, &ts)) {
12901               return -TARGET_EFAULT;
12902         }
12903 
12904         return ret;
12905     }
12906 #endif
12907 #ifdef TARGET_NR_clock_nanosleep_time64
12908     case TARGET_NR_clock_nanosleep_time64:
12909     {
12910         struct timespec ts;
12911 
12912         if (target_to_host_timespec64(&ts, arg3)) {
12913             return -TARGET_EFAULT;
12914         }
12915 
12916         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12917                                              &ts, arg4 ? &ts : NULL));
12918 
12919         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12920             host_to_target_timespec64(arg4, &ts)) {
12921             return -TARGET_EFAULT;
12922         }
12923         return ret;
12924     }
12925 #endif
12926 
12927 #if defined(TARGET_NR_set_tid_address)
12928     case TARGET_NR_set_tid_address:
12929     {
12930         TaskState *ts = get_task_state(cpu);
12931         ts->child_tidptr = arg1;
12932         /* Do not call the host set_tid_address(); just return our tid. */
12933         return get_errno(sys_gettid());
12934     }
12935 #endif
12936 
12937     case TARGET_NR_tkill:
12938         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12939 
12940     case TARGET_NR_tgkill:
12941         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12942                          target_to_host_signal(arg3)));
12943 
12944 #ifdef TARGET_NR_set_robust_list
12945     case TARGET_NR_set_robust_list:
12946     case TARGET_NR_get_robust_list:
12947         /* The ABI for supporting robust futexes has userspace pass
12948          * the kernel a pointer to a linked list which is updated by
12949          * userspace after the syscall; the list is walked by the kernel
12950          * when the thread exits. Since the linked list in QEMU guest
12951          * memory isn't a valid linked list for the host and we have
12952          * no way to reliably intercept the thread-death event, we can't
12953          * support these. Silently return ENOSYS so that guest userspace
12954          * falls back to a non-robust futex implementation (which should
12955          * be OK except in the corner case of the guest crashing while
12956          * holding a mutex that is shared with another process via
12957          * shared memory).
12958          */
12959         return -TARGET_ENOSYS;
12960 #endif
12961 
12962 #if defined(TARGET_NR_utimensat)
12963     case TARGET_NR_utimensat:
12964         {
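            /*
             * A non-NULL arg3 points at two timespecs: times[0] is the
             * access time, times[1] the modification time.  A NULL arg3
             * means "set both timestamps to the current time".
             */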
12965             struct timespec *tsp, ts[2];
12966             if (!arg3) {
12967                 tsp = NULL;
12968             } else {
12969                 if (target_to_host_timespec(ts, arg3)) {
12970                     return -TARGET_EFAULT;
12971                 }
12972                 if (target_to_host_timespec(ts + 1, arg3 +
12973                                             sizeof(struct target_timespec))) {
12974                     return -TARGET_EFAULT;
12975                 }
12976                 tsp = ts;
12977             }
12978             if (!arg2)
12979                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12980             else {
12981                 if (!(p = lock_user_string(arg2))) {
12982                     return -TARGET_EFAULT;
12983                 }
12984                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12985                 unlock_user(p, arg2, 0);
12986             }
12987         }
12988         return ret;
12989 #endif
12990 #ifdef TARGET_NR_utimensat_time64
12991     case TARGET_NR_utimensat_time64:
12992         {
12993             struct timespec *tsp, ts[2];
12994             if (!arg3) {
12995                 tsp = NULL;
12996             } else {
12997                 if (target_to_host_timespec64(ts, arg3)) {
12998                     return -TARGET_EFAULT;
12999                 }
13000                 if (target_to_host_timespec64(ts + 1, arg3 +
13001                                      sizeof(struct target__kernel_timespec))) {
13002                     return -TARGET_EFAULT;
13003                 }
13004                 tsp = ts;
13005             }
13006             if (!arg2)
13007                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13008             else {
13009                 p = lock_user_string(arg2);
13010                 if (!p) {
13011                     return -TARGET_EFAULT;
13012                 }
13013                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13014                 unlock_user(p, arg2, 0);
13015             }
13016         }
13017         return ret;
13018 #endif
13019 #ifdef TARGET_NR_futex
13020     case TARGET_NR_futex:
13021         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13022 #endif
13023 #ifdef TARGET_NR_futex_time64
13024     case TARGET_NR_futex_time64:
13025         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13026 #endif
13027 #ifdef CONFIG_INOTIFY
13028 #if defined(TARGET_NR_inotify_init)
13029     case TARGET_NR_inotify_init:
13030         ret = get_errno(inotify_init());
13031         if (ret >= 0) {
13032             fd_trans_register(ret, &target_inotify_trans);
13033         }
13034         return ret;
13035 #endif
13036 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13037     case TARGET_NR_inotify_init1:
13038         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13039                                           fcntl_flags_tbl)));
13040         if (ret >= 0) {
13041             fd_trans_register(ret, &target_inotify_trans);
13042         }
13043         return ret;
13044 #endif
13045 #if defined(TARGET_NR_inotify_add_watch)
13046     case TARGET_NR_inotify_add_watch:
13047         p = lock_user_string(arg2);
13048         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13049         unlock_user(p, arg2, 0);
13050         return ret;
13051 #endif
13052 #if defined(TARGET_NR_inotify_rm_watch)
13053     case TARGET_NR_inotify_rm_watch:
13054         return get_errno(inotify_rm_watch(arg1, arg2));
13055 #endif
13056 #endif
13057 
13058 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13059     case TARGET_NR_mq_open:
13060         {
13061             struct mq_attr posix_mq_attr;
13062             struct mq_attr *pposix_mq_attr;
13063             int host_flags;
13064 
13065             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13066             pposix_mq_attr = NULL;
13067             if (arg4) {
13068                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13069                     return -TARGET_EFAULT;
13070                 }
13071                 pposix_mq_attr = &posix_mq_attr;
13072             }
13073             p = lock_user_string(arg1 - 1);
13074             if (!p) {
13075                 return -TARGET_EFAULT;
13076             }
13077             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13078             unlock_user(p, arg1, 0);
13079         }
13080         return ret;
13081 
13082     case TARGET_NR_mq_unlink:
13083         p = lock_user_string(arg1 - 1);
13084         if (!p) {
13085             return -TARGET_EFAULT;
13086         }
13087         ret = get_errno(mq_unlink(p));
13088         unlock_user(p, arg1, 0);
13089         return ret;
13090 
13091 #ifdef TARGET_NR_mq_timedsend
13092     case TARGET_NR_mq_timedsend:
13093         {
13094             struct timespec ts;
13095 
13096             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13097             if (arg5 != 0) {
13098                 if (target_to_host_timespec(&ts, arg5)) {
13099                     return -TARGET_EFAULT;
13100                 }
13101                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13102                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13103                     return -TARGET_EFAULT;
13104                 }
13105             } else {
13106                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13107             }
13108             unlock_user(p, arg2, arg3);
13109         }
13110         return ret;
13111 #endif
13112 #ifdef TARGET_NR_mq_timedsend_time64
13113     case TARGET_NR_mq_timedsend_time64:
13114         {
13115             struct timespec ts;
13116 
13117             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13118             if (arg5 != 0) {
13119                 if (target_to_host_timespec64(&ts, arg5)) {
13120                     return -TARGET_EFAULT;
13121                 }
13122                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13123                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13124                     return -TARGET_EFAULT;
13125                 }
13126             } else {
13127                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13128             }
13129             unlock_user(p, arg2, arg3);
13130         }
13131         return ret;
13132 #endif
13133 
13134 #ifdef TARGET_NR_mq_timedreceive
13135     case TARGET_NR_mq_timedreceive:
13136         {
13137             struct timespec ts;
13138             unsigned int prio;
13139 
13140             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13141             if (arg5 != 0) {
13142                 if (target_to_host_timespec(&ts, arg5)) {
13143                     return -TARGET_EFAULT;
13144                 }
13145                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13146                                                      &prio, &ts));
13147                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13148                     return -TARGET_EFAULT;
13149                 }
13150             } else {
13151                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13152                                                      &prio, NULL));
13153             }
13154             unlock_user(p, arg2, arg3);
13155             if (arg4 != 0)
13156                 put_user_u32(prio, arg4);
13157         }
13158         return ret;
13159 #endif
13160 #ifdef TARGET_NR_mq_timedreceive_time64
13161     case TARGET_NR_mq_timedreceive_time64:
13162         {
13163             struct timespec ts;
13164             unsigned int prio;
13165 
13166             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13167             if (arg5 != 0) {
13168                 if (target_to_host_timespec64(&ts, arg5)) {
13169                     return -TARGET_EFAULT;
13170                 }
13171                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13172                                                      &prio, &ts));
13173                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13174                     return -TARGET_EFAULT;
13175                 }
13176             } else {
13177                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13178                                                      &prio, NULL));
13179             }
13180             unlock_user(p, arg2, arg3);
13181             if (arg4 != 0) {
13182                 put_user_u32(prio, arg4);
13183             }
13184         }
13185         return ret;
13186 #endif
13187 
13188     /* Not implemented for now... */
13189 /*     case TARGET_NR_mq_notify: */
13190 /*         break; */
13191 
13192     case TARGET_NR_mq_getsetattr:
13193         {
13194             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13195             ret = 0;
13196             if (arg2 != 0) {
13197                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13198                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13199                                            &posix_mq_attr_out));
13200             } else if (arg3 != 0) {
13201                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13202             }
13203             if (ret == 0 && arg3 != 0) {
13204                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13205             }
13206         }
13207         return ret;
13208 #endif
13209 
13210 #ifdef CONFIG_SPLICE
13211 #ifdef TARGET_NR_tee
13212     case TARGET_NR_tee:
13213         {
13214             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13215         }
13216         return ret;
13217 #endif
13218 #ifdef TARGET_NR_splice
13219     case TARGET_NR_splice:
13220         {
13221             loff_t loff_in, loff_out;
13222             loff_t *ploff_in = NULL, *ploff_out = NULL;
13223             if (arg2) {
13224                 if (get_user_u64(loff_in, arg2)) {
13225                     return -TARGET_EFAULT;
13226                 }
13227                 ploff_in = &loff_in;
13228             }
13229             if (arg4) {
13230                 if (get_user_u64(loff_out, arg4)) {
13231                     return -TARGET_EFAULT;
13232                 }
13233                 ploff_out = &loff_out;
13234             }
13235             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13236             if (arg2) {
13237                 if (put_user_u64(loff_in, arg2)) {
13238                     return -TARGET_EFAULT;
13239                 }
13240             }
13241             if (arg4) {
13242                 if (put_user_u64(loff_out, arg4)) {
13243                     return -TARGET_EFAULT;
13244                 }
13245             }
13246         }
13247         return ret;
13248 #endif
13249 #ifdef TARGET_NR_vmsplice
13250     case TARGET_NR_vmsplice:
13251         {
13252             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13253             if (vec != NULL) {
13254                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13255                 unlock_iovec(vec, arg2, arg3, 0);
13256             } else {
13257                 ret = -host_to_target_errno(errno);
13258             }
13259         }
13260         return ret;
13261 #endif
13262 #endif /* CONFIG_SPLICE */
13263 #ifdef CONFIG_EVENTFD
13264 #if defined(TARGET_NR_eventfd)
13265     case TARGET_NR_eventfd:
13266         ret = get_errno(eventfd(arg1, 0));
13267         if (ret >= 0) {
13268             fd_trans_register(ret, &target_eventfd_trans);
13269         }
13270         return ret;
13271 #endif
13272 #if defined(TARGET_NR_eventfd2)
13273     case TARGET_NR_eventfd2:
13274     {
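        /*
         * Only O_NONBLOCK and O_CLOEXEC have target-specific encodings;
         * any remaining flag bits (e.g. EFD_SEMAPHORE) are assumed to have
         * the same value on target and host and are passed through.
         */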
13275         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13276         if (arg2 & TARGET_O_NONBLOCK) {
13277             host_flags |= O_NONBLOCK;
13278         }
13279         if (arg2 & TARGET_O_CLOEXEC) {
13280             host_flags |= O_CLOEXEC;
13281         }
13282         ret = get_errno(eventfd(arg1, host_flags));
13283         if (ret >= 0) {
13284             fd_trans_register(ret, &target_eventfd_trans);
13285         }
13286         return ret;
13287     }
13288 #endif
13289 #endif /* CONFIG_EVENTFD  */
13290 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13291     case TARGET_NR_fallocate:
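        /*
         * On 32-bit ABIs (other than MIPS N32) the 64-bit offset and length
         * arrive split across register pairs and are reassembled with
         * target_offset64().
         */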
13292 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13293         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13294                                   target_offset64(arg5, arg6)));
13295 #else
13296         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13297 #endif
13298         return ret;
13299 #endif
13300 #if defined(CONFIG_SYNC_FILE_RANGE)
13301 #if defined(TARGET_NR_sync_file_range)
13302     case TARGET_NR_sync_file_range:
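        /*
         * On 32-bit ABIs the 64-bit offset and nbytes arguments are split
         * across register pairs; MIPS additionally passes a padding argument
         * for alignment, which shifts everything up by one register.
         */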
13303 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13304 #if defined(TARGET_MIPS)
13305         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13306                                         target_offset64(arg5, arg6), arg7));
13307 #else
13308         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13309                                         target_offset64(arg4, arg5), arg6));
13310 #endif /* !TARGET_MIPS */
13311 #else
13312         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13313 #endif
13314         return ret;
13315 #endif
13316 #if defined(TARGET_NR_sync_file_range2) || \
13317     defined(TARGET_NR_arm_sync_file_range)
13318 #if defined(TARGET_NR_sync_file_range2)
13319     case TARGET_NR_sync_file_range2:
13320 #endif
13321 #if defined(TARGET_NR_arm_sync_file_range)
13322     case TARGET_NR_arm_sync_file_range:
13323 #endif
13324         /* This is like sync_file_range but the arguments are reordered */
13325 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13326         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13327                                         target_offset64(arg5, arg6), arg2));
13328 #else
13329         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13330 #endif
13331         return ret;
13332 #endif
13333 #endif
13334 #if defined(TARGET_NR_signalfd4)
13335     case TARGET_NR_signalfd4:
13336         return do_signalfd4(arg1, arg2, arg4);
13337 #endif
13338 #if defined(TARGET_NR_signalfd)
13339     case TARGET_NR_signalfd:
13340         return do_signalfd4(arg1, arg2, 0);
13341 #endif
13342 #if defined(CONFIG_EPOLL)
13343 #if defined(TARGET_NR_epoll_create)
13344     case TARGET_NR_epoll_create:
13345         return get_errno(epoll_create(arg1));
13346 #endif
13347 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13348     case TARGET_NR_epoll_create1:
13349         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13350 #endif
13351 #if defined(TARGET_NR_epoll_ctl)
13352     case TARGET_NR_epoll_ctl:
13353     {
13354         struct epoll_event ep;
13355         struct epoll_event *epp = NULL;
13356         if (arg4) {
13357             if (arg2 != EPOLL_CTL_DEL) {
13358                 struct target_epoll_event *target_ep;
13359                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13360                     return -TARGET_EFAULT;
13361                 }
13362                 ep.events = tswap32(target_ep->events);
13363                 /*
13364                  * The epoll_data_t union is just opaque data to the kernel,
13365                  * so we transfer all 64 bits across and need not worry what
13366                  * actual data type it is.
13367                  */
13368                 ep.data.u64 = tswap64(target_ep->data.u64);
13369                 unlock_user_struct(target_ep, arg4, 0);
13370             }
13371             /*
13372              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13373              * non-null pointer, even though this argument is ignored.
13374              *
13375              */
13376             epp = &ep;
13377         }
13378         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13379     }
13380 #endif
13381 
13382 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13383 #if defined(TARGET_NR_epoll_wait)
13384     case TARGET_NR_epoll_wait:
13385 #endif
13386 #if defined(TARGET_NR_epoll_pwait)
13387     case TARGET_NR_epoll_pwait:
13388 #endif
13389     {
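        /*
         * The target and host epoll_event layouts (packing and byte order)
         * may differ, so the wait fills a temporary host-side array whose
         * entries are byte-swapped back into the guest buffer afterwards.
         */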
13390         struct target_epoll_event *target_ep;
13391         struct epoll_event *ep;
13392         int epfd = arg1;
13393         int maxevents = arg3;
13394         int timeout = arg4;
13395 
13396         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13397             return -TARGET_EINVAL;
13398         }
13399 
13400         target_ep = lock_user(VERIFY_WRITE, arg2,
13401                               maxevents * sizeof(struct target_epoll_event), 1);
13402         if (!target_ep) {
13403             return -TARGET_EFAULT;
13404         }
13405 
13406         ep = g_try_new(struct epoll_event, maxevents);
13407         if (!ep) {
13408             unlock_user(target_ep, arg2, 0);
13409             return -TARGET_ENOMEM;
13410         }
13411 
13412         switch (num) {
13413 #if defined(TARGET_NR_epoll_pwait)
13414         case TARGET_NR_epoll_pwait:
13415         {
13416             sigset_t *set = NULL;
13417 
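            /*
             * arg5/arg6 carry the guest signal mask and its size; it is
             * installed for the duration of the wait and restored afterwards,
             * as for sigsuspend.
             */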
13418             if (arg5) {
13419                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13420                 if (ret != 0) {
13421                     break;
13422                 }
13423             }
13424 
13425             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13426                                              set, SIGSET_T_SIZE));
13427 
13428             if (set) {
13429                 finish_sigsuspend_mask(ret);
13430             }
13431             break;
13432         }
13433 #endif
13434 #if defined(TARGET_NR_epoll_wait)
13435         case TARGET_NR_epoll_wait:
13436             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13437                                              NULL, 0));
13438             break;
13439 #endif
13440         default:
13441             ret = -TARGET_ENOSYS;
13442         }
13443         if (!is_error(ret)) {
13444             int i;
13445             for (i = 0; i < ret; i++) {
13446                 target_ep[i].events = tswap32(ep[i].events);
13447                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13448             }
13449             unlock_user(target_ep, arg2,
13450                         ret * sizeof(struct target_epoll_event));
13451         } else {
13452             unlock_user(target_ep, arg2, 0);
13453         }
13454         g_free(ep);
13455         return ret;
13456     }
13457 #endif
13458 #endif
13459 #ifdef TARGET_NR_prlimit64
13460     case TARGET_NR_prlimit64:
13461     {
13462         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13463         struct target_rlimit64 *target_rnew, *target_rold;
13464         struct host_rlimit64 rnew, rold, *rnewp = NULL;
13465         int resource = target_to_host_resource(arg2);
13466 
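        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded to the host, since they would also constrain QEMU's own
         * host-side allocations rather than just the guest; only the
         * remaining resources are applied.
         */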
13467         if (arg3 && (resource != RLIMIT_AS &&
13468                      resource != RLIMIT_DATA &&
13469                      resource != RLIMIT_STACK)) {
13470             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13471                 return -TARGET_EFAULT;
13472             }
13473             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13474             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13475             unlock_user_struct(target_rnew, arg3, 0);
13476             rnewp = &rnew;
13477         }
13478 
13479         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13480         if (!is_error(ret) && arg4) {
13481             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13482                 return -TARGET_EFAULT;
13483             }
13484             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13485             __put_user(rold.rlim_max, &target_rold->rlim_max);
13486             unlock_user_struct(target_rold, arg4, 1);
13487         }
13488         return ret;
13489     }
13490 #endif
13491 #ifdef TARGET_NR_gethostname
13492     case TARGET_NR_gethostname:
13493     {
13494         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13495         if (name) {
13496             ret = get_errno(gethostname(name, arg2));
13497             unlock_user(name, arg1, arg2);
13498         } else {
13499             ret = -TARGET_EFAULT;
13500         }
13501         return ret;
13502     }
13503 #endif
13504 #ifdef TARGET_NR_atomic_cmpxchg_32
13505     case TARGET_NR_atomic_cmpxchg_32:
13506     {
13507         /* should use start_exclusive from main.c */
13508         abi_ulong mem_value;
13509         if (get_user_u32(mem_value, arg6)) {
13510             target_siginfo_t info;
13511             info.si_signo = SIGSEGV;
13512             info.si_errno = 0;
13513             info.si_code = TARGET_SEGV_MAPERR;
13514             info._sifields._sigfault._addr = arg6;
13515             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13516             return 0xdeadbeef;
13517         }
13518         if (mem_value == arg2) {
13519             put_user_u32(arg1, arg6);
13520         }
13521         return mem_value;
13522     }
13523 #endif
13524 #ifdef TARGET_NR_atomic_barrier
13525     case TARGET_NR_atomic_barrier:
13526         /* Like the kernel implementation and the QEMU Arm barrier,
13527            treat this as a no-op. */
13528         return 0;
13529 #endif
13530 
13531 #ifdef TARGET_NR_timer_create
13532     case TARGET_NR_timer_create:
13533     {
13534         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13535 
13536         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13537 
13538         int clkid = arg1;
13539         int timer_index = next_free_host_timer();
13540 
13541         if (timer_index < 0) {
13542             ret = -TARGET_EAGAIN;
13543         } else {
13544             timer_t *phtimer = g_posix_timers + timer_index;
13545 
13546             if (arg2) {
13547                 phost_sevp = &host_sevp;
13548                 ret = target_to_host_sigevent(phost_sevp, arg2);
13549                 if (ret != 0) {
13550                     free_host_timer_slot(timer_index);
13551                     return ret;
13552                 }
13553             }
13554 
13555             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13556             if (ret) {
13557                 free_host_timer_slot(timer_index);
13558             } else {
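                /*
                 * The id handed back to the guest is the host timer slot
                 * index tagged with TIMER_MAGIC; get_timer_id() strips the
                 * tag again in the other timer_* handlers.
                 */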
13559                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13560                     timer_delete(*phtimer);
13561                     free_host_timer_slot(timer_index);
13562                     return -TARGET_EFAULT;
13563                 }
13564             }
13565         }
13566         return ret;
13567     }
13568 #endif
13569 
13570 #ifdef TARGET_NR_timer_settime
13571     case TARGET_NR_timer_settime:
13572     {
13573         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13574          * struct itimerspec * old_value */
13575         target_timer_t timerid = get_timer_id(arg1);
13576 
13577         if (timerid < 0) {
13578             ret = timerid;
13579         } else if (arg3 == 0) {
13580             ret = -TARGET_EINVAL;
13581         } else {
13582             timer_t htimer = g_posix_timers[timerid];
13583             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13584 
13585             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13586                 return -TARGET_EFAULT;
13587             }
13588             ret = get_errno(
13589                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13590             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13591                 return -TARGET_EFAULT;
13592             }
13593         }
13594         return ret;
13595     }
13596 #endif
13597 
13598 #ifdef TARGET_NR_timer_settime64
13599     case TARGET_NR_timer_settime64:
13600     {
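        /*
         * Same as timer_settime, except that the guest itimerspec uses the
         * 64-bit time_t layout (*_time64 syscall variant).
         */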
13601         target_timer_t timerid = get_timer_id(arg1);
13602 
13603         if (timerid < 0) {
13604             ret = timerid;
13605         } else if (arg3 == 0) {
13606             ret = -TARGET_EINVAL;
13607         } else {
13608             timer_t htimer = g_posix_timers[timerid];
13609             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13610 
13611             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13612                 return -TARGET_EFAULT;
13613             }
13614             ret = get_errno(
13615                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13616             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13617                 return -TARGET_EFAULT;
13618             }
13619         }
13620         return ret;
13621     }
13622 #endif
13623 
13624 #ifdef TARGET_NR_timer_gettime
13625     case TARGET_NR_timer_gettime:
13626     {
13627         /* args: timer_t timerid, struct itimerspec *curr_value */
13628         target_timer_t timerid = get_timer_id(arg1);
13629 
13630         if (timerid < 0) {
13631             ret = timerid;
13632         } else if (!arg2) {
13633             ret = -TARGET_EFAULT;
13634         } else {
13635             timer_t htimer = g_posix_timers[timerid];
13636             struct itimerspec hspec;
13637             ret = get_errno(timer_gettime(htimer, &hspec));
13638 
13639             if (host_to_target_itimerspec(arg2, &hspec)) {
13640                 ret = -TARGET_EFAULT;
13641             }
13642         }
13643         return ret;
13644     }
13645 #endif
13646 
13647 #ifdef TARGET_NR_timer_gettime64
13648     case TARGET_NR_timer_gettime64:
13649     {
13650         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13651         target_timer_t timerid = get_timer_id(arg1);
13652 
13653         if (timerid < 0) {
13654             ret = timerid;
13655         } else if (!arg2) {
13656             ret = -TARGET_EFAULT;
13657         } else {
13658             timer_t htimer = g_posix_timers[timerid];
13659             struct itimerspec hspec;
13660             ret = get_errno(timer_gettime(htimer, &hspec));
13661 
13662             if (host_to_target_itimerspec64(arg2, &hspec)) {
13663                 ret = -TARGET_EFAULT;
13664             }
13665         }
13666         return ret;
13667     }
13668 #endif
13669 
13670 #ifdef TARGET_NR_timer_getoverrun
13671     case TARGET_NR_timer_getoverrun:
13672     {
13673         /* args: timer_t timerid */
13674         target_timer_t timerid = get_timer_id(arg1);
13675 
13676         if (timerid < 0) {
13677             ret = timerid;
13678         } else {
13679             timer_t htimer = g_posix_timers[timerid];
13680             ret = get_errno(timer_getoverrun(htimer));
13681         }
13682         return ret;
13683     }
13684 #endif
13685 
13686 #ifdef TARGET_NR_timer_delete
13687     case TARGET_NR_timer_delete:
13688     {
13689         /* args: timer_t timerid */
13690         target_timer_t timerid = get_timer_id(arg1);
13691 
13692         if (timerid < 0) {
13693             ret = timerid;
13694         } else {
13695             timer_t htimer = g_posix_timers[timerid];
13696             ret = get_errno(timer_delete(htimer));
13697             free_host_timer_slot(timerid);
13698         }
13699         return ret;
13700     }
13701 #endif
13702 
13703 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13704     case TARGET_NR_timerfd_create:
13705         ret = get_errno(timerfd_create(arg1,
13706                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13707         if (ret >= 0) {
13708             fd_trans_register(ret, &target_timerfd_trans);
13709         }
13710         return ret;
13711 #endif
13712 
13713 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13714     case TARGET_NR_timerfd_gettime:
13715         {
13716             struct itimerspec its_curr;
13717 
13718             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13719 
13720             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13721                 return -TARGET_EFAULT;
13722             }
13723         }
13724         return ret;
13725 #endif
13726 
13727 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13728     case TARGET_NR_timerfd_gettime64:
13729         {
13730             struct itimerspec its_curr;
13731 
13732             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13733 
13734             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13735                 return -TARGET_EFAULT;
13736             }
13737         }
13738         return ret;
13739 #endif
13740 
13741 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13742     case TARGET_NR_timerfd_settime:
13743         {
13744             struct itimerspec its_new, its_old, *p_new;
13745 
13746             if (arg3) {
13747                 if (target_to_host_itimerspec(&its_new, arg3)) {
13748                     return -TARGET_EFAULT;
13749                 }
13750                 p_new = &its_new;
13751             } else {
13752                 p_new = NULL;
13753             }
13754 
13755             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13756 
13757             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13758                 return -TARGET_EFAULT;
13759             }
13760         }
13761         return ret;
13762 #endif
13763 
13764 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13765     case TARGET_NR_timerfd_settime64:
13766         {
13767             struct itimerspec its_new, its_old, *p_new;
13768 
13769             if (arg3) {
13770                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13771                     return -TARGET_EFAULT;
13772                 }
13773                 p_new = &its_new;
13774             } else {
13775                 p_new = NULL;
13776             }
13777 
13778             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13779 
13780             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13781                 return -TARGET_EFAULT;
13782             }
13783         }
13784         return ret;
13785 #endif
13786 
13787 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13788     case TARGET_NR_ioprio_get:
13789         return get_errno(ioprio_get(arg1, arg2));
13790 #endif
13791 
13792 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13793     case TARGET_NR_ioprio_set:
13794         return get_errno(ioprio_set(arg1, arg2, arg3));
13795 #endif
13796 
13797 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13798     case TARGET_NR_setns:
13799         return get_errno(setns(arg1, arg2));
13800 #endif
13801 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13802     case TARGET_NR_unshare:
13803         return get_errno(unshare(arg1));
13804 #endif
13805 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13806     case TARGET_NR_kcmp:
13807         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13808 #endif
13809 #ifdef TARGET_NR_swapcontext
13810     case TARGET_NR_swapcontext:
13811         /* PowerPC specific.  */
13812         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13813 #endif
13814 #ifdef TARGET_NR_memfd_create
13815     case TARGET_NR_memfd_create:
13816         p = lock_user_string(arg1);
13817         if (!p) {
13818             return -TARGET_EFAULT;
13819         }
13820         ret = get_errno(memfd_create(p, arg2));
13821         fd_trans_unregister(ret);
13822         unlock_user(p, arg1, 0);
13823         return ret;
13824 #endif
13825 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13826     case TARGET_NR_membarrier:
13827         return get_errno(membarrier(arg1, arg2));
13828 #endif
13829 
13830 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13831     case TARGET_NR_copy_file_range:
13832         {
13833             loff_t inoff, outoff;
13834             loff_t *pinoff = NULL, *poutoff = NULL;
13835 
13836             if (arg2) {
13837                 if (get_user_u64(inoff, arg2)) {
13838                     return -TARGET_EFAULT;
13839                 }
13840                 pinoff = &inoff;
13841             }
13842             if (arg4) {
13843                 if (get_user_u64(outoff, arg4)) {
13844                     return -TARGET_EFAULT;
13845                 }
13846                 poutoff = &outoff;
13847             }
13848             /* Do not sign-extend the count parameter. */
13849             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13850                                                  (abi_ulong)arg5, arg6));
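            /*
             * Write the updated offsets back to the guest only when the call
             * actually transferred some data.
             */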
13851             if (!is_error(ret) && ret > 0) {
13852                 if (arg2) {
13853                     if (put_user_u64(inoff, arg2)) {
13854                         return -TARGET_EFAULT;
13855                     }
13856                 }
13857                 if (arg4) {
13858                     if (put_user_u64(outoff, arg4)) {
13859                         return -TARGET_EFAULT;
13860                     }
13861                 }
13862             }
13863         }
13864         return ret;
13865 #endif
13866 
13867 #if defined(TARGET_NR_pivot_root)
13868     case TARGET_NR_pivot_root:
13869         {
13870             void *p2;
13871             p = lock_user_string(arg1); /* new_root */
13872             p2 = lock_user_string(arg2); /* put_old */
13873             if (!p || !p2) {
13874                 ret = -TARGET_EFAULT;
13875             } else {
13876                 ret = get_errno(pivot_root(p, p2));
13877             }
13878             unlock_user(p2, arg2, 0);
13879             unlock_user(p, arg1, 0);
13880         }
13881         return ret;
13882 #endif
13883 
13884 #if defined(TARGET_NR_riscv_hwprobe)
13885     case TARGET_NR_riscv_hwprobe:
13886         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13887 #endif
13888 
13889     default:
13890         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13891         return -TARGET_ENOSYS;
13892     }
13893     return ret;
13894 }
13895 
13896 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13897                     abi_long arg2, abi_long arg3, abi_long arg4,
13898                     abi_long arg5, abi_long arg6, abi_long arg7,
13899                     abi_long arg8)
13900 {
13901     CPUState *cpu = env_cpu(cpu_env);
13902     abi_long ret;
13903 
13904 #ifdef DEBUG_ERESTARTSYS
13905     /* Debug-only code for exercising the syscall-restart code paths
13906      * in the per-architecture cpu main loops: restart every syscall
13907      * the guest makes once before letting it through.
13908      */
13909     {
13910         static bool flag;
13911         flag = !flag;
13912         if (flag) {
13913             return -QEMU_ERESTARTSYS;
13914         }
13915     }
13916 #endif
13917 
13918     record_syscall_start(cpu, num, arg1,
13919                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13920 
13921     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13922         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13923     }
13924 
13925     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13926                       arg5, arg6, arg7, arg8);
13927 
13928     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13929         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13930                           arg3, arg4, arg5, arg6);
13931     }
13932 
13933     record_syscall_return(cpu, num, ret);
13934     return ret;
13935 }
13936