xref: /openbmc/qemu/linux-user/syscall.c (revision 1af52156676065b1fc2d4815bf23b1c4c99938b3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/mmap-lock.h"
30 #include <elf.h>
31 #include <endian.h>
32 #include <grp.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/mount.h>
37 #include <sys/file.h>
38 #include <sys/fsuid.h>
39 #include <sys/personality.h>
40 #include <sys/prctl.h>
41 #include <sys/resource.h>
42 #include <sys/swap.h>
43 #include <linux/capability.h>
44 #include <sched.h>
45 #include <sys/timex.h>
46 #include <sys/socket.h>
47 #include <linux/sockios.h>
48 #include <sys/un.h>
49 #include <sys/uio.h>
50 #include <poll.h>
51 #include <sys/times.h>
52 #include <sys/shm.h>
53 #include <sys/sem.h>
54 #include <sys/statfs.h>
55 #include <utime.h>
56 #include <sys/sysinfo.h>
57 #include <sys/signalfd.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include <linux/icmpv6.h>
65 #include <linux/if_tun.h>
66 #include <linux/in6.h>
67 #include <linux/errqueue.h>
68 #include <linux/random.h>
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84 #ifdef HAVE_SYS_KCOV_H
85 #include <sys/kcov.h>
86 #endif
87 
88 #define termios host_termios
89 #define termios2 host_termios2
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
95 
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #include <linux/fd.h>
105 #if defined(CONFIG_FIEMAP)
106 #include <linux/fiemap.h>
107 #endif
108 #include <linux/fb.h>
109 #if defined(CONFIG_USBFS)
110 #include <linux/usbdevice_fs.h>
111 #include <linux/usb/ch9.h>
112 #endif
113 #include <linux/vt.h>
114 #include <linux/dm-ioctl.h>
115 #include <linux/reboot.h>
116 #include <linux/route.h>
117 #include <linux/filter.h>
118 #include <linux/blkpg.h>
119 #include <netpacket/packet.h>
120 #include <linux/netlink.h>
121 #include <linux/if_alg.h>
122 #include <linux/rtc.h>
123 #include <sound/asound.h>
124 #ifdef HAVE_BTRFS_H
125 #include <linux/btrfs.h>
126 #endif
127 #ifdef HAVE_DRM_H
128 #include <libdrm/drm.h>
129 #include <libdrm/i915_drm.h>
130 #endif
131 #include "linux_loop.h"
132 #include "uname.h"
133 
134 #include "qemu.h"
135 #include "user-internals.h"
136 #include "strace.h"
137 #include "signal-common.h"
138 #include "loader.h"
139 #include "user-mmap.h"
140 #include "user/page-protection.h"
141 #include "user/safe-syscall.h"
142 #include "user/signal.h"
143 #include "qemu/guest-random.h"
144 #include "qemu/selfmap.h"
145 #include "user/syscall-trace.h"
146 #include "special-errno.h"
147 #include "qapi/error.h"
148 #include "fd-trans.h"
149 #include "user/cpu_loop.h"
150 
151 #if defined(__powerpc__)
152 /*
153  * On PowerPC termios2 is lacking, and plain termios along with the ioctls
154  * without the "2" suffix behave like termios2 and the "2" ioctls do on
155  * other architectures.
156  *
157  * Just define the termios2-related things to be the same as the termios-related ones.
158  */
159 #define host_termios2 host_termios
160 #define TCGETS2 TCGETS
161 #define TCSETS2 TCSETS
162 #define TCSETSW2 TCSETSW
163 #define TCSETSF2 TCSETSF
164 #endif
165 
166 #ifndef CLONE_IO
167 #define CLONE_IO                0x80000000      /* Clone io context */
168 #endif
169 
170 /* We can't directly call the host clone syscall, because this will
171  * badly confuse libc (breaking mutexes, for example). So we must
172  * divide clone flags into:
173  *  * flag combinations that look like pthread_create()
174  *  * flag combinations that look like fork()
175  *  * flags we can implement within QEMU itself
176  *  * flags we can't support and will return an error for
177  */
178 /* For thread creation, all these flags must be present; for
179  * fork, none must be present.
180  */
181 #define CLONE_THREAD_FLAGS                              \
182     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
183      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
184 
185 /* These flags are ignored:
186  * CLONE_DETACHED is now ignored by the kernel;
187  * CLONE_IO is just an optimisation hint to the I/O scheduler
188  */
189 #define CLONE_IGNORED_FLAGS                     \
190     (CLONE_DETACHED | CLONE_IO)
191 
192 #ifndef CLONE_PIDFD
193 # define CLONE_PIDFD 0x00001000
194 #endif
195 
196 /* Flags for fork which we can implement within QEMU itself */
197 #define CLONE_OPTIONAL_FORK_FLAGS               \
198     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
199      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
200 
201 /* Flags for thread creation which we can implement within QEMU itself */
202 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
203     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
204      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
205 
206 #define CLONE_INVALID_FORK_FLAGS                                        \
207     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
208 
209 #define CLONE_INVALID_THREAD_FLAGS                                      \
210     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
211        CLONE_IGNORED_FLAGS))
212 
213 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
214  * have almost all been allocated. We cannot support any of
215  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
216  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
217  * The checks against the invalid thread masks above will catch these.
218  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
219  */
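/*
 * For illustration, the hypothetical helpers below sketch the classification
 * described above: a glibc pthread_create() passes all of CLONE_THREAD_FLAGS
 * plus only optional/ignored bits, whereas a plain fork()/clone(SIGCHLD)
 * passes none of the thread flags.
 */
#if 0   /* illustrative sketch only */
static bool looks_like_pthread_create(unsigned int flags)
{
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}

static bool looks_like_fork(unsigned int flags)
{
    return (flags & CLONE_THREAD_FLAGS) == 0 &&
           (flags & CLONE_INVALID_FORK_FLAGS) == 0;
}
#endif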
220 
221 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
222  * once. This exercises the codepaths for restart.
223  */
224 //#define DEBUG_ERESTARTSYS
225 
226 //#include <linux/msdos_fs.h>
227 #define VFAT_IOCTL_READDIR_BOTH \
228     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
229 #define VFAT_IOCTL_READDIR_SHORT \
230     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
231 
232 #undef _syscall0
233 #undef _syscall1
234 #undef _syscall2
235 #undef _syscall3
236 #undef _syscall4
237 #undef _syscall5
238 #undef _syscall6
239 
240 #define _syscall0(type,name)		\
241 static type name (void)			\
242 {					\
243 	return syscall(__NR_##name);	\
244 }
245 
246 #define _syscall1(type,name,type1,arg1)		\
247 static type name (type1 arg1)			\
248 {						\
249 	return syscall(__NR_##name, arg1);	\
250 }
251 
252 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
253 static type name (type1 arg1,type2 arg2)		\
254 {							\
255 	return syscall(__NR_##name, arg1, arg2);	\
256 }
257 
258 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
259 static type name (type1 arg1,type2 arg2,type3 arg3)		\
260 {								\
261 	return syscall(__NR_##name, arg1, arg2, arg3);		\
262 }
263 
264 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
265 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
266 {										\
267 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
268 }
269 
270 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
271 		  type5,arg5)							\
272 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
273 {										\
274 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
275 }
276 
277 
278 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
279 		  type5,arg5,type6,arg6)					\
280 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
281                   type6 arg6)							\
282 {										\
283 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
284 }
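/*
 * For illustration, a use further down such as
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * expands to roughly the wrapper below: a thin static function that invokes
 * the raw host syscall and leaves errno handling to get_errno().
 */
#if 0   /* illustrative expansion only */
static int sys_getcwd1(char *buf, size_t size)
{
    return syscall(__NR_sys_getcwd1, buf, size);
}
#endif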
285 
286 
287 #define __NR_sys_uname __NR_uname
288 #define __NR_sys_getcwd1 __NR_getcwd
289 #define __NR_sys_getdents __NR_getdents
290 #define __NR_sys_getdents64 __NR_getdents64
291 #define __NR_sys_getpriority __NR_getpriority
292 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
293 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
294 #define __NR_sys_syslog __NR_syslog
295 #if defined(__NR_futex)
296 # define __NR_sys_futex __NR_futex
297 #endif
298 #if defined(__NR_futex_time64)
299 # define __NR_sys_futex_time64 __NR_futex_time64
300 #endif
301 #define __NR_sys_statx __NR_statx
302 
303 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
304 #define __NR__llseek __NR_lseek
305 #endif
306 
307 /* Newer kernel ports have llseek() instead of _llseek() */
308 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
309 #define TARGET_NR__llseek TARGET_NR_llseek
310 #endif
311 
312 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
313 #ifndef TARGET_O_NONBLOCK_MASK
314 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
315 #endif
316 
317 #define __NR_sys_gettid __NR_gettid
318 _syscall0(int, sys_gettid)
319 
320 /* In the 64-bit guest on 32-bit host case we must emulate
321  * getdents using getdents64, because otherwise the host
322  * might hand us back more dirent records than we can fit
323  * into the guest buffer after structure format conversion.
324  * Otherwise we emulate getdents with the host getdents if it is available.
325  */
326 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
327 #define EMULATE_GETDENTS_WITH_GETDENTS
328 #endif
329 
330 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
331 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
332 #endif
333 #if (defined(TARGET_NR_getdents) && \
334       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
335     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
336 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
337 #endif
338 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
339 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
340           loff_t *, res, unsigned int, wh);
341 #endif
342 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
343 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
344           siginfo_t *, uinfo)
345 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
346 #ifdef __NR_exit_group
347 _syscall1(int,exit_group,int,error_code)
348 #endif
349 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
350 #define __NR_sys_close_range __NR_close_range
351 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
352 #ifndef CLOSE_RANGE_CLOEXEC
353 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
354 #endif
355 #endif
356 #if defined(__NR_futex)
357 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
358           const struct timespec *,timeout,int *,uaddr2,int,val3)
359 #endif
360 #if defined(__NR_futex_time64)
361 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
362           const struct timespec *,timeout,int *,uaddr2,int,val3)
363 #endif
364 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
365 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
366 #endif
367 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
368 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
369                              unsigned int, flags);
370 #endif
371 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
372 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
373 #endif
374 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
375 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
376           unsigned long *, user_mask_ptr);
377 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
378 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
379           unsigned long *, user_mask_ptr);
380 /* sched_attr is not defined in glibc < 2.41 */
381 #ifndef SCHED_ATTR_SIZE_VER0
382 struct sched_attr {
383     uint32_t size;
384     uint32_t sched_policy;
385     uint64_t sched_flags;
386     int32_t sched_nice;
387     uint32_t sched_priority;
388     uint64_t sched_runtime;
389     uint64_t sched_deadline;
390     uint64_t sched_period;
391     uint32_t sched_util_min;
392     uint32_t sched_util_max;
393 };
394 #endif
395 #define __NR_sys_sched_getattr __NR_sched_getattr
396 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
397           unsigned int, size, unsigned int, flags);
398 #define __NR_sys_sched_setattr __NR_sched_setattr
399 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
400           unsigned int, flags);
401 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
402 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
403 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
404 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
405           const struct sched_param *, param);
406 #define __NR_sys_sched_getparam __NR_sched_getparam
407 _syscall2(int, sys_sched_getparam, pid_t, pid,
408           struct sched_param *, param);
409 #define __NR_sys_sched_setparam __NR_sched_setparam
410 _syscall2(int, sys_sched_setparam, pid_t, pid,
411           const struct sched_param *, param);
412 #define __NR_sys_getcpu __NR_getcpu
413 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
414 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
415           void *, arg);
416 _syscall2(int, capget, struct __user_cap_header_struct *, header,
417           struct __user_cap_data_struct *, data);
418 _syscall2(int, capset, struct __user_cap_header_struct *, header,
419           struct __user_cap_data_struct *, data);
420 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
421 _syscall2(int, ioprio_get, int, which, int, who)
422 #endif
423 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
424 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
425 #endif
426 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
427 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
428 #endif
429 
430 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
431 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
432           unsigned long, idx1, unsigned long, idx2)
433 #endif
434 
435 /*
436  * It is assumed that struct statx is architecture independent.
437  */
438 #if defined(TARGET_NR_statx) && defined(__NR_statx)
439 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
440           unsigned int, mask, struct target_statx *, statxbuf)
441 #endif
442 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
443 _syscall2(int, membarrier, int, cmd, int, flags)
444 #endif
445 
446 static const bitmask_transtbl fcntl_flags_tbl[] = {
447   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
448   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
449   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
450   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
451   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
452   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
453   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
454   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
455   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
456   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
457   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
458   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
459   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
460 #if defined(O_DIRECT)
461   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
462 #endif
463 #if defined(O_NOATIME)
464   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
465 #endif
466 #if defined(O_CLOEXEC)
467   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
468 #endif
469 #if defined(O_PATH)
470   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
471 #endif
472 #if defined(O_TMPFILE)
473   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
474 #endif
475   /* Don't terminate the list prematurely on 64-bit host+guest.  */
476 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
477   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
478 #endif
479 };
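/*
 * For illustration: each row maps (target_mask, target_bits) on the guest
 * side to (host_mask, host_bits) on the host side. A sketch of the
 * target-to-host direction, assuming the usual bitmask_transtbl field names,
 * could look like this (the helper name is hypothetical):
 */
#if 0   /* illustrative sketch only */
static int sketch_target_to_host_open_flags(int target_flags)
{
    int host_flags = 0;

    for (size_t i = 0; i < ARRAY_SIZE(fcntl_flags_tbl); i++) {
        if ((target_flags & fcntl_flags_tbl[i].target_mask) ==
            fcntl_flags_tbl[i].target_bits) {
            host_flags |= fcntl_flags_tbl[i].host_bits;
        }
    }
    return host_flags;
}
#endif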
480 
481 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
482 
483 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
484 #if defined(__NR_utimensat)
485 #define __NR_sys_utimensat __NR_utimensat
486 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
487           const struct timespec *,tsp,int,flags)
488 #else
489 static int sys_utimensat(int dirfd, const char *pathname,
490                          const struct timespec times[2], int flags)
491 {
492     errno = ENOSYS;
493     return -1;
494 }
495 #endif
496 #endif /* TARGET_NR_utimensat */
497 
498 #ifdef TARGET_NR_renameat2
499 #if defined(__NR_renameat2)
500 #define __NR_sys_renameat2 __NR_renameat2
501 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
502           const char *, new, unsigned int, flags)
503 #else
504 static int sys_renameat2(int oldfd, const char *old,
505                          int newfd, const char *new, int flags)
506 {
507     if (flags == 0) {
508         return renameat(oldfd, old, newfd, new);
509     }
510     errno = ENOSYS;
511     return -1;
512 }
513 #endif
514 #endif /* TARGET_NR_renameat2 */
515 
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
518 #else
519 /* Userspace can usually survive runtime without inotify */
520 #undef TARGET_NR_inotify_init
521 #undef TARGET_NR_inotify_init1
522 #undef TARGET_NR_inotify_add_watch
523 #undef TARGET_NR_inotify_rm_watch
524 #endif /* CONFIG_INOTIFY  */
525 
526 #if defined(TARGET_NR_prlimit64)
527 #ifndef __NR_prlimit64
528 # define __NR_prlimit64 -1
529 #endif
530 #define __NR_sys_prlimit64 __NR_prlimit64
531 /* The glibc rlimit structure may not be the one used by the underlying syscall */
532 struct host_rlimit64 {
533     uint64_t rlim_cur;
534     uint64_t rlim_max;
535 };
536 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
537           const struct host_rlimit64 *, new_limit,
538           struct host_rlimit64 *, old_limit)
539 #endif
540 
541 
542 #if defined(TARGET_NR_timer_create)
543 /* Maximum of 32 active POSIX timers allowed at any one time. */
544 #define GUEST_TIMER_MAX 32
545 static timer_t g_posix_timers[GUEST_TIMER_MAX];
546 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
547 
548 static inline int next_free_host_timer(void)
549 {
550     int k;
551     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
552         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
553             return k;
554         }
555     }
556     return -1;
557 }
558 
559 static inline void free_host_timer_slot(int id)
560 {
561     qatomic_store_release(g_posix_timer_allocated + id, 0);
562 }
563 #endif
564 
565 static inline int host_to_target_errno(int host_errno)
566 {
567     switch (host_errno) {
568 #define E(X)  case X: return TARGET_##X;
569 #include "errnos.c.inc"
570 #undef E
571     default:
572         return host_errno;
573     }
574 }
575 
576 static inline int target_to_host_errno(int target_errno)
577 {
578     switch (target_errno) {
579 #define E(X)  case TARGET_##X: return X;
580 #include "errnos.c.inc"
581 #undef E
582     default:
583         return target_errno;
584     }
585 }
586 
587 abi_long get_errno(abi_long ret)
588 {
589     if (ret == -1)
590         return -host_to_target_errno(errno);
591     else
592         return ret;
593 }
594 
595 const char *target_strerror(int err)
596 {
597     if (err == QEMU_ERESTARTSYS) {
598         return "To be restarted";
599     }
600     if (err == QEMU_ESIGRETURN) {
601         return "Successful exit from sigreturn";
602     }
603 
604     return strerror(target_to_host_errno(err));
605 }
606 
607 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
608 {
609     int i;
610     uint8_t b;
611     if (usize <= ksize) {
612         return 1;
613     }
614     for (i = ksize; i < usize; i++) {
615         if (get_user_u8(b, addr + i)) {
616             return -TARGET_EFAULT;
617         }
618         if (b != 0) {
619             return 0;
620         }
621     }
622     return 1;
623 }
624 
625 /*
626  * Copies a target struct to a host struct, in a way that guarantees
627  * backwards-compatibility for struct syscall arguments.
628  *
629  * Similar to the kernel's uaccess.h:copy_struct_from_user()
630  */
631 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
632 {
633     size_t size = MIN(ksize, usize);
634     size_t rest = MAX(ksize, usize) - size;
635 
636     /* Deal with trailing bytes. */
637     if (usize < ksize) {
638         memset(dst + size, 0, rest);
639     } else if (usize > ksize) {
640         int ret = check_zeroed_user(src, ksize, usize);
641         if (ret <= 0) {
642             return ret ?: -TARGET_E2BIG;
643         }
644     }
645     /* Copy the interoperable parts of the struct. */
646     if (copy_from_user(dst, src, size)) {
647         return -TARGET_EFAULT;
648     }
649     return 0;
650 }
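/*
 * For illustration, a caller handling a size-versioned struct argument
 * (sched_setattr-style) could use it roughly as below; the helper name,
 * 'target_attr_addr' and 'usize' are hypothetical stand-ins for the guest
 * pointer and guest-supplied size.
 */
#if 0   /* illustrative sketch only */
static abi_long sketch_fetch_guest_sched_attr(struct sched_attr *attr,
                                              abi_ulong target_attr_addr,
                                              size_t usize)
{
    abi_long ret = copy_struct_from_user(attr, sizeof(*attr),
                                         target_attr_addr, usize);
    if (ret) {
        return ret;   /* -TARGET_EFAULT or -TARGET_E2BIG */
    }
    return 0;
}
#endif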
651 
652 #define safe_syscall0(type, name) \
653 static type safe_##name(void) \
654 { \
655     return safe_syscall(__NR_##name); \
656 }
657 
658 #define safe_syscall1(type, name, type1, arg1) \
659 static type safe_##name(type1 arg1) \
660 { \
661     return safe_syscall(__NR_##name, arg1); \
662 }
663 
664 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
665 static type safe_##name(type1 arg1, type2 arg2) \
666 { \
667     return safe_syscall(__NR_##name, arg1, arg2); \
668 }
669 
670 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
671 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
672 { \
673     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
674 }
675 
676 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
677     type4, arg4) \
678 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
679 { \
680     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
681 }
682 
683 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
684     type4, arg4, type5, arg5) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686     type5 arg5) \
687 { \
688     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
689 }
690 
691 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
692     type4, arg4, type5, arg5, type6, arg6) \
693 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
694     type5 arg5, type6 arg6) \
695 { \
696     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
697 }
698 
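/*
 * For illustration, the first definition below,
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * expands to roughly the wrapper shown here; unlike the raw _syscall
 * wrappers it goes through safe_syscall(), which checks for pending guest
 * signals so interrupted calls can be restarted (see QEMU_ERESTARTSYS).
 */
#if 0   /* illustrative expansion only */
static ssize_t safe_read(int fd, void *buff, size_t count)
{
    return safe_syscall(__NR_read, fd, buff, count);
}
#endif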
699 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
700 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
701 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
702               int, flags, mode_t, mode)
703 
704 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
705               const struct open_how_ver0 *, how, size_t, size)
706 
707 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
708 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
709               struct rusage *, rusage)
710 #endif
711 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
712               int, options, struct rusage *, rusage)
713 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
714 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
715               char **, argv, char **, envp, int, flags)
716 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
717     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
718 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
719               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
720 #endif
721 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
722 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
723               struct timespec *, tsp, const sigset_t *, sigmask,
724               size_t, sigsetsize)
725 #endif
726 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
727               int, maxevents, int, timeout, const sigset_t *, sigmask,
728               size_t, sigsetsize)
729 #if defined(__NR_futex)
730 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
731               const struct timespec *,timeout,int *,uaddr2,int,val3)
732 #endif
733 #if defined(__NR_futex_time64)
734 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
735               const struct timespec *,timeout,int *,uaddr2,int,val3)
736 #endif
737 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
738 safe_syscall2(int, kill, pid_t, pid, int, sig)
739 safe_syscall2(int, tkill, int, tid, int, sig)
740 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
741 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
742 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
743 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
744               unsigned long, pos_l, unsigned long, pos_h)
745 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
746               unsigned long, pos_l, unsigned long, pos_h)
747 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
748               socklen_t, addrlen)
749 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
750               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
751 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
752               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
753 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
754 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
755 safe_syscall2(int, flock, int, fd, int, operation)
756 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
757 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
758               const struct timespec *, uts, size_t, sigsetsize)
759 #endif
760 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
761               int, flags)
762 #if defined(TARGET_NR_nanosleep)
763 safe_syscall2(int, nanosleep, const struct timespec *, req,
764               struct timespec *, rem)
765 #endif
766 #if defined(TARGET_NR_clock_nanosleep) || \
767     defined(TARGET_NR_clock_nanosleep_time64)
768 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
769               const struct timespec *, req, struct timespec *, rem)
770 #endif
771 #ifdef __NR_ipc
772 #ifdef __s390x__
773 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
774               void *, ptr)
775 #else
776 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
777               void *, ptr, long, fifth)
778 #endif
779 #endif
780 #ifdef __NR_msgsnd
781 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
782               int, flags)
783 #endif
784 #ifdef __NR_msgrcv
785 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
786               long, msgtype, int, flags)
787 #endif
788 #ifdef __NR_semtimedop
789 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
790               unsigned, nsops, const struct timespec *, timeout)
791 #endif
792 #if defined(TARGET_NR_mq_timedsend) || \
793     defined(TARGET_NR_mq_timedsend_time64)
794 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
795               size_t, len, unsigned, prio, const struct timespec *, timeout)
796 #endif
797 #if defined(TARGET_NR_mq_timedreceive) || \
798     defined(TARGET_NR_mq_timedreceive_time64)
799 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
800               size_t, len, unsigned *, prio, const struct timespec *, timeout)
801 #endif
802 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
803 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
804               int, outfd, loff_t *, poutoff, size_t, length,
805               unsigned int, flags)
806 #endif
807 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
808 safe_syscall4(int, fchmodat2, int, dfd, const char *, filename,
809               unsigned short, mode, unsigned int, flags)
810 #endif
811 
812 /* We do ioctl like this rather than via safe_syscall3 to preserve the
813  * "third argument might be integer or pointer or not present" behaviour of
814  * the libc function.
815  */
816 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
817 /* Similarly for fcntl. Since we always build with LFS enabled,
818  * we should be using the 64-bit structures automatically.
819  */
820 #ifdef __NR_fcntl64
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
822 #else
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
824 #endif
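/*
 * For illustration, callers forward whatever third argument the request
 * needs: a pointer (e.g. safe_ioctl(fd, TCGETS, &host_tio), where host_tio
 * is a hypothetical host termios buffer), an integer, or nothing at all
 * (e.g. safe_fcntl(fd, F_GETFL)); the variadic macros preserve that
 * flexibility.
 */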
825 
826 static inline int host_to_target_sock_type(int host_type)
827 {
828     int target_type;
829 
830     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
831     case SOCK_DGRAM:
832         target_type = TARGET_SOCK_DGRAM;
833         break;
834     case SOCK_STREAM:
835         target_type = TARGET_SOCK_STREAM;
836         break;
837     default:
838         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
839         break;
840     }
841 
842 #if defined(SOCK_CLOEXEC)
843     if (host_type & SOCK_CLOEXEC) {
844         target_type |= TARGET_SOCK_CLOEXEC;
845     }
846 #endif
847 
848 #if defined(SOCK_NONBLOCK)
849     if (host_type & SOCK_NONBLOCK) {
850         target_type |= TARGET_SOCK_NONBLOCK;
851     }
852 #endif
853 
854     return target_type;
855 }
856 
857 static abi_ulong target_brk, initial_target_brk;
858 
859 void target_set_brk(abi_ulong new_brk)
860 {
861     target_brk = TARGET_PAGE_ALIGN(new_brk);
862     initial_target_brk = target_brk;
863 }
864 
865 /* do_brk() must return target values and target errnos. */
866 abi_long do_brk(abi_ulong brk_val)
867 {
868     abi_long mapped_addr;
869     abi_ulong new_brk;
870     abi_ulong old_brk;
871 
872     /* brk pointers are always untagged */
873 
874     /* do not allow brk to shrink below the initial brk value */
875     if (brk_val < initial_target_brk) {
876         return target_brk;
877     }
878 
879     new_brk = TARGET_PAGE_ALIGN(brk_val);
880     old_brk = TARGET_PAGE_ALIGN(target_brk);
881 
882     /* new and old target_brk might be on the same page */
883     if (new_brk == old_brk) {
884         target_brk = brk_val;
885         return target_brk;
886     }
887 
888     /* Release heap if necessary */
889     if (new_brk < old_brk) {
890         target_munmap(new_brk, old_brk - new_brk);
891 
892         target_brk = brk_val;
893         return target_brk;
894     }
895 
896     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
897                               PROT_READ | PROT_WRITE,
898                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
899                               -1, 0);
900 
901     if (mapped_addr == old_brk) {
902         target_brk = brk_val;
903         return target_brk;
904     }
905 
906 #if defined(TARGET_ALPHA)
907     /* We (partially) emulate OSF/1 on Alpha, which requires we
908        return a proper errno, not an unchanged brk value.  */
909     return -TARGET_ENOMEM;
910 #endif
911     /* For everything else, return the previous break. */
912     return target_brk;
913 }
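/*
 * Worked example (assuming 4 KiB target pages): target_set_brk(0x40001234)
 * records initial_target_brk = target_brk = 0x40002000. A later guest
 * brk(0x40003000) finds new_brk 0x40003000 > old_brk 0x40002000, maps one
 * extra anonymous page at 0x40002000 and, on success, reports
 * target_brk = 0x40003000 back to the guest.
 */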
914 
915 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
916     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
917 static inline abi_long copy_from_user_fdset(fd_set *fds,
918                                             abi_ulong target_fds_addr,
919                                             int n)
920 {
921     int i, nw, j, k;
922     abi_ulong b, *target_fds;
923 
924     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
925     if (!(target_fds = lock_user(VERIFY_READ,
926                                  target_fds_addr,
927                                  sizeof(abi_ulong) * nw,
928                                  1)))
929         return -TARGET_EFAULT;
930 
931     FD_ZERO(fds);
932     k = 0;
933     for (i = 0; i < nw; i++) {
934         /* grab the abi_ulong */
935         __get_user(b, &target_fds[i]);
936         for (j = 0; j < TARGET_ABI_BITS; j++) {
937             /* check the bit inside the abi_ulong */
938             if ((b >> j) & 1)
939                 FD_SET(k, fds);
940             k++;
941         }
942     }
943 
944     unlock_user(target_fds, target_fds_addr, 0);
945 
946     return 0;
947 }
948 
949 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
950                                                  abi_ulong target_fds_addr,
951                                                  int n)
952 {
953     if (target_fds_addr) {
954         if (copy_from_user_fdset(fds, target_fds_addr, n))
955             return -TARGET_EFAULT;
956         *fds_ptr = fds;
957     } else {
958         *fds_ptr = NULL;
959     }
960     return 0;
961 }
962 
963 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
964                                           const fd_set *fds,
965                                           int n)
966 {
967     int i, nw, j, k;
968     abi_long v;
969     abi_ulong *target_fds;
970 
971     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
972     if (!(target_fds = lock_user(VERIFY_WRITE,
973                                  target_fds_addr,
974                                  sizeof(abi_ulong) * nw,
975                                  0)))
976         return -TARGET_EFAULT;
977 
978     k = 0;
979     for (i = 0; i < nw; i++) {
980         v = 0;
981         for (j = 0; j < TARGET_ABI_BITS; j++) {
982             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
983             k++;
984         }
985         __put_user(v, &target_fds[i]);
986     }
987 
988     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
989 
990     return 0;
991 }
992 #endif
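/*
 * Worked example: with TARGET_ABI_BITS == 32 and n == 70 descriptors,
 * nw = DIV_ROUND_UP(70, 32) = 3 guest words are transferred, and guest
 * fd k corresponds to bit (k % 32) of word (k / 32); __get_user()/
 * __put_user() take care of guest endianness.
 */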
993 
994 #if defined(__alpha__)
995 #define HOST_HZ 1024
996 #else
997 #define HOST_HZ 100
998 #endif
999 
1000 static inline abi_long host_to_target_clock_t(long ticks)
1001 {
1002 #if HOST_HZ == TARGET_HZ
1003     return ticks;
1004 #else
1005     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1006 #endif
1007 }
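/*
 * Worked example: on an Alpha host (HOST_HZ == 1024) emulating a target
 * with TARGET_HZ == 100, 2048 host ticks are reported to the guest as
 * (2048 * 100) / 1024 = 200 target clock ticks.
 */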
1008 
1009 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1010                                              const struct rusage *rusage)
1011 {
1012     struct target_rusage *target_rusage;
1013 
1014     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1015         return -TARGET_EFAULT;
1016     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1017     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1018     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1019     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1020     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1021     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1022     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1023     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1024     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1025     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1026     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1027     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1028     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1029     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1030     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1031     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1032     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1033     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1034     unlock_user_struct(target_rusage, target_addr, 1);
1035 
1036     return 0;
1037 }
1038 
1039 #ifdef TARGET_NR_setrlimit
1040 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1041 {
1042     abi_ulong target_rlim_swap;
1043     rlim_t result;
1044 
1045     target_rlim_swap = tswapal(target_rlim);
1046     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1047         return RLIM_INFINITY;
1048 
1049     result = target_rlim_swap;
1050     if (target_rlim_swap != (rlim_t)result)
1051         return RLIM_INFINITY;
1052 
1053     return result;
1054 }
1055 #endif
1056 
1057 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1058 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1059 {
1060     abi_ulong target_rlim_swap;
1061     abi_ulong result;
1062 
1063     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1064         target_rlim_swap = TARGET_RLIM_INFINITY;
1065     else
1066         target_rlim_swap = rlim;
1067     result = tswapal(target_rlim_swap);
1068 
1069     return result;
1070 }
1071 #endif
1072 
1073 static inline int target_to_host_resource(int code)
1074 {
1075     switch (code) {
1076     case TARGET_RLIMIT_AS:
1077         return RLIMIT_AS;
1078     case TARGET_RLIMIT_CORE:
1079         return RLIMIT_CORE;
1080     case TARGET_RLIMIT_CPU:
1081         return RLIMIT_CPU;
1082     case TARGET_RLIMIT_DATA:
1083         return RLIMIT_DATA;
1084     case TARGET_RLIMIT_FSIZE:
1085         return RLIMIT_FSIZE;
1086     case TARGET_RLIMIT_LOCKS:
1087         return RLIMIT_LOCKS;
1088     case TARGET_RLIMIT_MEMLOCK:
1089         return RLIMIT_MEMLOCK;
1090     case TARGET_RLIMIT_MSGQUEUE:
1091         return RLIMIT_MSGQUEUE;
1092     case TARGET_RLIMIT_NICE:
1093         return RLIMIT_NICE;
1094     case TARGET_RLIMIT_NOFILE:
1095         return RLIMIT_NOFILE;
1096     case TARGET_RLIMIT_NPROC:
1097         return RLIMIT_NPROC;
1098     case TARGET_RLIMIT_RSS:
1099         return RLIMIT_RSS;
1100     case TARGET_RLIMIT_RTPRIO:
1101         return RLIMIT_RTPRIO;
1102 #ifdef RLIMIT_RTTIME
1103     case TARGET_RLIMIT_RTTIME:
1104         return RLIMIT_RTTIME;
1105 #endif
1106     case TARGET_RLIMIT_SIGPENDING:
1107         return RLIMIT_SIGPENDING;
1108     case TARGET_RLIMIT_STACK:
1109         return RLIMIT_STACK;
1110     default:
1111         return code;
1112     }
1113 }
1114 
1115 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1116                                               abi_ulong target_tv_addr)
1117 {
1118     struct target_timeval *target_tv;
1119 
1120     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1121         return -TARGET_EFAULT;
1122     }
1123 
1124     __get_user(tv->tv_sec, &target_tv->tv_sec);
1125     __get_user(tv->tv_usec, &target_tv->tv_usec);
1126 
1127     unlock_user_struct(target_tv, target_tv_addr, 0);
1128 
1129     return 0;
1130 }
1131 
1132 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1133                                             const struct timeval *tv)
1134 {
1135     struct target_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __put_user(tv->tv_sec, &target_tv->tv_sec);
1142     __put_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 1);
1145 
1146     return 0;
1147 }
1148 
1149 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1150 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1151                                                 abi_ulong target_tv_addr)
1152 {
1153     struct target__kernel_sock_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __get_user(tv->tv_sec, &target_tv->tv_sec);
1160     __get_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 0);
1163 
1164     return 0;
1165 }
1166 #endif
1167 
1168 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1169                                               const struct timeval *tv)
1170 {
1171     struct target__kernel_sock_timeval *target_tv;
1172 
1173     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1174         return -TARGET_EFAULT;
1175     }
1176 
1177     __put_user(tv->tv_sec, &target_tv->tv_sec);
1178     __put_user(tv->tv_usec, &target_tv->tv_usec);
1179 
1180     unlock_user_struct(target_tv, target_tv_addr, 1);
1181 
1182     return 0;
1183 }
1184 
1185 #if defined(TARGET_NR_futex) || \
1186     defined(TARGET_NR_rt_sigtimedwait) || \
1187     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1188     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1189     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1190     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1191     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1192     defined(TARGET_NR_timer_settime) || \
1193     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1194 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1195                                                abi_ulong target_addr)
1196 {
1197     struct target_timespec *target_ts;
1198 
1199     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1200         return -TARGET_EFAULT;
1201     }
1202     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1203     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1204     unlock_user_struct(target_ts, target_addr, 0);
1205     return 0;
1206 }
1207 #endif
1208 
1209 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1210     defined(TARGET_NR_timer_settime64) || \
1211     defined(TARGET_NR_mq_timedsend_time64) || \
1212     defined(TARGET_NR_mq_timedreceive_time64) || \
1213     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1214     defined(TARGET_NR_clock_nanosleep_time64) || \
1215     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1216     defined(TARGET_NR_utimensat) || \
1217     defined(TARGET_NR_utimensat_time64) || \
1218     defined(TARGET_NR_semtimedop_time64) || \
1219     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1220 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1221                                                  abi_ulong target_addr)
1222 {
1223     struct target__kernel_timespec *target_ts;
1224 
1225     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1226         return -TARGET_EFAULT;
1227     }
1228     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1229     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1230     /* in 32-bit mode, this drops the padding */
1231     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1232     unlock_user_struct(target_ts, target_addr, 0);
1233     return 0;
1234 }
1235 #endif
1236 
1237 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1238                                                struct timespec *host_ts)
1239 {
1240     struct target_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 1);
1248     return 0;
1249 }
1250 
1251 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1252                                                  struct timespec *host_ts)
1253 {
1254     struct target__kernel_timespec *target_ts;
1255 
1256     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1257         return -TARGET_EFAULT;
1258     }
1259     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1260     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1261     unlock_user_struct(target_ts, target_addr, 1);
1262     return 0;
1263 }
1264 
1265 #if defined(TARGET_NR_gettimeofday)
1266 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1267                                              struct timezone *tz)
1268 {
1269     struct target_timezone *target_tz;
1270 
1271     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1276     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1277 
1278     unlock_user_struct(target_tz, target_tz_addr, 1);
1279 
1280     return 0;
1281 }
1282 #endif
1283 
1284 #if defined(TARGET_NR_settimeofday)
1285 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1286                                                abi_ulong target_tz_addr)
1287 {
1288     struct target_timezone *target_tz;
1289 
1290     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1291         return -TARGET_EFAULT;
1292     }
1293 
1294     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1295     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1296 
1297     unlock_user_struct(target_tz, target_tz_addr, 0);
1298 
1299     return 0;
1300 }
1301 #endif
1302 
1303 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1304 #include <mqueue.h>
1305 
1306 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1307                                               abi_ulong target_mq_attr_addr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1312                           target_mq_attr_addr, 1))
1313         return -TARGET_EFAULT;
1314 
1315     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1321 
1322     return 0;
1323 }
1324 
1325 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1326                                             const struct mq_attr *attr)
1327 {
1328     struct target_mq_attr *target_mq_attr;
1329 
1330     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1331                           target_mq_attr_addr, 0))
1332         return -TARGET_EFAULT;
1333 
1334     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1335     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1336     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1337     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1338 
1339     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1340 
1341     return 0;
1342 }
1343 #endif
1344 
1345 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1346 /* do_select() must return target values and target errnos. */
1347 static abi_long do_select(int n,
1348                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1349                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1350 {
1351     fd_set rfds, wfds, efds;
1352     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1353     struct timeval tv;
1354     struct timespec ts, *ts_ptr;
1355     abi_long ret;
1356 
1357     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1358     if (ret) {
1359         return ret;
1360     }
1361     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1362     if (ret) {
1363         return ret;
1364     }
1365     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1366     if (ret) {
1367         return ret;
1368     }
1369 
1370     if (target_tv_addr) {
1371         if (copy_from_user_timeval(&tv, target_tv_addr))
1372             return -TARGET_EFAULT;
1373         ts.tv_sec = tv.tv_sec;
1374         ts.tv_nsec = tv.tv_usec * 1000;
1375         ts_ptr = &ts;
1376     } else {
1377         ts_ptr = NULL;
1378     }
1379 
1380     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1381                                   ts_ptr, NULL));
1382 
1383     if (!is_error(ret)) {
1384         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1385             return -TARGET_EFAULT;
1386         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1387             return -TARGET_EFAULT;
1388         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1389             return -TARGET_EFAULT;
1390 
1391         if (target_tv_addr) {
1392             tv.tv_sec = ts.tv_sec;
1393             tv.tv_usec = ts.tv_nsec / 1000;
1394             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1395                 return -TARGET_EFAULT;
1396             }
1397         }
1398     }
1399 
1400     return ret;
1401 }
1402 
1403 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1404 static abi_long do_old_select(abi_ulong arg1)
1405 {
1406     struct target_sel_arg_struct *sel;
1407     abi_ulong inp, outp, exp, tvp;
1408     long nsel;
1409 
1410     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1411         return -TARGET_EFAULT;
1412     }
1413 
1414     nsel = tswapal(sel->n);
1415     inp = tswapal(sel->inp);
1416     outp = tswapal(sel->outp);
1417     exp = tswapal(sel->exp);
1418     tvp = tswapal(sel->tvp);
1419 
1420     unlock_user_struct(sel, arg1, 0);
1421 
1422     return do_select(nsel, inp, outp, exp, tvp);
1423 }
1424 #endif
1425 #endif
1426 
1427 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1428 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1429                             abi_long arg4, abi_long arg5, abi_long arg6,
1430                             bool time64)
1431 {
1432     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1433     fd_set rfds, wfds, efds;
1434     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1435     struct timespec ts, *ts_ptr;
1436     abi_long ret;
1437 
1438     /*
1439      * The 6th arg is actually two args smashed together,
1440      * so we cannot use the C library.
1441      */
1442     struct {
1443         sigset_t *set;
1444         size_t size;
1445     } sig, *sig_ptr;
1446 
1447     abi_ulong arg_sigset, arg_sigsize, *arg7;
1448 
1449     n = arg1;
1450     rfd_addr = arg2;
1451     wfd_addr = arg3;
1452     efd_addr = arg4;
1453     ts_addr = arg5;
1454 
1455     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1456     if (ret) {
1457         return ret;
1458     }
1459     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1460     if (ret) {
1461         return ret;
1462     }
1463     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1464     if (ret) {
1465         return ret;
1466     }
1467 
1468     /*
1469      * This takes a timespec, and not a timeval, so we cannot
1470      * use the do_select() helper ...
1471      */
1472     if (ts_addr) {
1473         if (time64) {
1474             if (target_to_host_timespec64(&ts, ts_addr)) {
1475                 return -TARGET_EFAULT;
1476             }
1477         } else {
1478             if (target_to_host_timespec(&ts, ts_addr)) {
1479                 return -TARGET_EFAULT;
1480             }
1481         }
1482         ts_ptr = &ts;
1483     } else {
1484         ts_ptr = NULL;
1485     }
1486 
1487     /* Extract the two packed args for the sigset */
1488     sig_ptr = NULL;
1489     if (arg6) {
1490         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1491         if (!arg7) {
1492             return -TARGET_EFAULT;
1493         }
1494         arg_sigset = tswapal(arg7[0]);
1495         arg_sigsize = tswapal(arg7[1]);
1496         unlock_user(arg7, arg6, 0);
1497 
1498         if (arg_sigset) {
1499             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1500             if (ret != 0) {
1501                 return ret;
1502             }
1503             sig_ptr = &sig;
1504             sig.size = SIGSET_T_SIZE;
1505         }
1506     }
1507 
1508     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1509                                   ts_ptr, sig_ptr));
1510 
1511     if (sig_ptr) {
1512         finish_sigsuspend_mask(ret);
1513     }
1514 
1515     if (!is_error(ret)) {
1516         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1517             return -TARGET_EFAULT;
1518         }
1519         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1520             return -TARGET_EFAULT;
1521         }
1522         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1523             return -TARGET_EFAULT;
1524         }
1525         if (time64) {
1526             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1527                 return -TARGET_EFAULT;
1528             }
1529         } else {
1530             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1531                 return -TARGET_EFAULT;
1532             }
1533         }
1534     }
1535     return ret;
1536 }
1537 #endif
1538 
1539 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1540     defined(TARGET_NR_ppoll_time64)
1541 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1542                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1543 {
1544     struct target_pollfd *target_pfd;
1545     unsigned int nfds = arg2;
1546     struct pollfd *pfd;
1547     unsigned int i;
1548     abi_long ret;
1549 
1550     pfd = NULL;
1551     target_pfd = NULL;
1552     if (nfds) {
1553         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1554             return -TARGET_EINVAL;
1555         }
1556         target_pfd = lock_user(VERIFY_WRITE, arg1,
1557                                sizeof(struct target_pollfd) * nfds, 1);
1558         if (!target_pfd) {
1559             return -TARGET_EFAULT;
1560         }
1561 
1562         pfd = alloca(sizeof(struct pollfd) * nfds);
1563         for (i = 0; i < nfds; i++) {
1564             pfd[i].fd = tswap32(target_pfd[i].fd);
1565             pfd[i].events = tswap16(target_pfd[i].events);
1566         }
1567     }
1568     if (ppoll) {
1569         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1570         sigset_t *set = NULL;
1571 
1572         if (arg3) {
1573             if (time64) {
1574                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1575                     unlock_user(target_pfd, arg1, 0);
1576                     return -TARGET_EFAULT;
1577                 }
1578             } else {
1579                 if (target_to_host_timespec(timeout_ts, arg3)) {
1580                     unlock_user(target_pfd, arg1, 0);
1581                     return -TARGET_EFAULT;
1582                 }
1583             }
1584         } else {
1585             timeout_ts = NULL;
1586         }
1587 
1588         if (arg4) {
1589             ret = process_sigsuspend_mask(&set, arg4, arg5);
1590             if (ret != 0) {
1591                 unlock_user(target_pfd, arg1, 0);
1592                 return ret;
1593             }
1594         }
1595 
1596         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1597                                    set, SIGSET_T_SIZE));
1598 
1599         if (set) {
1600             finish_sigsuspend_mask(ret);
1601         }
1602         if (!is_error(ret) && arg3) {
1603             if (time64) {
1604                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1605                     return -TARGET_EFAULT;
1606                 }
1607             } else {
1608                 if (host_to_target_timespec(arg3, timeout_ts)) {
1609                     return -TARGET_EFAULT;
1610                 }
1611             }
1612         }
1613     } else {
1614         struct timespec ts, *pts;
1615 
1616         if (arg3 >= 0) {
1617             /* Convert ms to secs, ns */
1618             ts.tv_sec = arg3 / 1000;
1619             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1620             pts = &ts;
1621         } else {
1622             /* -ve poll() timeout means "infinite" */
1623             pts = NULL;
1624         }
1625         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1626     }
1627 
1628     if (!is_error(ret)) {
1629         for (i = 0; i < nfds; i++) {
1630             target_pfd[i].revents = tswap16(pfd[i].revents);
1631         }
1632     }
1633     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1634     return ret;
1635 }
1636 #endif
1637 
1638 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1639                         int flags, int is_pipe2)
1640 {
1641     int host_pipe[2];
1642     abi_long ret;
1643     ret = pipe2(host_pipe, flags);
1644 
1645     if (is_error(ret))
1646         return get_errno(ret);
1647 
1648     /* Several targets have special calling conventions for the original
1649        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
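    /* On those targets the second descriptor is returned in a register
     * (a4 on Alpha, v1 on MIPS, r1 on SH4, %o1 on SPARC) and the first
     * via the normal return value, so nothing is stored through pipedes.
     */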
1650     if (!is_pipe2) {
1651 #if defined(TARGET_ALPHA)
1652         cpu_env->ir[IR_A4] = host_pipe[1];
1653         return host_pipe[0];
1654 #elif defined(TARGET_MIPS)
1655         cpu_env->active_tc.gpr[3] = host_pipe[1];
1656         return host_pipe[0];
1657 #elif defined(TARGET_SH4)
1658         cpu_env->gregs[1] = host_pipe[1];
1659         return host_pipe[0];
1660 #elif defined(TARGET_SPARC)
1661         cpu_env->regwptr[1] = host_pipe[1];
1662         return host_pipe[0];
1663 #endif
1664     }
1665 
1666     if (put_user_s32(host_pipe[0], pipedes)
1667         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1668         return -TARGET_EFAULT;
1669     return get_errno(ret);
1670 }
1671 
1672 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1673                                                abi_ulong target_addr,
1674                                                socklen_t len)
1675 {
1676     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1677     sa_family_t sa_family;
1678     struct target_sockaddr *target_saddr;
1679 
1680     if (fd_trans_target_to_host_addr(fd)) {
1681         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1682     }
1683 
1684     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1685     if (!target_saddr)
1686         return -TARGET_EFAULT;
1687 
1688     sa_family = tswap16(target_saddr->sa_family);
1689 
1690     /* Oops. The caller might send an incomplete sun_path; sun_path
1691      * must be terminated by \0 (see the manual page), but
1692      * unfortunately it is quite common to specify sockaddr_un
1693      * length as "strlen(x->sun_path)" while it should be
1694      * "strlen(...) + 1". We'll fix that here if needed.
1695      * The Linux kernel has a similar feature.
1696      */
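    /* For example, a guest that passes len as
     * offsetof(struct sockaddr_un, sun_path) + strlen(sun_path) will have
     * len bumped by one below so that the terminating NUL is included,
     * as long as the result still fits in sizeof(struct sockaddr_un).
     */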
1697 
1698     if (sa_family == AF_UNIX) {
1699         if (len < unix_maxlen && len > 0) {
1700             char *cp = (char*)target_saddr;
1701 
1702             if ( cp[len-1] && !cp[len] )
1703                 len++;
1704         }
1705         if (len > unix_maxlen)
1706             len = unix_maxlen;
1707     }
1708 
1709     memcpy(addr, target_saddr, len);
1710     addr->sa_family = sa_family;
1711     if (sa_family == AF_NETLINK) {
1712         struct sockaddr_nl *nladdr;
1713 
1714         nladdr = (struct sockaddr_nl *)addr;
1715         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1716         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1717     } else if (sa_family == AF_PACKET) {
1718         struct target_sockaddr_ll *lladdr;
1719 
1720         lladdr = (struct target_sockaddr_ll *)addr;
1721         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1722         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1723     } else if (sa_family == AF_INET6) {
1724         struct sockaddr_in6 *in6addr;
1725 
1726         in6addr = (struct sockaddr_in6 *)addr;
1727         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1728     }
1729     unlock_user(target_saddr, target_addr, 0);
1730 
1731     return 0;
1732 }
1733 
1734 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1735                                                struct sockaddr *addr,
1736                                                socklen_t len)
1737 {
1738     struct target_sockaddr *target_saddr;
1739 
1740     if (len == 0) {
1741         return 0;
1742     }
1743     assert(addr);
1744 
1745     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1746     if (!target_saddr)
1747         return -TARGET_EFAULT;
1748     memcpy(target_saddr, addr, len);
1749     if (len >= offsetof(struct target_sockaddr, sa_family) +
1750         sizeof(target_saddr->sa_family)) {
1751         target_saddr->sa_family = tswap16(addr->sa_family);
1752     }
1753     if (addr->sa_family == AF_NETLINK &&
1754         len >= sizeof(struct target_sockaddr_nl)) {
1755         struct target_sockaddr_nl *target_nl =
1756                (struct target_sockaddr_nl *)target_saddr;
1757         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1758         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1759     } else if (addr->sa_family == AF_PACKET) {
1760         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1761         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1762         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1763     } else if (addr->sa_family == AF_INET6 &&
1764                len >= sizeof(struct target_sockaddr_in6)) {
1765         struct target_sockaddr_in6 *target_in6 =
1766                (struct target_sockaddr_in6 *)target_saddr;
1767         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1768     }
1769     unlock_user(target_saddr, target_addr, len);
1770 
1771     return 0;
1772 }
1773 
1774 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1775                                            struct target_msghdr *target_msgh)
1776 {
1777     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1778     abi_long msg_controllen;
1779     abi_ulong target_cmsg_addr;
1780     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1781     socklen_t space = 0;
1782 
1783     msg_controllen = tswapal(target_msgh->msg_controllen);
1784     if (msg_controllen < sizeof (struct target_cmsghdr))
1785         goto the_end;
1786     target_cmsg_addr = tswapal(target_msgh->msg_control);
1787     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1788     target_cmsg_start = target_cmsg;
1789     if (!target_cmsg)
1790         return -TARGET_EFAULT;
1791 
1792     while (cmsg && target_cmsg) {
1793         void *data = CMSG_DATA(cmsg);
1794         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1795 
1796         int len = tswapal(target_cmsg->cmsg_len)
1797             - sizeof(struct target_cmsghdr);
1798 
1799         space += CMSG_SPACE(len);
1800         if (space > msgh->msg_controllen) {
1801             space -= CMSG_SPACE(len);
1802             /* This is a QEMU bug, since we allocated the payload
1803              * area ourselves (unlike overflow in host-to-target
1804              * conversion, which is just the guest giving us a buffer
1805              * that's too small). It can't happen for the payload types
1806              * we currently support; if it becomes an issue in future
1807              * we would need to improve our allocation strategy to
1808              * something more intelligent than "twice the size of the
1809              * target buffer we're reading from".
1810              */
1811             qemu_log_mask(LOG_UNIMP,
1812                           ("Unsupported ancillary data %d/%d: "
1813                            "unhandled msg size\n"),
1814                           tswap32(target_cmsg->cmsg_level),
1815                           tswap32(target_cmsg->cmsg_type));
1816             break;
1817         }
1818 
1819         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1820             cmsg->cmsg_level = SOL_SOCKET;
1821         } else {
1822             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1823         }
1824         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1825         cmsg->cmsg_len = CMSG_LEN(len);
1826 
1827         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1828             int *fd = (int *)data;
1829             int *target_fd = (int *)target_data;
1830             int i, numfds = len / sizeof(int);
1831 
1832             for (i = 0; i < numfds; i++) {
1833                 __get_user(fd[i], target_fd + i);
1834             }
1835         } else if (cmsg->cmsg_level == SOL_SOCKET
1836                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1837             struct ucred *cred = (struct ucred *)data;
1838             struct target_ucred *target_cred =
1839                 (struct target_ucred *)target_data;
1840 
1841             __get_user(cred->pid, &target_cred->pid);
1842             __get_user(cred->uid, &target_cred->uid);
1843             __get_user(cred->gid, &target_cred->gid);
1844         } else if (cmsg->cmsg_level == SOL_ALG) {
1845             uint32_t *dst = (uint32_t *)data;
1846 
1847             memcpy(dst, target_data, len);
1848             /* fix endianness of first 32-bit word */
1849             if (len >= sizeof(uint32_t)) {
1850                 *dst = tswap32(*dst);
1851             }
1852         } else {
1853             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1854                           cmsg->cmsg_level, cmsg->cmsg_type);
1855             memcpy(data, target_data, len);
1856         }
1857 
1858         cmsg = CMSG_NXTHDR(msgh, cmsg);
1859         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1860                                          target_cmsg_start);
1861     }
1862     unlock_user(target_cmsg, target_cmsg_addr, 0);
1863  the_end:
1864     msgh->msg_controllen = space;
1865     return 0;
1866 }
1867 
1868 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1869                                            struct msghdr *msgh)
1870 {
1871     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872     abi_long msg_controllen;
1873     abi_ulong target_cmsg_addr;
1874     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875     socklen_t space = 0;
1876 
1877     msg_controllen = tswapal(target_msgh->msg_controllen);
1878     if (msg_controllen < sizeof (struct target_cmsghdr))
1879         goto the_end;
1880     target_cmsg_addr = tswapal(target_msgh->msg_control);
1881     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1882     target_cmsg_start = target_cmsg;
1883     if (!target_cmsg)
1884         return -TARGET_EFAULT;
1885 
1886     while (cmsg && target_cmsg) {
1887         void *data = CMSG_DATA(cmsg);
1888         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889 
1890         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1891         int tgt_len, tgt_space;
1892 
1893         /* We never copy a half-header but may copy half-data;
1894          * this is Linux's behaviour in put_cmsg(). Note that
1895          * truncation here is a guest problem (which we report
1896          * to the guest via the CTRUNC bit), unlike truncation
1897          * in target_to_host_cmsg, which is a QEMU bug.
1898          */
1899         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1900             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1901             break;
1902         }
1903 
1904         if (cmsg->cmsg_level == SOL_SOCKET) {
1905             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1906         } else {
1907             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1908         }
1909         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1910 
1911         /* Payload types which need a different size of payload on
1912          * the target must adjust tgt_len here.
1913          */
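        /* For instance, SO_TIMESTAMP is delivered by the host as a
         * struct timeval but must be presented to the guest as a
         * struct target_timeval, whose size can differ (e.g. 64-bit
         * host, 32-bit guest), hence the adjustment below.
         */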
1914         tgt_len = len;
1915         switch (cmsg->cmsg_level) {
1916         case SOL_SOCKET:
1917             switch (cmsg->cmsg_type) {
1918             case SO_TIMESTAMP:
1919                 tgt_len = sizeof(struct target_timeval);
1920                 break;
1921             default:
1922                 break;
1923             }
1924             break;
1925         default:
1926             break;
1927         }
1928 
1929         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1930             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1931             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1932         }
1933 
1934         /* We must now copy-and-convert len bytes of payload
1935          * into tgt_len bytes of destination space. Bear in mind
1936          * that in both source and destination we may be dealing
1937          * with a truncated value!
1938          */
1939         switch (cmsg->cmsg_level) {
1940         case SOL_SOCKET:
1941             switch (cmsg->cmsg_type) {
1942             case SCM_RIGHTS:
1943             {
1944                 int *fd = (int *)data;
1945                 int *target_fd = (int *)target_data;
1946                 int i, numfds = tgt_len / sizeof(int);
1947 
1948                 for (i = 0; i < numfds; i++) {
1949                     __put_user(fd[i], target_fd + i);
1950                 }
1951                 break;
1952             }
1953             case SO_TIMESTAMP:
1954             {
1955                 struct timeval *tv = (struct timeval *)data;
1956                 struct target_timeval *target_tv =
1957                     (struct target_timeval *)target_data;
1958 
1959                 if (len != sizeof(struct timeval) ||
1960                     tgt_len != sizeof(struct target_timeval)) {
1961                     goto unimplemented;
1962                 }
1963 
1964                 /* copy struct timeval to target */
1965                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1966                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1967                 break;
1968             }
1969             case SCM_CREDENTIALS:
1970             {
1971                 struct ucred *cred = (struct ucred *)data;
1972                 struct target_ucred *target_cred =
1973                     (struct target_ucred *)target_data;
1974 
1975                 __put_user(cred->pid, &target_cred->pid);
1976                 __put_user(cred->uid, &target_cred->uid);
1977                 __put_user(cred->gid, &target_cred->gid);
1978                 break;
1979             }
1980             default:
1981                 goto unimplemented;
1982             }
1983             break;
1984 
1985         case SOL_IP:
1986             switch (cmsg->cmsg_type) {
1987             case IP_TTL:
1988             {
1989                 uint32_t *v = (uint32_t *)data;
1990                 uint32_t *t_int = (uint32_t *)target_data;
1991 
1992                 if (len != sizeof(uint32_t) ||
1993                     tgt_len != sizeof(uint32_t)) {
1994                     goto unimplemented;
1995                 }
1996                 __put_user(*v, t_int);
1997                 break;
1998             }
1999             case IP_RECVERR:
2000             {
2001                 struct errhdr_t {
2002                    struct sock_extended_err ee;
2003                    struct sockaddr_in offender;
2004                 };
2005                 struct errhdr_t *errh = (struct errhdr_t *)data;
2006                 struct errhdr_t *target_errh =
2007                     (struct errhdr_t *)target_data;
2008 
2009                 if (len != sizeof(struct errhdr_t) ||
2010                     tgt_len != sizeof(struct errhdr_t)) {
2011                     goto unimplemented;
2012                 }
2013                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2014                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2015                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2016                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2017                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2018                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2019                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2020                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2021                     (void *) &errh->offender, sizeof(errh->offender));
2022                 break;
2023             }
2024             case IP_PKTINFO:
2025             {
2026                 struct in_pktinfo *pkti = data;
2027                 struct target_in_pktinfo *target_pi = target_data;
2028 
2029                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2030                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2031                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2032                 break;
2033             }
2034             default:
2035                 goto unimplemented;
2036             }
2037             break;
2038 
2039         case SOL_IPV6:
2040             switch (cmsg->cmsg_type) {
2041             case IPV6_HOPLIMIT:
2042             {
2043                 uint32_t *v = (uint32_t *)data;
2044                 uint32_t *t_int = (uint32_t *)target_data;
2045 
2046                 if (len != sizeof(uint32_t) ||
2047                     tgt_len != sizeof(uint32_t)) {
2048                     goto unimplemented;
2049                 }
2050                 __put_user(*v, t_int);
2051                 break;
2052             }
2053             case IPV6_RECVERR:
2054             {
2055                 struct errhdr6_t {
2056                    struct sock_extended_err ee;
2057                    struct sockaddr_in6 offender;
2058                 };
2059                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2060                 struct errhdr6_t *target_errh =
2061                     (struct errhdr6_t *)target_data;
2062 
2063                 if (len != sizeof(struct errhdr6_t) ||
2064                     tgt_len != sizeof(struct errhdr6_t)) {
2065                     goto unimplemented;
2066                 }
2067                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2068                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2069                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2070                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2071                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2072                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2073                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2074                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2075                     (void *) &errh->offender, sizeof(errh->offender));
2076                 break;
2077             }
2078             default:
2079                 goto unimplemented;
2080             }
2081             break;
2082 
2083         default:
2084         unimplemented:
2085             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2086                           cmsg->cmsg_level, cmsg->cmsg_type);
2087             memcpy(target_data, data, MIN(len, tgt_len));
2088             if (tgt_len > len) {
2089                 memset(target_data + len, 0, tgt_len - len);
2090             }
2091         }
2092 
2093         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2094         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2095         if (msg_controllen < tgt_space) {
2096             tgt_space = msg_controllen;
2097         }
2098         msg_controllen -= tgt_space;
2099         space += tgt_space;
2100         cmsg = CMSG_NXTHDR(msgh, cmsg);
2101         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2102                                          target_cmsg_start);
2103     }
2104     unlock_user(target_cmsg, target_cmsg_addr, space);
2105  the_end:
2106     target_msgh->msg_controllen = tswapal(space);
2107     return 0;
2108 }
2109 
2110 /* do_setsockopt() Must return target values and target errnos. */
2111 static abi_long do_setsockopt(int sockfd, int level, int optname,
2112                               abi_ulong optval_addr, socklen_t optlen)
2113 {
2114     abi_long ret;
2115     int val;
2116 
2117     switch(level) {
2118     case SOL_TCP:
2119     case SOL_UDP:
2120         /* TCP and UDP options all take an 'int' value.  */
2121         if (optlen < sizeof(uint32_t))
2122             return -TARGET_EINVAL;
2123 
2124         if (get_user_u32(val, optval_addr))
2125             return -TARGET_EFAULT;
2126         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2127         break;
2128     case SOL_IP:
2129         switch(optname) {
2130         case IP_TOS:
2131         case IP_TTL:
2132         case IP_HDRINCL:
2133         case IP_ROUTER_ALERT:
2134         case IP_RECVOPTS:
2135         case IP_RETOPTS:
2136         case IP_PKTINFO:
2137         case IP_MTU_DISCOVER:
2138         case IP_RECVERR:
2139         case IP_RECVTTL:
2140         case IP_RECVTOS:
2141 #ifdef IP_FREEBIND
2142         case IP_FREEBIND:
2143 #endif
2144         case IP_MULTICAST_TTL:
2145         case IP_MULTICAST_LOOP:
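            /* These options may be set with either a full int or a
             * single byte; read whichever size the guest supplied.
             */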
2146             val = 0;
2147             if (optlen >= sizeof(uint32_t)) {
2148                 if (get_user_u32(val, optval_addr))
2149                     return -TARGET_EFAULT;
2150             } else if (optlen >= 1) {
2151                 if (get_user_u8(val, optval_addr))
2152                     return -TARGET_EFAULT;
2153             }
2154             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2155             break;
2156         case IP_MULTICAST_IF:
2157         case IP_ADD_MEMBERSHIP:
2158         case IP_DROP_MEMBERSHIP:
2159         {
2160             struct ip_mreqn ip_mreq;
2161             struct target_ip_mreqn *target_smreqn;
2162             int min_size;
2163 
2164             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2165                               sizeof(struct target_ip_mreq));
2166 
2167             if (optname == IP_MULTICAST_IF) {
2168                 min_size = sizeof(struct in_addr);
2169             } else {
2170                 min_size = sizeof(struct target_ip_mreq);
2171             }
2172             if (optlen < min_size ||
2173                 optlen > sizeof (struct target_ip_mreqn)) {
2174                 return -TARGET_EINVAL;
2175             }
2176 
2177             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2178             if (!target_smreqn) {
2179                 return -TARGET_EFAULT;
2180             }
2181             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2182             if (optlen >= sizeof(struct target_ip_mreq)) {
2183                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2184                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2185                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2186                     optlen = sizeof(struct ip_mreqn);
2187                 }
2188             }
2189             unlock_user(target_smreqn, optval_addr, 0);
2190             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2191             break;
2192         }
2193         case IP_BLOCK_SOURCE:
2194         case IP_UNBLOCK_SOURCE:
2195         case IP_ADD_SOURCE_MEMBERSHIP:
2196         case IP_DROP_SOURCE_MEMBERSHIP:
2197         {
2198             struct ip_mreq_source *ip_mreq_source;
2199 
2200             if (optlen != sizeof (struct target_ip_mreq_source))
2201                 return -TARGET_EINVAL;
2202 
2203             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2204             if (!ip_mreq_source) {
2205                 return -TARGET_EFAULT;
2206             }
2207             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2208             unlock_user (ip_mreq_source, optval_addr, 0);
2209             break;
2210         }
2211         default:
2212             goto unimplemented;
2213         }
2214         break;
2215     case SOL_IPV6:
2216         switch (optname) {
2217         case IPV6_MTU_DISCOVER:
2218         case IPV6_MTU:
2219         case IPV6_V6ONLY:
2220         case IPV6_RECVPKTINFO:
2221         case IPV6_UNICAST_HOPS:
2222         case IPV6_MULTICAST_HOPS:
2223         case IPV6_MULTICAST_LOOP:
2224         case IPV6_RECVERR:
2225         case IPV6_RECVHOPLIMIT:
2226         case IPV6_2292HOPLIMIT:
2227         case IPV6_CHECKSUM:
2228         case IPV6_ADDRFORM:
2229         case IPV6_2292PKTINFO:
2230         case IPV6_RECVTCLASS:
2231         case IPV6_RECVRTHDR:
2232         case IPV6_2292RTHDR:
2233         case IPV6_RECVHOPOPTS:
2234         case IPV6_2292HOPOPTS:
2235         case IPV6_RECVDSTOPTS:
2236         case IPV6_2292DSTOPTS:
2237         case IPV6_TCLASS:
2238         case IPV6_ADDR_PREFERENCES:
2239 #ifdef IPV6_RECVPATHMTU
2240         case IPV6_RECVPATHMTU:
2241 #endif
2242 #ifdef IPV6_TRANSPARENT
2243         case IPV6_TRANSPARENT:
2244 #endif
2245 #ifdef IPV6_FREEBIND
2246         case IPV6_FREEBIND:
2247 #endif
2248 #ifdef IPV6_RECVORIGDSTADDR
2249         case IPV6_RECVORIGDSTADDR:
2250 #endif
2251             val = 0;
2252             if (optlen < sizeof(uint32_t)) {
2253                 return -TARGET_EINVAL;
2254             }
2255             if (get_user_u32(val, optval_addr)) {
2256                 return -TARGET_EFAULT;
2257             }
2258             ret = get_errno(setsockopt(sockfd, level, optname,
2259                                        &val, sizeof(val)));
2260             break;
2261         case IPV6_PKTINFO:
2262         {
2263             struct in6_pktinfo pki;
2264 
2265             if (optlen < sizeof(pki)) {
2266                 return -TARGET_EINVAL;
2267             }
2268 
2269             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2270                 return -TARGET_EFAULT;
2271             }
2272 
2273             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2274 
2275             ret = get_errno(setsockopt(sockfd, level, optname,
2276                                        &pki, sizeof(pki)));
2277             break;
2278         }
2279         case IPV6_ADD_MEMBERSHIP:
2280         case IPV6_DROP_MEMBERSHIP:
2281         {
2282             struct ipv6_mreq ipv6mreq;
2283 
2284             if (optlen < sizeof(ipv6mreq)) {
2285                 return -TARGET_EINVAL;
2286             }
2287 
2288             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2289                 return -TARGET_EFAULT;
2290             }
2291 
2292             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2293 
2294             ret = get_errno(setsockopt(sockfd, level, optname,
2295                                        &ipv6mreq, sizeof(ipv6mreq)));
2296             break;
2297         }
2298         default:
2299             goto unimplemented;
2300         }
2301         break;
2302     case SOL_ICMPV6:
2303         switch (optname) {
2304         case ICMPV6_FILTER:
2305         {
2306             struct icmp6_filter icmp6f;
2307 
2308             if (optlen > sizeof(icmp6f)) {
2309                 optlen = sizeof(icmp6f);
2310             }
2311 
2312             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2313                 return -TARGET_EFAULT;
2314             }
2315 
2316             for (val = 0; val < 8; val++) {
2317                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2318             }
2319 
2320             ret = get_errno(setsockopt(sockfd, level, optname,
2321                                        &icmp6f, optlen));
2322             break;
2323         }
2324         default:
2325             goto unimplemented;
2326         }
2327         break;
2328     case SOL_RAW:
2329         switch (optname) {
2330         case ICMP_FILTER:
2331         case IPV6_CHECKSUM:
2332             /* those take a u32 value */
2333             if (optlen < sizeof(uint32_t)) {
2334                 return -TARGET_EINVAL;
2335             }
2336 
2337             if (get_user_u32(val, optval_addr)) {
2338                 return -TARGET_EFAULT;
2339             }
2340             ret = get_errno(setsockopt(sockfd, level, optname,
2341                                        &val, sizeof(val)));
2342             break;
2343 
2344         default:
2345             goto unimplemented;
2346         }
2347         break;
2348 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2349     case SOL_ALG:
2350         switch (optname) {
2351         case ALG_SET_KEY:
2352         {
2353             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2354             if (!alg_key) {
2355                 return -TARGET_EFAULT;
2356             }
2357             ret = get_errno(setsockopt(sockfd, level, optname,
2358                                        alg_key, optlen));
2359             unlock_user(alg_key, optval_addr, optlen);
2360             break;
2361         }
2362         case ALG_SET_AEAD_AUTHSIZE:
2363         {
2364             ret = get_errno(setsockopt(sockfd, level, optname,
2365                                        NULL, optlen));
2366             break;
2367         }
2368         default:
2369             goto unimplemented;
2370         }
2371         break;
2372 #endif
2373     case TARGET_SOL_SOCKET:
2374         switch (optname) {
2375         case TARGET_SO_RCVTIMEO:
2376         case TARGET_SO_SNDTIMEO:
2377         {
2378                 struct timeval tv;
2379 
2380                 if (optlen != sizeof(struct target_timeval)) {
2381                     return -TARGET_EINVAL;
2382                 }
2383 
2384                 if (copy_from_user_timeval(&tv, optval_addr)) {
2385                     return -TARGET_EFAULT;
2386                 }
2387 
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2389                                 optname == TARGET_SO_RCVTIMEO ?
2390                                     SO_RCVTIMEO : SO_SNDTIMEO,
2391                                 &tv, sizeof(tv)));
2392                 return ret;
2393         }
2394         case TARGET_SO_ATTACH_FILTER:
2395         {
2396                 struct target_sock_fprog *tfprog;
2397                 struct target_sock_filter *tfilter;
2398                 struct sock_fprog fprog;
2399                 struct sock_filter *filter;
2400                 int i;
2401 
2402                 if (optlen != sizeof(*tfprog)) {
2403                     return -TARGET_EINVAL;
2404                 }
2405                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2406                     return -TARGET_EFAULT;
2407                 }
2408                 if (!lock_user_struct(VERIFY_READ, tfilter,
2409                                       tswapal(tfprog->filter), 0)) {
2410                     unlock_user_struct(tfprog, optval_addr, 1);
2411                     return -TARGET_EFAULT;
2412                 }
2413 
2414                 fprog.len = tswap16(tfprog->len);
2415                 filter = g_try_new(struct sock_filter, fprog.len);
2416                 if (filter == NULL) {
2417                     unlock_user_struct(tfilter, tfprog->filter, 1);
2418                     unlock_user_struct(tfprog, optval_addr, 1);
2419                     return -TARGET_ENOMEM;
2420                 }
2421                 for (i = 0; i < fprog.len; i++) {
2422                     filter[i].code = tswap16(tfilter[i].code);
2423                     filter[i].jt = tfilter[i].jt;
2424                     filter[i].jf = tfilter[i].jf;
2425                     filter[i].k = tswap32(tfilter[i].k);
2426                 }
2427                 fprog.filter = filter;
2428 
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2430                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2431                 g_free(filter);
2432 
2433                 unlock_user_struct(tfilter, tfprog->filter, 1);
2434                 unlock_user_struct(tfprog, optval_addr, 1);
2435                 return ret;
2436         }
2437 	case TARGET_SO_BINDTODEVICE:
2438 	{
2439 		char *dev_ifname, *addr_ifname;
2440 
2441 		if (optlen > IFNAMSIZ - 1) {
2442 		    optlen = IFNAMSIZ - 1;
2443 		}
2444 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2445 		if (!dev_ifname) {
2446 		    return -TARGET_EFAULT;
2447 		}
2448 		optname = SO_BINDTODEVICE;
2449 		addr_ifname = alloca(IFNAMSIZ);
2450 		memcpy(addr_ifname, dev_ifname, optlen);
2451 		addr_ifname[optlen] = 0;
2452 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2453                                            addr_ifname, optlen));
2454 		unlock_user (dev_ifname, optval_addr, 0);
2455 		return ret;
2456 	}
2457         case TARGET_SO_LINGER:
2458         {
2459                 struct linger lg;
2460                 struct target_linger *tlg;
2461 
2462                 if (optlen != sizeof(struct target_linger)) {
2463                     return -TARGET_EINVAL;
2464                 }
2465                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2466                     return -TARGET_EFAULT;
2467                 }
2468                 __get_user(lg.l_onoff, &tlg->l_onoff);
2469                 __get_user(lg.l_linger, &tlg->l_linger);
2470                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2471                                 &lg, sizeof(lg)));
2472                 unlock_user_struct(tlg, optval_addr, 0);
2473                 return ret;
2474         }
2475             /* Options with 'int' argument.  */
2476         case TARGET_SO_DEBUG:
2477 		optname = SO_DEBUG;
2478 		break;
2479         case TARGET_SO_REUSEADDR:
2480 		optname = SO_REUSEADDR;
2481 		break;
2482 #ifdef SO_REUSEPORT
2483         case TARGET_SO_REUSEPORT:
2484                 optname = SO_REUSEPORT;
2485                 break;
2486 #endif
2487         case TARGET_SO_TYPE:
2488 		optname = SO_TYPE;
2489 		break;
2490         case TARGET_SO_ERROR:
2491 		optname = SO_ERROR;
2492 		break;
2493         case TARGET_SO_DONTROUTE:
2494 		optname = SO_DONTROUTE;
2495 		break;
2496         case TARGET_SO_BROADCAST:
2497 		optname = SO_BROADCAST;
2498 		break;
2499         case TARGET_SO_SNDBUF:
2500 		optname = SO_SNDBUF;
2501 		break;
2502         case TARGET_SO_SNDBUFFORCE:
2503                 optname = SO_SNDBUFFORCE;
2504                 break;
2505         case TARGET_SO_RCVBUF:
2506 		optname = SO_RCVBUF;
2507 		break;
2508         case TARGET_SO_RCVBUFFORCE:
2509                 optname = SO_RCVBUFFORCE;
2510                 break;
2511         case TARGET_SO_KEEPALIVE:
2512 		optname = SO_KEEPALIVE;
2513 		break;
2514         case TARGET_SO_OOBINLINE:
2515 		optname = SO_OOBINLINE;
2516 		break;
2517         case TARGET_SO_NO_CHECK:
2518 		optname = SO_NO_CHECK;
2519 		break;
2520         case TARGET_SO_PRIORITY:
2521 		optname = SO_PRIORITY;
2522 		break;
2523 #ifdef SO_BSDCOMPAT
2524         case TARGET_SO_BSDCOMPAT:
2525 		optname = SO_BSDCOMPAT;
2526 		break;
2527 #endif
2528         case TARGET_SO_PASSCRED:
2529 		optname = SO_PASSCRED;
2530 		break;
2531         case TARGET_SO_PASSSEC:
2532                 optname = SO_PASSSEC;
2533                 break;
2534         case TARGET_SO_TIMESTAMP:
2535 		optname = SO_TIMESTAMP;
2536 		break;
2537         case TARGET_SO_RCVLOWAT:
2538 		optname = SO_RCVLOWAT;
2539 		break;
2540         default:
2541             goto unimplemented;
2542         }
2543 	if (optlen < sizeof(uint32_t))
2544             return -TARGET_EINVAL;
2545 
2546 	if (get_user_u32(val, optval_addr))
2547             return -TARGET_EFAULT;
2548 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2549         break;
2550 #ifdef SOL_NETLINK
2551     case SOL_NETLINK:
2552         switch (optname) {
2553         case NETLINK_PKTINFO:
2554         case NETLINK_ADD_MEMBERSHIP:
2555         case NETLINK_DROP_MEMBERSHIP:
2556         case NETLINK_BROADCAST_ERROR:
2557         case NETLINK_NO_ENOBUFS:
2558 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2559         case NETLINK_LISTEN_ALL_NSID:
2560         case NETLINK_CAP_ACK:
2561 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2562 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2563         case NETLINK_EXT_ACK:
2564 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2565 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2566         case NETLINK_GET_STRICT_CHK:
2567 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2568             break;
2569         default:
2570             goto unimplemented;
2571         }
2572         val = 0;
2573         if (optlen < sizeof(uint32_t)) {
2574             return -TARGET_EINVAL;
2575         }
2576         if (get_user_u32(val, optval_addr)) {
2577             return -TARGET_EFAULT;
2578         }
2579         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2580                                    sizeof(val)));
2581         break;
2582 #endif /* SOL_NETLINK */
2583     default:
2584     unimplemented:
2585         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2586                       level, optname);
2587         ret = -TARGET_ENOPROTOOPT;
2588     }
2589     return ret;
2590 }
2591 
2592 /* do_getsockopt() Must return target values and target errnos. */
2593 static abi_long do_getsockopt(int sockfd, int level, int optname,
2594                               abi_ulong optval_addr, abi_ulong optlen)
2595 {
2596     abi_long ret;
2597     int len, val;
2598     socklen_t lv;
2599 
2600     switch(level) {
2601     case TARGET_SOL_SOCKET:
2602         level = SOL_SOCKET;
2603         switch (optname) {
2604         /* These don't just return a single integer */
2605         case TARGET_SO_PEERNAME:
2606             goto unimplemented;
2607         case TARGET_SO_RCVTIMEO: {
2608             struct timeval tv;
2609             socklen_t tvlen;
2610 
2611             optname = SO_RCVTIMEO;
2612 
2613 get_timeout:
2614             if (get_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (len < 0) {
2618                 return -TARGET_EINVAL;
2619             }
2620 
2621             tvlen = sizeof(tv);
2622             ret = get_errno(getsockopt(sockfd, level, optname,
2623                                        &tv, &tvlen));
2624             if (ret < 0) {
2625                 return ret;
2626             }
2627             if (len > sizeof(struct target_timeval)) {
2628                 len = sizeof(struct target_timeval);
2629             }
2630             if (copy_to_user_timeval(optval_addr, &tv)) {
2631                 return -TARGET_EFAULT;
2632             }
2633             if (put_user_u32(len, optlen)) {
2634                 return -TARGET_EFAULT;
2635             }
2636             break;
2637         }
2638         case TARGET_SO_SNDTIMEO:
2639             optname = SO_SNDTIMEO;
2640             goto get_timeout;
2641         case TARGET_SO_PEERCRED: {
2642             struct ucred cr;
2643             socklen_t crlen;
2644             struct target_ucred *tcr;
2645 
2646             if (get_user_u32(len, optlen)) {
2647                 return -TARGET_EFAULT;
2648             }
2649             if (len < 0) {
2650                 return -TARGET_EINVAL;
2651             }
2652 
2653             crlen = sizeof(cr);
2654             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2655                                        &cr, &crlen));
2656             if (ret < 0) {
2657                 return ret;
2658             }
2659             if (len > crlen) {
2660                 len = crlen;
2661             }
2662             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             __put_user(cr.pid, &tcr->pid);
2666             __put_user(cr.uid, &tcr->uid);
2667             __put_user(cr.gid, &tcr->gid);
2668             unlock_user_struct(tcr, optval_addr, 1);
2669             if (put_user_u32(len, optlen)) {
2670                 return -TARGET_EFAULT;
2671             }
2672             break;
2673         }
2674         case TARGET_SO_PEERSEC: {
2675             char *name;
2676 
2677             if (get_user_u32(len, optlen)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             if (len < 0) {
2681                 return -TARGET_EINVAL;
2682             }
2683             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2684             if (!name) {
2685                 return -TARGET_EFAULT;
2686             }
2687             lv = len;
2688             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2689                                        name, &lv));
2690             if (put_user_u32(lv, optlen)) {
2691                 ret = -TARGET_EFAULT;
2692             }
2693             unlock_user(name, optval_addr, lv);
2694             break;
2695         }
2696         case TARGET_SO_LINGER:
2697         {
2698             struct linger lg;
2699             socklen_t lglen;
2700             struct target_linger *tlg;
2701 
2702             if (get_user_u32(len, optlen)) {
2703                 return -TARGET_EFAULT;
2704             }
2705             if (len < 0) {
2706                 return -TARGET_EINVAL;
2707             }
2708 
2709             lglen = sizeof(lg);
2710             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2711                                        &lg, &lglen));
2712             if (ret < 0) {
2713                 return ret;
2714             }
2715             if (len > lglen) {
2716                 len = lglen;
2717             }
2718             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2719                 return -TARGET_EFAULT;
2720             }
2721             __put_user(lg.l_onoff, &tlg->l_onoff);
2722             __put_user(lg.l_linger, &tlg->l_linger);
2723             unlock_user_struct(tlg, optval_addr, 1);
2724             if (put_user_u32(len, optlen)) {
2725                 return -TARGET_EFAULT;
2726             }
2727             break;
2728         }
2729         /* Options with 'int' argument.  */
2730         case TARGET_SO_DEBUG:
2731             optname = SO_DEBUG;
2732             goto int_case;
2733         case TARGET_SO_REUSEADDR:
2734             optname = SO_REUSEADDR;
2735             goto int_case;
2736 #ifdef SO_REUSEPORT
2737         case TARGET_SO_REUSEPORT:
2738             optname = SO_REUSEPORT;
2739             goto int_case;
2740 #endif
2741         case TARGET_SO_TYPE:
2742             optname = SO_TYPE;
2743             goto int_case;
2744         case TARGET_SO_ERROR:
2745             optname = SO_ERROR;
2746             goto int_case;
2747         case TARGET_SO_DONTROUTE:
2748             optname = SO_DONTROUTE;
2749             goto int_case;
2750         case TARGET_SO_BROADCAST:
2751             optname = SO_BROADCAST;
2752             goto int_case;
2753         case TARGET_SO_SNDBUF:
2754             optname = SO_SNDBUF;
2755             goto int_case;
2756         case TARGET_SO_RCVBUF:
2757             optname = SO_RCVBUF;
2758             goto int_case;
2759         case TARGET_SO_KEEPALIVE:
2760             optname = SO_KEEPALIVE;
2761             goto int_case;
2762         case TARGET_SO_OOBINLINE:
2763             optname = SO_OOBINLINE;
2764             goto int_case;
2765         case TARGET_SO_NO_CHECK:
2766             optname = SO_NO_CHECK;
2767             goto int_case;
2768         case TARGET_SO_PRIORITY:
2769             optname = SO_PRIORITY;
2770             goto int_case;
2771 #ifdef SO_BSDCOMPAT
2772         case TARGET_SO_BSDCOMPAT:
2773             optname = SO_BSDCOMPAT;
2774             goto int_case;
2775 #endif
2776         case TARGET_SO_PASSCRED:
2777             optname = SO_PASSCRED;
2778             goto int_case;
2779         case TARGET_SO_TIMESTAMP:
2780             optname = SO_TIMESTAMP;
2781             goto int_case;
2782         case TARGET_SO_RCVLOWAT:
2783             optname = SO_RCVLOWAT;
2784             goto int_case;
2785         case TARGET_SO_ACCEPTCONN:
2786             optname = SO_ACCEPTCONN;
2787             goto int_case;
2788         case TARGET_SO_PROTOCOL:
2789             optname = SO_PROTOCOL;
2790             goto int_case;
2791         case TARGET_SO_DOMAIN:
2792             optname = SO_DOMAIN;
2793             goto int_case;
2794         default:
2795             goto int_case;
2796         }
2797         break;
2798     case SOL_TCP:
2799     case SOL_UDP:
2800         /* TCP and UDP options all take an 'int' value.  */
2801     int_case:
2802         if (get_user_u32(len, optlen))
2803             return -TARGET_EFAULT;
2804         if (len < 0)
2805             return -TARGET_EINVAL;
2806         lv = sizeof(lv);
2807         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2808         if (ret < 0)
2809             return ret;
2810         switch (optname) {
2811         case SO_TYPE:
2812             val = host_to_target_sock_type(val);
2813             break;
2814         case SO_ERROR:
2815             val = host_to_target_errno(val);
2816             break;
2817         }
2818         if (len > lv)
2819             len = lv;
2820         if (len == 4) {
2821             if (put_user_u32(val, optval_addr))
2822                 return -TARGET_EFAULT;
2823         } else {
2824             if (put_user_u8(val, optval_addr))
2825                 return -TARGET_EFAULT;
2826         }
2827         if (put_user_u32(len, optlen))
2828             return -TARGET_EFAULT;
2829         break;
2830     case SOL_IP:
2831         switch(optname) {
2832         case IP_TOS:
2833         case IP_TTL:
2834         case IP_HDRINCL:
2835         case IP_ROUTER_ALERT:
2836         case IP_RECVOPTS:
2837         case IP_RETOPTS:
2838         case IP_PKTINFO:
2839         case IP_MTU_DISCOVER:
2840         case IP_RECVERR:
2841         case IP_RECVTOS:
2842 #ifdef IP_FREEBIND
2843         case IP_FREEBIND:
2844 #endif
2845         case IP_MULTICAST_TTL:
2846         case IP_MULTICAST_LOOP:
2847             if (get_user_u32(len, optlen))
2848                 return -TARGET_EFAULT;
2849             if (len < 0)
2850                 return -TARGET_EINVAL;
2851             lv = sizeof(lv);
2852             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2853             if (ret < 0)
2854                 return ret;
2855             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2856                 len = 1;
2857                 if (put_user_u32(len, optlen)
2858                     || put_user_u8(val, optval_addr))
2859                     return -TARGET_EFAULT;
2860             } else {
2861                 if (len > sizeof(int))
2862                     len = sizeof(int);
2863                 if (put_user_u32(len, optlen)
2864                     || put_user_u32(val, optval_addr))
2865                     return -TARGET_EFAULT;
2866             }
2867             break;
2868         default:
2869             ret = -TARGET_ENOPROTOOPT;
2870             break;
2871         }
2872         break;
2873     case SOL_IPV6:
2874         switch (optname) {
2875         case IPV6_MTU_DISCOVER:
2876         case IPV6_MTU:
2877         case IPV6_V6ONLY:
2878         case IPV6_RECVPKTINFO:
2879         case IPV6_UNICAST_HOPS:
2880         case IPV6_MULTICAST_HOPS:
2881         case IPV6_MULTICAST_LOOP:
2882         case IPV6_RECVERR:
2883         case IPV6_RECVHOPLIMIT:
2884         case IPV6_2292HOPLIMIT:
2885         case IPV6_CHECKSUM:
2886         case IPV6_ADDRFORM:
2887         case IPV6_2292PKTINFO:
2888         case IPV6_RECVTCLASS:
2889         case IPV6_RECVRTHDR:
2890         case IPV6_2292RTHDR:
2891         case IPV6_RECVHOPOPTS:
2892         case IPV6_2292HOPOPTS:
2893         case IPV6_RECVDSTOPTS:
2894         case IPV6_2292DSTOPTS:
2895         case IPV6_TCLASS:
2896         case IPV6_ADDR_PREFERENCES:
2897 #ifdef IPV6_RECVPATHMTU
2898         case IPV6_RECVPATHMTU:
2899 #endif
2900 #ifdef IPV6_TRANSPARENT
2901         case IPV6_TRANSPARENT:
2902 #endif
2903 #ifdef IPV6_FREEBIND
2904         case IPV6_FREEBIND:
2905 #endif
2906 #ifdef IPV6_RECVORIGDSTADDR
2907         case IPV6_RECVORIGDSTADDR:
2908 #endif
2909             if (get_user_u32(len, optlen))
2910                 return -TARGET_EFAULT;
2911             if (len < 0)
2912                 return -TARGET_EINVAL;
2913             lv = sizeof(lv);
2914             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2915             if (ret < 0)
2916                 return ret;
2917             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2918                 len = 1;
2919                 if (put_user_u32(len, optlen)
2920                     || put_user_u8(val, optval_addr))
2921                     return -TARGET_EFAULT;
2922             } else {
2923                 if (len > sizeof(int))
2924                     len = sizeof(int);
2925                 if (put_user_u32(len, optlen)
2926                     || put_user_u32(val, optval_addr))
2927                     return -TARGET_EFAULT;
2928             }
2929             break;
2930         default:
2931             ret = -TARGET_ENOPROTOOPT;
2932             break;
2933         }
2934         break;
2935 #ifdef SOL_NETLINK
2936     case SOL_NETLINK:
2937         switch (optname) {
2938         case NETLINK_PKTINFO:
2939         case NETLINK_BROADCAST_ERROR:
2940         case NETLINK_NO_ENOBUFS:
2941 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2942         case NETLINK_LISTEN_ALL_NSID:
2943         case NETLINK_CAP_ACK:
2944 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2945 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2946         case NETLINK_EXT_ACK:
2947 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2948 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2949         case NETLINK_GET_STRICT_CHK:
2950 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2951             if (get_user_u32(len, optlen)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             if (len != sizeof(val)) {
2955                 return -TARGET_EINVAL;
2956             }
2957             lv = len;
2958             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2959             if (ret < 0) {
2960                 return ret;
2961             }
2962             if (put_user_u32(lv, optlen)
2963                 || put_user_u32(val, optval_addr)) {
2964                 return -TARGET_EFAULT;
2965             }
2966             break;
2967 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2968         case NETLINK_LIST_MEMBERSHIPS:
2969         {
2970             uint32_t *results;
2971             int i;
2972             if (get_user_u32(len, optlen)) {
2973                 return -TARGET_EFAULT;
2974             }
2975             if (len < 0) {
2976                 return -TARGET_EINVAL;
2977             }
2978             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2979             if (!results && len > 0) {
2980                 return -TARGET_EFAULT;
2981             }
2982             lv = len;
2983             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2984             if (ret < 0) {
2985                 unlock_user(results, optval_addr, 0);
2986                 return ret;
2987             }
2988             /* swap host endianness to target endianness. */
2989             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2990                 results[i] = tswap32(results[i]);
2991             }
2992             unlock_user(results, optval_addr, 0);
2993             if (put_user_u32(lv, optlen)) {
2994                 return -TARGET_EFAULT;
2995             }
2996             break;
2997         }
2998 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2999         default:
3000             goto unimplemented;
3001         }
3002         break;
3003 #endif /* SOL_NETLINK */
3004     default:
3005     unimplemented:
3006         qemu_log_mask(LOG_UNIMP,
3007                       "getsockopt level=%d optname=%d not yet supported\n",
3008                       level, optname);
3009         ret = -TARGET_EOPNOTSUPP;
3010         break;
3011     }
3012     return ret;
3013 }
3014 
3015 /* Convert target low/high pair representing file offset into the host
3016  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3017  * as the kernel doesn't handle them either.
3018  */
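     /* Worked example (illustrative, assuming a 32-bit target on a 64-bit
      * host): tlow=0x89abcdef and thigh=0x1 combine to off=0x189abcdef,
      * so *hlow receives the full value and *hhigh ends up 0.
      */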
3019 static void target_to_host_low_high(abi_ulong tlow,
3020                                     abi_ulong thigh,
3021                                     unsigned long *hlow,
3022                                     unsigned long *hhigh)
3023 {
3024     uint64_t off = tlow |
3025         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3026         TARGET_LONG_BITS / 2;
3027 
3028     *hlow = off;
3029     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3030 }
3031 
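     /* Copy a guest iovec array into a host struct iovec array, locking each
      * guest buffer.  Returns NULL and sets errno on failure; on success the
      * caller must release the buffers with unlock_iovec(), as
      * do_sendrecvmsg_locked() below does.
      */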
3032 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3033                                 abi_ulong count, int copy)
3034 {
3035     struct target_iovec *target_vec;
3036     struct iovec *vec;
3037     abi_ulong total_len, max_len;
3038     int i;
3039     int err = 0;
3040     bool bad_address = false;
3041 
3042     if (count == 0) {
3043         errno = 0;
3044         return NULL;
3045     }
3046     if (count > IOV_MAX) {
3047         errno = EINVAL;
3048         return NULL;
3049     }
3050 
3051     vec = g_try_new0(struct iovec, count);
3052     if (vec == NULL) {
3053         errno = ENOMEM;
3054         return NULL;
3055     }
3056 
3057     target_vec = lock_user(VERIFY_READ, target_addr,
3058                            count * sizeof(struct target_iovec), 1);
3059     if (target_vec == NULL) {
3060         err = EFAULT;
3061         goto fail2;
3062     }
3063 
3064     /* ??? If host page size > target page size, this will result in a
3065        value larger than what we can actually support.  */
3066     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3067     total_len = 0;
3068 
3069     for (i = 0; i < count; i++) {
3070         abi_ulong base = tswapal(target_vec[i].iov_base);
3071         abi_long len = tswapal(target_vec[i].iov_len);
3072 
3073         if (len < 0) {
3074             err = EINVAL;
3075             goto fail;
3076         } else if (len == 0) {
3077             /* Zero length pointer is ignored.  */
3078             vec[i].iov_base = 0;
3079         } else {
3080             vec[i].iov_base = lock_user(type, base, len, copy);
3081             /* If the first buffer pointer is bad, this is a fault.  But
3082              * subsequent bad buffers will result in a partial write; this
3083              * is realized by filling the vector with null pointers and
3084              * zero lengths. */
3085             if (!vec[i].iov_base) {
3086                 if (i == 0) {
3087                     err = EFAULT;
3088                     goto fail;
3089                 } else {
3090                     bad_address = true;
3091                 }
3092             }
3093             if (bad_address) {
3094                 len = 0;
3095             }
3096             if (len > max_len - total_len) {
3097                 len = max_len - total_len;
3098             }
3099         }
3100         vec[i].iov_len = len;
3101         total_len += len;
3102     }
3103 
3104     unlock_user(target_vec, target_addr, 0);
3105     return vec;
3106 
3107  fail:
3108     while (--i >= 0) {
3109         if (tswapal(target_vec[i].iov_len) > 0) {
3110             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3111         }
3112     }
3113     unlock_user(target_vec, target_addr, 0);
3114  fail2:
3115     g_free(vec);
3116     errno = err;
3117     return NULL;
3118 }
3119 
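     /* Release the buffers taken by lock_iovec(); when 'copy' is non-zero the
      * host data is written back to guest memory before unlocking.
      */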
3120 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3121                          abi_ulong count, int copy)
3122 {
3123     struct target_iovec *target_vec;
3124     int i;
3125 
3126     target_vec = lock_user(VERIFY_READ, target_addr,
3127                            count * sizeof(struct target_iovec), 1);
3128     if (target_vec) {
3129         for (i = 0; i < count; i++) {
3130             abi_ulong base = tswapal(target_vec[i].iov_base);
3131             abi_long len = tswapal(target_vec[i].iov_len);
3132             if (len < 0) {
3133                 break;
3134             }
3135             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3136         }
3137         unlock_user(target_vec, target_addr, 0);
3138     }
3139 
3140     g_free(vec);
3141 }
3142 
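     /* Translate TARGET_SOCK_* type and flag bits into their host SOCK_*
      * equivalents; returns -TARGET_EINVAL if a requested flag cannot be
      * expressed on this host.
      */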
3143 static inline int target_to_host_sock_type(int *type)
3144 {
3145     int host_type = 0;
3146     int target_type = *type;
3147 
3148     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3149     case TARGET_SOCK_DGRAM:
3150         host_type = SOCK_DGRAM;
3151         break;
3152     case TARGET_SOCK_STREAM:
3153         host_type = SOCK_STREAM;
3154         break;
3155     default:
3156         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3157         break;
3158     }
3159     if (target_type & TARGET_SOCK_CLOEXEC) {
3160 #if defined(SOCK_CLOEXEC)
3161         host_type |= SOCK_CLOEXEC;
3162 #else
3163         return -TARGET_EINVAL;
3164 #endif
3165     }
3166     if (target_type & TARGET_SOCK_NONBLOCK) {
3167 #if defined(SOCK_NONBLOCK)
3168         host_type |= SOCK_NONBLOCK;
3169 #elif !defined(O_NONBLOCK)
3170         return -TARGET_EINVAL;
3171 #endif
3172     }
3173     *type = host_type;
3174     return 0;
3175 }
3176 
3177 /* Try to emulate socket type flags after socket creation.  */
3178 static int sock_flags_fixup(int fd, int target_type)
3179 {
3180 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3181     if (target_type & TARGET_SOCK_NONBLOCK) {
3182         int flags = fcntl(fd, F_GETFL);
3183         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3184             close(fd);
3185             return -TARGET_EINVAL;
3186         }
3187     }
3188 #endif
3189     return fd;
3190 }
3191 
3192 /* do_socket() Must return target values and target errnos. */
3193 static abi_long do_socket(int domain, int type, int protocol)
3194 {
3195     int target_type = type;
3196     int ret;
3197 
3198     ret = target_to_host_sock_type(&type);
3199     if (ret) {
3200         return ret;
3201     }
3202 
3203     if (domain == PF_NETLINK && !(
3204 #ifdef CONFIG_RTNETLINK
3205          protocol == NETLINK_ROUTE ||
3206 #endif
3207          protocol == NETLINK_KOBJECT_UEVENT ||
3208          protocol == NETLINK_AUDIT)) {
3209         return -TARGET_EPROTONOSUPPORT;
3210     }
3211 
3212     if (domain == AF_PACKET ||
3213         (domain == AF_INET && type == SOCK_PACKET)) {
3214         protocol = tswap16(protocol);
3215     }
3216 
3217     ret = get_errno(socket(domain, type, protocol));
3218     if (ret >= 0) {
3219         ret = sock_flags_fixup(ret, target_type);
3220         if (type == SOCK_PACKET) {
3221             /* Handle an obsolete case:
3222              * if the socket type is SOCK_PACKET, bind by name.
3223              */
3224             fd_trans_register(ret, &target_packet_trans);
3225         } else if (domain == PF_NETLINK) {
3226             switch (protocol) {
3227 #ifdef CONFIG_RTNETLINK
3228             case NETLINK_ROUTE:
3229                 fd_trans_register(ret, &target_netlink_route_trans);
3230                 break;
3231 #endif
3232             case NETLINK_KOBJECT_UEVENT:
3233                 /* nothing to do: messages are strings */
3234                 break;
3235             case NETLINK_AUDIT:
3236                 fd_trans_register(ret, &target_netlink_audit_trans);
3237                 break;
3238             default:
3239                 g_assert_not_reached();
3240             }
3241         }
3242     }
3243     return ret;
3244 }
3245 
3246 /* do_bind() Must return target values and target errnos. */
3247 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3248                         socklen_t addrlen)
3249 {
3250     void *addr;
3251     abi_long ret;
3252 
3253     if ((int)addrlen < 0) {
3254         return -TARGET_EINVAL;
3255     }
3256 
3257     addr = alloca(addrlen+1);
3258 
3259     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3260     if (ret)
3261         return ret;
3262 
3263     return get_errno(bind(sockfd, addr, addrlen));
3264 }
3265 
3266 /* do_connect() Must return target values and target errnos. */
3267 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3268                            socklen_t addrlen)
3269 {
3270     void *addr;
3271     abi_long ret;
3272 
3273     if ((int)addrlen < 0) {
3274         return -TARGET_EINVAL;
3275     }
3276 
3277     addr = alloca(addrlen+1);
3278 
3279     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3280     if (ret)
3281         return ret;
3282 
3283     return get_errno(safe_connect(sockfd, addr, addrlen));
3284 }
3285 
3286 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3287 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3288                                       int flags, int send)
3289 {
3290     abi_long ret, len;
3291     struct msghdr msg;
3292     abi_ulong count;
3293     struct iovec *vec;
3294     abi_ulong target_vec;
3295 
3296     if (msgp->msg_name) {
3297         msg.msg_namelen = tswap32(msgp->msg_namelen);
3298         msg.msg_name = alloca(msg.msg_namelen+1);
3299         ret = target_to_host_sockaddr(fd, msg.msg_name,
3300                                       tswapal(msgp->msg_name),
3301                                       msg.msg_namelen);
3302         if (ret == -TARGET_EFAULT) {
3303             /* For connected sockets msg_name and msg_namelen must
3304              * be ignored, so returning EFAULT immediately is wrong.
3305              * Instead, pass a bad msg_name to the host kernel, and
3306              * let it decide whether to return EFAULT or not.
3307              */
3308             msg.msg_name = (void *)-1;
3309         } else if (ret) {
3310             goto out2;
3311         }
3312     } else {
3313         msg.msg_name = NULL;
3314         msg.msg_namelen = 0;
3315     }
3316     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3317     msg.msg_control = alloca(msg.msg_controllen);
3318     memset(msg.msg_control, 0, msg.msg_controllen);
3319 
3320     msg.msg_flags = tswap32(msgp->msg_flags);
3321 
3322     count = tswapal(msgp->msg_iovlen);
3323     target_vec = tswapal(msgp->msg_iov);
3324 
3325     if (count > IOV_MAX) {
3326         /* sendmsg/recvmsg return a different errno for this condition than
3327          * readv/writev, so we must catch it here before lock_iovec() does.
3328          */
3329         ret = -TARGET_EMSGSIZE;
3330         goto out2;
3331     }
3332 
3333     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3334                      target_vec, count, send);
3335     if (vec == NULL) {
3336         ret = -host_to_target_errno(errno);
3337         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3338         if (!send || ret) {
3339             goto out2;
3340         }
3341     }
3342     msg.msg_iovlen = count;
3343     msg.msg_iov = vec;
3344 
3345     if (send) {
3346         if (fd_trans_target_to_host_data(fd)) {
3347             void *host_msg;
3348 
3349             host_msg = g_malloc(msg.msg_iov->iov_len);
3350             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3351             ret = fd_trans_target_to_host_data(fd)(host_msg,
3352                                                    msg.msg_iov->iov_len);
3353             if (ret >= 0) {
3354                 msg.msg_iov->iov_base = host_msg;
3355                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3356             }
3357             g_free(host_msg);
3358         } else {
3359             ret = target_to_host_cmsg(&msg, msgp);
3360             if (ret == 0) {
3361                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3362             }
3363         }
3364     } else {
3365         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3366         if (!is_error(ret)) {
3367             len = ret;
3368             if (fd_trans_host_to_target_data(fd)) {
3369                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3370                                                MIN(msg.msg_iov->iov_len, len));
3371             }
3372             if (!is_error(ret)) {
3373                 ret = host_to_target_cmsg(msgp, &msg);
3374             }
3375             if (!is_error(ret)) {
3376                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3377                 msgp->msg_flags = tswap32(msg.msg_flags);
3378                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3379                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3380                                     msg.msg_name, msg.msg_namelen);
3381                     if (ret) {
3382                         goto out;
3383                     }
3384                 }
3385 
3386                 ret = len;
3387             }
3388         }
3389     }
3390 
3391 out:
3392     if (vec) {
3393         unlock_iovec(vec, target_vec, count, !send);
3394     }
3395 out2:
3396     return ret;
3397 }
3398 
3399 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3400                                int flags, int send)
3401 {
3402     abi_long ret;
3403     struct target_msghdr *msgp;
3404 
3405     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3406                           msgp,
3407                           target_msg,
3408                           send ? 1 : 0)) {
3409         return -TARGET_EFAULT;
3410     }
3411     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3412     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3413     return ret;
3414 }
3415 
3416 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3417  * so it might not have this *mmsg-specific flag either.
3418  */
3419 #ifndef MSG_WAITFORONE
3420 #define MSG_WAITFORONE 0x10000
3421 #endif
3422 
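     /* Emulate sendmmsg()/recvmmsg() by looping over do_sendrecvmsg_locked()
      * for each element of the guest mmsghdr vector.
      */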
3423 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3424                                 unsigned int vlen, unsigned int flags,
3425                                 int send)
3426 {
3427     struct target_mmsghdr *mmsgp;
3428     abi_long ret = 0;
3429     int i;
3430 
3431     if (vlen > UIO_MAXIOV) {
3432         vlen = UIO_MAXIOV;
3433     }
3434 
3435     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3436     if (!mmsgp) {
3437         return -TARGET_EFAULT;
3438     }
3439 
3440     for (i = 0; i < vlen; i++) {
3441         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3442         if (is_error(ret)) {
3443             break;
3444         }
3445         mmsgp[i].msg_len = tswap32(ret);
3446         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3447         if (flags & MSG_WAITFORONE) {
3448             flags |= MSG_DONTWAIT;
3449         }
3450     }
3451 
3452     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3453 
3454     /* Return the number of datagrams sent or received if we transferred
3455      * any at all; otherwise return the error.
3456      */
3457     if (i) {
3458         return i;
3459     }
3460     return ret;
3461 }
3462 
3463 /* do_accept4() Must return target values and target errnos. */
3464 static abi_long do_accept4(int fd, abi_ulong target_addr,
3465                            abi_ulong target_addrlen_addr, int flags)
3466 {
3467     socklen_t addrlen, ret_addrlen;
3468     void *addr;
3469     abi_long ret;
3470     int host_flags;
3471 
3472     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3473         return -TARGET_EINVAL;
3474     }
3475 
3476     host_flags = 0;
3477     if (flags & TARGET_SOCK_NONBLOCK) {
3478         host_flags |= SOCK_NONBLOCK;
3479     }
3480     if (flags & TARGET_SOCK_CLOEXEC) {
3481         host_flags |= SOCK_CLOEXEC;
3482     }
3483 
3484     if (target_addr == 0) {
3485         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3486     }
3487 
3488     /* linux returns EFAULT if addrlen pointer is invalid */
3489     if (get_user_u32(addrlen, target_addrlen_addr))
3490         return -TARGET_EFAULT;
3491 
3492     if ((int)addrlen < 0) {
3493         return -TARGET_EINVAL;
3494     }
3495 
3496     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497         return -TARGET_EFAULT;
3498     }
3499 
3500     addr = alloca(addrlen);
3501 
3502     ret_addrlen = addrlen;
3503     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3504     if (!is_error(ret)) {
3505         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507             ret = -TARGET_EFAULT;
3508         }
3509     }
3510     return ret;
3511 }
3512 
3513 /* do_getpeername() Must return target values and target errnos. */
3514 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3515                                abi_ulong target_addrlen_addr)
3516 {
3517     socklen_t addrlen, ret_addrlen;
3518     void *addr;
3519     abi_long ret;
3520 
3521     if (get_user_u32(addrlen, target_addrlen_addr))
3522         return -TARGET_EFAULT;
3523 
3524     if ((int)addrlen < 0) {
3525         return -TARGET_EINVAL;
3526     }
3527 
3528     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3529         return -TARGET_EFAULT;
3530     }
3531 
3532     addr = alloca(addrlen);
3533 
3534     ret_addrlen = addrlen;
3535     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3536     if (!is_error(ret)) {
3537         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3538         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3539             ret = -TARGET_EFAULT;
3540         }
3541     }
3542     return ret;
3543 }
3544 
3545 /* do_getsockname() Must return target values and target errnos. */
3546 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3547                                abi_ulong target_addrlen_addr)
3548 {
3549     socklen_t addrlen, ret_addrlen;
3550     void *addr;
3551     abi_long ret;
3552 
3553     if (get_user_u32(addrlen, target_addrlen_addr))
3554         return -TARGET_EFAULT;
3555 
3556     if ((int)addrlen < 0) {
3557         return -TARGET_EINVAL;
3558     }
3559 
3560     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3561         return -TARGET_EFAULT;
3562     }
3563 
3564     addr = alloca(addrlen);
3565 
3566     ret_addrlen = addrlen;
3567     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3568     if (!is_error(ret)) {
3569         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3570         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3571             ret = -TARGET_EFAULT;
3572         }
3573     }
3574     return ret;
3575 }
3576 
3577 /* do_socketpair() Must return target values and target errnos. */
3578 static abi_long do_socketpair(int domain, int type, int protocol,
3579                               abi_ulong target_tab_addr)
3580 {
3581     int tab[2];
3582     abi_long ret;
3583 
3584     target_to_host_sock_type(&type);
3585 
3586     ret = get_errno(socketpair(domain, type, protocol, tab));
3587     if (!is_error(ret)) {
3588         if (put_user_s32(tab[0], target_tab_addr)
3589             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3590             ret = -TARGET_EFAULT;
3591     }
3592     return ret;
3593 }
3594 
3595 /* do_sendto() Must return target values and target errnos. */
3596 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3597                           abi_ulong target_addr, socklen_t addrlen)
3598 {
3599     void *addr;
3600     void *host_msg = NULL;
3601     void *copy_msg = NULL;
3602     abi_long ret;
3603 
3604     if ((int)addrlen < 0) {
3605         return -TARGET_EINVAL;
3606     }
3607 
3608     if (len != 0) {
3609         host_msg = lock_user(VERIFY_READ, msg, len, 1);
3610         if (!host_msg) {
3611             return -TARGET_EFAULT;
3612         }
3613         if (fd_trans_target_to_host_data(fd)) {
3614             copy_msg = host_msg;
3615             host_msg = g_malloc(len);
3616             memcpy(host_msg, copy_msg, len);
3617             ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3618             if (ret < 0) {
3619                 goto fail;
3620             }
3621         }
3622     }
3623     if (target_addr) {
3624         addr = alloca(addrlen+1);
3625         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3626         if (ret) {
3627             goto fail;
3628         }
3629         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3630     } else {
3631         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3632     }
3633 fail:
3634     if (copy_msg) {
3635         g_free(host_msg);
3636         host_msg = copy_msg;
3637     }
3638     unlock_user(host_msg, msg, 0);
3639     return ret;
3640 }
3641 
3642 /* do_recvfrom() Must return target values and target errnos. */
3643 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3644                             abi_ulong target_addr,
3645                             abi_ulong target_addrlen)
3646 {
3647     socklen_t addrlen, ret_addrlen;
3648     void *addr;
3649     void *host_msg;
3650     abi_long ret;
3651 
3652     if (!msg) {
3653         host_msg = NULL;
3654     } else {
3655         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3656         if (!host_msg) {
3657             return -TARGET_EFAULT;
3658         }
3659     }
3660     if (target_addr) {
3661         if (get_user_u32(addrlen, target_addrlen)) {
3662             ret = -TARGET_EFAULT;
3663             goto fail;
3664         }
3665         if ((int)addrlen < 0) {
3666             ret = -TARGET_EINVAL;
3667             goto fail;
3668         }
3669         addr = alloca(addrlen);
3670         ret_addrlen = addrlen;
3671         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3672                                       addr, &ret_addrlen));
3673     } else {
3674         addr = NULL; /* To keep compiler quiet.  */
3675         addrlen = 0; /* To keep compiler quiet.  */
3676         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3677     }
3678     if (!is_error(ret)) {
3679         if (fd_trans_host_to_target_data(fd)) {
3680             abi_long trans;
3681             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3682             if (is_error(trans)) {
3683                 ret = trans;
3684                 goto fail;
3685             }
3686         }
3687         if (target_addr) {
3688             host_to_target_sockaddr(target_addr, addr,
3689                                     MIN(addrlen, ret_addrlen));
3690             if (put_user_u32(ret_addrlen, target_addrlen)) {
3691                 ret = -TARGET_EFAULT;
3692                 goto fail;
3693             }
3694         }
3695         unlock_user(host_msg, msg, len);
3696     } else {
3697 fail:
3698         unlock_user(host_msg, msg, 0);
3699     }
3700     return ret;
3701 }
3702 
3703 #ifdef TARGET_NR_socketcall
3704 /* do_socketcall() must return target values and target errnos. */
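     /* For example, a guest socketcall(TARGET_SYS_SENDTO, args) fetches six
      * abi_long values from 'args' and dispatches to do_sendto().
      */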
3705 static abi_long do_socketcall(int num, abi_ulong vptr)
3706 {
3707     static const unsigned nargs[] = { /* number of arguments per operation */
3708         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3709         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3710         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3711         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3712         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3713         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3714         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3715         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3716         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3717         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3718         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3719         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3720         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3721         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3722         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3723         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3724         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3725         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3726         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3727         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3728     };
3729     abi_long a[6]; /* max 6 args */
3730     unsigned i;
3731 
3732     /* check the range of the first argument num */
3733     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3734     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3735         return -TARGET_EINVAL;
3736     }
3737     /* ensure we have space for args */
3738     if (nargs[num] > ARRAY_SIZE(a)) {
3739         return -TARGET_EINVAL;
3740     }
3741     /* collect the arguments in a[] according to nargs[] */
3742     for (i = 0; i < nargs[num]; ++i) {
3743         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3744             return -TARGET_EFAULT;
3745         }
3746     }
3747     /* now when we have the args, invoke the appropriate underlying function */
3748     switch (num) {
3749     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3750         return do_socket(a[0], a[1], a[2]);
3751     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3752         return do_bind(a[0], a[1], a[2]);
3753     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3754         return do_connect(a[0], a[1], a[2]);
3755     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3756         return get_errno(listen(a[0], a[1]));
3757     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3758         return do_accept4(a[0], a[1], a[2], 0);
3759     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3760         return do_getsockname(a[0], a[1], a[2]);
3761     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3762         return do_getpeername(a[0], a[1], a[2]);
3763     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3764         return do_socketpair(a[0], a[1], a[2], a[3]);
3765     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3766         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3767     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3768         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3769     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3770         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3771     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3772         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3773     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3774         return get_errno(shutdown(a[0], a[1]));
3775     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3776         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3777     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3778         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3779     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3780         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3781     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3782         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3783     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3784         return do_accept4(a[0], a[1], a[2], a[3]);
3785     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3786         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3787     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3788         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3789     default:
3790         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3791         return -TARGET_EINVAL;
3792     }
3793 }
3794 #endif
3795 
3796 #ifndef TARGET_SEMID64_DS
3797 /* asm-generic version of this struct */
3798 struct target_semid64_ds
3799 {
3800   struct target_ipc_perm sem_perm;
3801   abi_ulong sem_otime;
3802 #if TARGET_ABI_BITS == 32
3803   abi_ulong __unused1;
3804 #endif
3805   abi_ulong sem_ctime;
3806 #if TARGET_ABI_BITS == 32
3807   abi_ulong __unused2;
3808 #endif
3809   abi_ulong sem_nsems;
3810   abi_ulong __unused3;
3811   abi_ulong __unused4;
3812 };
3813 #endif
3814 
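     /* Copy the ipc_perm member of a guest semid64_ds into the host
      * struct ipc_perm, byte-swapping each field as needed.
      */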
3815 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3816                                                abi_ulong target_addr)
3817 {
3818     struct target_ipc_perm *target_ip;
3819     struct target_semid64_ds *target_sd;
3820 
3821     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3822         return -TARGET_EFAULT;
3823     target_ip = &(target_sd->sem_perm);
3824     host_ip->__key = tswap32(target_ip->__key);
3825     host_ip->uid = tswap32(target_ip->uid);
3826     host_ip->gid = tswap32(target_ip->gid);
3827     host_ip->cuid = tswap32(target_ip->cuid);
3828     host_ip->cgid = tswap32(target_ip->cgid);
3829 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3830     host_ip->mode = tswap32(target_ip->mode);
3831 #else
3832     host_ip->mode = tswap16(target_ip->mode);
3833 #endif
3834 #if defined(TARGET_PPC)
3835     host_ip->__seq = tswap32(target_ip->__seq);
3836 #else
3837     host_ip->__seq = tswap16(target_ip->__seq);
3838 #endif
3839     unlock_user_struct(target_sd, target_addr, 0);
3840     return 0;
3841 }
3842 
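     /* Write a host struct ipc_perm back into the guest semid64_ds,
      * byte-swapping each field as needed.
      */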
3843 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3844                                                struct ipc_perm *host_ip)
3845 {
3846     struct target_ipc_perm *target_ip;
3847     struct target_semid64_ds *target_sd;
3848 
3849     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3850         return -TARGET_EFAULT;
3851     target_ip = &(target_sd->sem_perm);
3852     target_ip->__key = tswap32(host_ip->__key);
3853     target_ip->uid = tswap32(host_ip->uid);
3854     target_ip->gid = tswap32(host_ip->gid);
3855     target_ip->cuid = tswap32(host_ip->cuid);
3856     target_ip->cgid = tswap32(host_ip->cgid);
3857 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3858     target_ip->mode = tswap32(host_ip->mode);
3859 #else
3860     target_ip->mode = tswap16(host_ip->mode);
3861 #endif
3862 #if defined(TARGET_PPC)
3863     target_ip->__seq = tswap32(host_ip->__seq);
3864 #else
3865     target_ip->__seq = tswap16(host_ip->__seq);
3866 #endif
3867     unlock_user_struct(target_sd, target_addr, 1);
3868     return 0;
3869 }
3870 
3871 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3872                                                abi_ulong target_addr)
3873 {
3874     struct target_semid64_ds *target_sd;
3875 
3876     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3877         return -TARGET_EFAULT;
3878     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3879         return -TARGET_EFAULT;
3880     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3881     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3882     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3883     unlock_user_struct(target_sd, target_addr, 0);
3884     return 0;
3885 }
3886 
3887 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3888                                                struct semid_ds *host_sd)
3889 {
3890     struct target_semid64_ds *target_sd;
3891 
3892     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3893         return -TARGET_EFAULT;
3894     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3895         return -TARGET_EFAULT;
3896     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3897     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3898     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3899     unlock_user_struct(target_sd, target_addr, 1);
3900     return 0;
3901 }
3902 
3903 struct target_seminfo {
3904     int semmap;
3905     int semmni;
3906     int semmns;
3907     int semmnu;
3908     int semmsl;
3909     int semopm;
3910     int semume;
3911     int semusz;
3912     int semvmx;
3913     int semaem;
3914 };
3915 
3916 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3917                                               struct seminfo *host_seminfo)
3918 {
3919     struct target_seminfo *target_seminfo;
3920     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3921         return -TARGET_EFAULT;
3922     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3923     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3924     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3925     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3926     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3927     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3928     __put_user(host_seminfo->semume, &target_seminfo->semume);
3929     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3930     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3931     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3932     unlock_user_struct(target_seminfo, target_addr, 1);
3933     return 0;
3934 }
3935 
3936 union semun {
3937     int val;
3938     struct semid_ds *buf;
3939     unsigned short *array;
3940     struct seminfo *__buf;
3941 };
3942 
3943 union target_semun {
3944     int val;
3945     abi_ulong buf;
3946     abi_ulong array;
3947     abi_ulong __buf;
3948 };
3949 
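     /* For GETALL/SETALL: allocate a host array of semaphore values and fill
      * it from guest memory; the element count is obtained via IPC_STAT.
      */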
3950 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3951                                                abi_ulong target_addr)
3952 {
3953     int nsems;
3954     unsigned short *array;
3955     union semun semun;
3956     struct semid_ds semid_ds;
3957     int i, ret;
3958 
3959     semun.buf = &semid_ds;
3960 
3961     ret = semctl(semid, 0, IPC_STAT, semun);
3962     if (ret == -1)
3963         return get_errno(ret);
3964 
3965     nsems = semid_ds.sem_nsems;
3966 
3967     *host_array = g_try_new(unsigned short, nsems);
3968     if (!*host_array) {
3969         return -TARGET_ENOMEM;
3970     }
3971     array = lock_user(VERIFY_READ, target_addr,
3972                       nsems*sizeof(unsigned short), 1);
3973     if (!array) {
3974         g_free(*host_array);
3975         return -TARGET_EFAULT;
3976     }
3977 
3978     for (i = 0; i < nsems; i++) {
3979         __get_user((*host_array)[i], &array[i]);
3980     }
3981     unlock_user(array, target_addr, 0);
3982 
3983     return 0;
3984 }
3985 
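     /* Counterpart of target_to_host_semarray(): copy the host semaphore
      * values back to guest memory and free the host array.
      */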
3986 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3987                                                unsigned short **host_array)
3988 {
3989     int nsems;
3990     unsigned short *array;
3991     union semun semun;
3992     struct semid_ds semid_ds;
3993     int i, ret;
3994 
3995     semun.buf = &semid_ds;
3996 
3997     ret = semctl(semid, 0, IPC_STAT, semun);
3998     if (ret == -1)
3999         return get_errno(ret);
4000 
4001     nsems = semid_ds.sem_nsems;
4002 
4003     array = lock_user(VERIFY_WRITE, target_addr,
4004                       nsems*sizeof(unsigned short), 0);
4005     if (!array)
4006         return -TARGET_EFAULT;
4007 
4008     for (i = 0; i < nsems; i++) {
4009         __put_user((*host_array)[i], &array[i]);
4010     }
4011     g_free(*host_array);
4012     unlock_user(array, target_addr, 1);
4013 
4014     return 0;
4015 }
4016 
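     /* Emulate semctl(): convert the guest semun argument into the host form
      * required by each command, issue the host semctl(), and convert any
      * results back to the guest layout.
      */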
4017 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4018                                  abi_ulong target_arg)
4019 {
4020     union target_semun target_su = { .buf = target_arg };
4021     union semun arg;
4022     struct semid_ds dsarg;
4023     unsigned short *array = NULL;
4024     struct seminfo seminfo;
4025     abi_long ret = -TARGET_EINVAL;
4026     abi_long err;
4027     cmd &= 0xff;
4028 
4029     switch (cmd) {
4030     case GETVAL:
4031     case SETVAL:
4032         /* In 64 bit cross-endian situations, we will erroneously pick up
4033          * the wrong half of the union for the "val" element.  To rectify
4034          * this, the entire 8-byte structure is byteswapped, followed by
4035          * a swap of the 4 byte val field. In other cases, the data is
4036          * already in proper host byte order. */
4037         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4038             target_su.buf = tswapal(target_su.buf);
4039             arg.val = tswap32(target_su.val);
4040         } else {
4041             arg.val = target_su.val;
4042         }
4043         ret = get_errno(semctl(semid, semnum, cmd, arg));
4044         break;
4045     case GETALL:
4046     case SETALL:
4047         err = target_to_host_semarray(semid, &array, target_su.array);
4048         if (err)
4049             return err;
4050         arg.array = array;
4051         ret = get_errno(semctl(semid, semnum, cmd, arg));
4052         err = host_to_target_semarray(semid, target_su.array, &array);
4053         if (err)
4054             return err;
4055         break;
4056     case IPC_STAT:
4057     case IPC_SET:
4058     case SEM_STAT:
4059         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4060         if (err)
4061             return err;
4062         arg.buf = &dsarg;
4063         ret = get_errno(semctl(semid, semnum, cmd, arg));
4064         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4065         if (err)
4066             return err;
4067         break;
4068     case IPC_INFO:
4069     case SEM_INFO:
4070         arg.__buf = &seminfo;
4071         ret = get_errno(semctl(semid, semnum, cmd, arg));
4072         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4073         if (err)
4074             return err;
4075         break;
4076     case IPC_RMID:
4077     case GETPID:
4078     case GETNCNT:
4079     case GETZCNT:
4080         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4081         break;
4082     }
4083 
4084     return ret;
4085 }
4086 
4087 struct target_sembuf {
4088     unsigned short sem_num;
4089     short sem_op;
4090     short sem_flg;
4091 };
4092 
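     /* Copy an array of 'nsops' guest sembuf entries into host struct sembuf. */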
4093 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4094                                              abi_ulong target_addr,
4095                                              unsigned nsops)
4096 {
4097     struct target_sembuf *target_sembuf;
4098     int i;
4099 
4100     target_sembuf = lock_user(VERIFY_READ, target_addr,
4101                               nsops*sizeof(struct target_sembuf), 1);
4102     if (!target_sembuf)
4103         return -TARGET_EFAULT;
4104 
4105     for (i = 0; i < nsops; i++) {
4106         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4107         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4108         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4109     }
4110 
4111     unlock_user(target_sembuf, target_addr, 0);
4112 
4113     return 0;
4114 }
4115 
4116 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4117     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4118 
4119 /*
4120  * This macro is required to handle the s390 variants, which passes the
4121  * This macro is required to handle the s390 variant, which passes the
4122  * arguments in a different order than the default.
4123 #ifdef __s390x__
4124 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4125   (__nsops), (__timeout), (__sops)
4126 #else
4127 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4128   (__nsops), 0, (__sops), (__timeout)
4129 #endif
4130 
4131 static inline abi_long do_semtimedop(int semid,
4132                                      abi_long ptr,
4133                                      unsigned nsops,
4134                                      abi_long timeout, bool time64)
4135 {
4136     struct sembuf *sops;
4137     struct timespec ts, *pts = NULL;
4138     abi_long ret;
4139 
4140     if (timeout) {
4141         pts = &ts;
4142         if (time64) {
4143             if (target_to_host_timespec64(pts, timeout)) {
4144                 return -TARGET_EFAULT;
4145             }
4146         } else {
4147             if (target_to_host_timespec(pts, timeout)) {
4148                 return -TARGET_EFAULT;
4149             }
4150         }
4151     }
4152 
4153     if (nsops > TARGET_SEMOPM) {
4154         return -TARGET_E2BIG;
4155     }
4156 
4157     sops = g_new(struct sembuf, nsops);
4158 
4159     if (target_to_host_sembuf(sops, ptr, nsops)) {
4160         g_free(sops);
4161         return -TARGET_EFAULT;
4162     }
4163 
4164     ret = -TARGET_ENOSYS;
4165 #ifdef __NR_semtimedop
4166     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4167 #endif
4168 #ifdef __NR_ipc
4169     if (ret == -TARGET_ENOSYS) {
4170         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4171                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4172     }
4173 #endif
4174     g_free(sops);
4175     return ret;
4176 }
4177 #endif
4178 
4179 struct target_msqid_ds
4180 {
4181     struct target_ipc_perm msg_perm;
4182     abi_ulong msg_stime;
4183 #if TARGET_ABI_BITS == 32
4184     abi_ulong __unused1;
4185 #endif
4186     abi_ulong msg_rtime;
4187 #if TARGET_ABI_BITS == 32
4188     abi_ulong __unused2;
4189 #endif
4190     abi_ulong msg_ctime;
4191 #if TARGET_ABI_BITS == 32
4192     abi_ulong __unused3;
4193 #endif
4194     abi_ulong __msg_cbytes;
4195     abi_ulong msg_qnum;
4196     abi_ulong msg_qbytes;
4197     abi_ulong msg_lspid;
4198     abi_ulong msg_lrpid;
4199     abi_ulong __unused4;
4200     abi_ulong __unused5;
4201 };
4202 
4203 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4204                                                abi_ulong target_addr)
4205 {
4206     struct target_msqid_ds *target_md;
4207 
4208     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4209         return -TARGET_EFAULT;
4210     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4211         return -TARGET_EFAULT;
4212     host_md->msg_stime = tswapal(target_md->msg_stime);
4213     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4214     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4215     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4216     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4217     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4218     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4219     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4220     unlock_user_struct(target_md, target_addr, 0);
4221     return 0;
4222 }
4223 
4224 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4225                                                struct msqid_ds *host_md)
4226 {
4227     struct target_msqid_ds *target_md;
4228 
4229     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4230         return -TARGET_EFAULT;
4231     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4232         return -TARGET_EFAULT;
4233     target_md->msg_stime = tswapal(host_md->msg_stime);
4234     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4235     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4236     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4237     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4238     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4239     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4240     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4241     unlock_user_struct(target_md, target_addr, 1);
4242     return 0;
4243 }
4244 
4245 struct target_msginfo {
4246     int msgpool;
4247     int msgmap;
4248     int msgmax;
4249     int msgmnb;
4250     int msgmni;
4251     int msgssz;
4252     int msgtql;
4253     unsigned short int msgseg;
4254 };
4255 
4256 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4257                                               struct msginfo *host_msginfo)
4258 {
4259     struct target_msginfo *target_msginfo;
4260     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4261         return -TARGET_EFAULT;
4262     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4263     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4264     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4265     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4266     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4267     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4268     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4269     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4270     unlock_user_struct(target_msginfo, target_addr, 1);
4271     return 0;
4272 }
4273 
4274 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4275 {
4276     struct msqid_ds dsarg;
4277     struct msginfo msginfo;
4278     abi_long ret = -TARGET_EINVAL;
4279 
4280     cmd &= 0xff;
4281 
4282     switch (cmd) {
4283     case IPC_STAT:
4284     case IPC_SET:
4285     case MSG_STAT:
4286         if (target_to_host_msqid_ds(&dsarg,ptr))
4287             return -TARGET_EFAULT;
4288         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4289         if (host_to_target_msqid_ds(ptr,&dsarg))
4290             return -TARGET_EFAULT;
4291         break;
4292     case IPC_RMID:
4293         ret = get_errno(msgctl(msgid, cmd, NULL));
4294         break;
4295     case IPC_INFO:
4296     case MSG_INFO:
4297         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4298         if (host_to_target_msginfo(ptr, &msginfo))
4299             return -TARGET_EFAULT;
4300         break;
4301     }
4302 
4303     return ret;
4304 }
4305 
4306 struct target_msgbuf {
4307     abi_long mtype;
4308     char	mtext[1];
4309 };
4310 
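     /* Emulate msgsnd(): copy the guest msgbuf (mtype plus mtext) into a host
      * buffer and send it with msgsnd(2), or via ipc(2) if msgsnd is missing.
      */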
4311 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4312                                  ssize_t msgsz, int msgflg)
4313 {
4314     struct target_msgbuf *target_mb;
4315     struct msgbuf *host_mb;
4316     abi_long ret = 0;
4317 
4318     if (msgsz < 0) {
4319         return -TARGET_EINVAL;
4320     }
4321 
4322     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4323         return -TARGET_EFAULT;
4324     host_mb = g_try_malloc(msgsz + sizeof(long));
4325     if (!host_mb) {
4326         unlock_user_struct(target_mb, msgp, 0);
4327         return -TARGET_ENOMEM;
4328     }
4329     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4330     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4331     ret = -TARGET_ENOSYS;
4332 #ifdef __NR_msgsnd
4333     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4334 #endif
4335 #ifdef __NR_ipc
4336     if (ret == -TARGET_ENOSYS) {
4337 #ifdef __s390x__
4338         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4339                                  host_mb));
4340 #else
4341         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4342                                  host_mb, 0));
4343 #endif
4344     }
4345 #endif
4346     g_free(host_mb);
4347     unlock_user_struct(target_mb, msgp, 0);
4348 
4349     return ret;
4350 }
4351 
4352 #ifdef __NR_ipc
4353 #if defined(__sparc__)
4354 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4355 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4356 #elif defined(__s390x__)
4357 /* The s390 sys_ipc variant has only five parameters.  */
4358 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4359     ((long int[]){(long int)__msgp, __msgtyp})
4360 #else
4361 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4362     ((long int[]){(long int)__msgp, __msgtyp}), 0
4363 #endif
4364 #endif
4365 
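     /* Emulate msgrcv(): receive into a host buffer, then copy mtype and the
      * received mtext back to the guest msgbuf.
      */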
4366 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4367                                  ssize_t msgsz, abi_long msgtyp,
4368                                  int msgflg)
4369 {
4370     struct target_msgbuf *target_mb;
4371     char *target_mtext;
4372     struct msgbuf *host_mb;
4373     abi_long ret = 0;
4374 
4375     if (msgsz < 0) {
4376         return -TARGET_EINVAL;
4377     }
4378 
4379     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4380         return -TARGET_EFAULT;
4381 
4382     host_mb = g_try_malloc(msgsz + sizeof(long));
4383     if (!host_mb) {
4384         ret = -TARGET_ENOMEM;
4385         goto end;
4386     }
4387     ret = -TARGET_ENOSYS;
4388 #ifdef __NR_msgrcv
4389     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4390 #endif
4391 #ifdef __NR_ipc
4392     if (ret == -TARGET_ENOSYS) {
4393         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4394                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4395     }
4396 #endif
4397 
4398     if (ret > 0) {
4399         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4400         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4401         if (!target_mtext) {
4402             ret = -TARGET_EFAULT;
4403             goto end;
4404         }
4405         memcpy(target_mb->mtext, host_mb->mtext, ret);
4406         unlock_user(target_mtext, target_mtext_addr, ret);
4407     }
4408 
4409     target_mb->mtype = tswapal(host_mb->mtype);
4410 
4411 end:
4412     if (target_mb)
4413         unlock_user_struct(target_mb, msgp, 1);
4414     g_free(host_mb);
4415     return ret;
4416 }
4417 
4418 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4419                                                abi_ulong target_addr)
4420 {
4421     struct target_shmid_ds *target_sd;
4422 
4423     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4424         return -TARGET_EFAULT;
4425     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4426         return -TARGET_EFAULT;
4427     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4428     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4429     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4430     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4431     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4432     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4433     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4434     unlock_user_struct(target_sd, target_addr, 0);
4435     return 0;
4436 }
4437 
4438 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4439                                                struct shmid_ds *host_sd)
4440 {
4441     struct target_shmid_ds *target_sd;
4442 
4443     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4444         return -TARGET_EFAULT;
4445     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4446         return -TARGET_EFAULT;
4447     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4448     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4449     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4450     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4451     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4452     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4453     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4454     unlock_user_struct(target_sd, target_addr, 1);
4455     return 0;
4456 }
4457 
4458 struct  target_shminfo {
4459     abi_ulong shmmax;
4460     abi_ulong shmmin;
4461     abi_ulong shmmni;
4462     abi_ulong shmseg;
4463     abi_ulong shmall;
4464 };
4465 
4466 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4467                                               struct shminfo *host_shminfo)
4468 {
4469     struct target_shminfo *target_shminfo;
4470     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4471         return -TARGET_EFAULT;
4472     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4473     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4474     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4475     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4476     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4477     unlock_user_struct(target_shminfo, target_addr, 1);
4478     return 0;
4479 }
4480 
4481 struct target_shm_info {
4482     int used_ids;
4483     abi_ulong shm_tot;
4484     abi_ulong shm_rss;
4485     abi_ulong shm_swp;
4486     abi_ulong swap_attempts;
4487     abi_ulong swap_successes;
4488 };
4489 
4490 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4491                                                struct shm_info *host_shm_info)
4492 {
4493     struct target_shm_info *target_shm_info;
4494     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4495         return -TARGET_EFAULT;
4496     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4497     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4498     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4499     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4500     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4501     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4502     unlock_user_struct(target_shm_info, target_addr, 1);
4503     return 0;
4504 }
4505 
4506 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4507 {
4508     struct shmid_ds dsarg;
4509     struct shminfo shminfo;
4510     struct shm_info shm_info;
4511     abi_long ret = -TARGET_EINVAL;
4512 
4513     cmd &= 0xff;
4514 
4515     switch(cmd) {
4516     case IPC_STAT:
4517     case IPC_SET:
4518     case SHM_STAT:
4519         if (target_to_host_shmid_ds(&dsarg, buf))
4520             return -TARGET_EFAULT;
4521         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4522         if (host_to_target_shmid_ds(buf, &dsarg))
4523             return -TARGET_EFAULT;
4524         break;
4525     case IPC_INFO:
4526         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4527         if (host_to_target_shminfo(buf, &shminfo))
4528             return -TARGET_EFAULT;
4529         break;
4530     case SHM_INFO:
4531         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4532         if (host_to_target_shm_info(buf, &shm_info))
4533             return -TARGET_EFAULT;
4534         break;
4535     case IPC_RMID:
4536     case SHM_LOCK:
4537     case SHM_UNLOCK:
4538         ret = get_errno(shmctl(shmid, cmd, NULL));
4539         break;
4540     }
4541 
4542     return ret;
4543 }
4544 
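/*
 * Editor's illustration (not part of the original syscall.c): a minimal
 * guest-side sketch of what do_shmctl() above ends up servicing.  IPC_STAT
 * round-trips a shmid_ds through target_to_host_shmid_ds() and
 * host_to_target_shmid_ds(), while IPC_RMID carries no payload and takes the
 * NULL branch.  Plain POSIX SysV shared-memory calls, nothing QEMU-specific.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
    struct shmid_ds ds;
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

    if (id < 0) {
        perror("shmget");
        return 1;
    }
    /* IPC_STAT: kernel fills ds, QEMU converts it back to target layout */
    if (shmctl(id, IPC_STAT, &ds) == 0) {
        printf("size %zu, attached %lu times\n",
               (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);
    }
    shmctl(id, IPC_RMID, NULL);    /* no payload: hits the NULL case above */
    return 0;
}
#endif
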
4545 #ifdef TARGET_NR_ipc
4546 /* ??? This only works with linear mappings.  */
4547 /* do_ipc() must return target values and target errnos. */
4548 static abi_long do_ipc(CPUArchState *cpu_env,
4549                        unsigned int call, abi_long first,
4550                        abi_long second, abi_long third,
4551                        abi_long ptr, abi_long fifth)
4552 {
4553     int version;
4554     abi_long ret = 0;
4555 
4556     version = call >> 16;
4557     call &= 0xffff;
4558 
4559     switch (call) {
4560     case IPCOP_semop:
4561         ret = do_semtimedop(first, ptr, second, 0, false);
4562         break;
4563     case IPCOP_semtimedop:
4564     /*
4565      * The s390 sys_ipc variant has only five parameters instead of six
4566      * (as in the default variant); the only difference is the handling of
4567      * SEMTIMEDOP, where s390 passes the pointer to the struct timespec in
4568      * the third parameter while the generic variant uses the fifth.
4569      */
4570 #if defined(TARGET_S390X)
4571         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4572 #else
4573         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4574 #endif
4575         break;
4576 
4577     case IPCOP_semget:
4578         ret = get_errno(semget(first, second, third));
4579         break;
4580 
4581     case IPCOP_semctl: {
4582         /* The semun argument to semctl is passed by value, so dereference the
4583          * ptr argument. */
4584         abi_ulong atptr;
4585         get_user_ual(atptr, ptr);
4586         ret = do_semctl(first, second, third, atptr);
4587         break;
4588     }
4589 
4590     case IPCOP_msgget:
4591         ret = get_errno(msgget(first, second));
4592         break;
4593 
4594     case IPCOP_msgsnd:
4595         ret = do_msgsnd(first, ptr, second, third);
4596         break;
4597 
4598     case IPCOP_msgctl:
4599         ret = do_msgctl(first, second, ptr);
4600         break;
4601 
4602     case IPCOP_msgrcv:
4603         switch (version) {
4604         case 0:
4605             {
4606                 struct target_ipc_kludge {
4607                     abi_long msgp;
4608                     abi_long msgtyp;
4609                 } *tmp;
4610 
4611                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4612                     ret = -TARGET_EFAULT;
4613                     break;
4614                 }
4615 
4616                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4617 
4618                 unlock_user_struct(tmp, ptr, 0);
4619                 break;
4620             }
4621         default:
4622             ret = do_msgrcv(first, ptr, second, fifth, third);
4623         }
4624         break;
4625 
4626     case IPCOP_shmat:
4627         switch (version) {
4628         default:
4629         {
4630             abi_ulong raddr;
4631             raddr = target_shmat(cpu_env, first, ptr, second);
4632             if (is_error(raddr))
4633                 return get_errno(raddr);
4634             if (put_user_ual(raddr, third))
4635                 return -TARGET_EFAULT;
4636             break;
4637         }
4638         case 1:
4639             ret = -TARGET_EINVAL;
4640             break;
4641         }
4642         break;
4643     case IPCOP_shmdt:
4644         ret = target_shmdt(ptr);
4645         break;
4646 
4647     case IPCOP_shmget:
4648         /* IPC_* flag values are the same on all linux platforms */
4649         ret = get_errno(shmget(first, second, third));
4650         break;
4651 
4652     /* IPC_* and SHM_* command values are the same on all linux platforms */
4653     case IPCOP_shmctl:
4654         ret = do_shmctl(first, second, ptr);
4655         break;
4656     default:
4657         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4658                       call, version);
4659         ret = -TARGET_ENOSYS;
4660         break;
4661     }
4662     return ret;
4663 }
4664 #endif
4665 
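/*
 * Editor's illustration (not part of the original syscall.c): on 32-bit
 * targets whose libc routes SysV IPC through the multiplexed ipc() syscall,
 * an ordinary semtimedop() call like the one below arrives at do_ipc() as
 * IPCOP_semtimedop, with the timeout pointer in the fifth argument (third on
 * s390), as the comment in that case notes.
 */
#if 0   /* illustrative sketch only -- never compiled */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
    int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
    struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
    struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };

    if (semid < 0) {
        perror("semget");
        return 1;
    }
    /* waits at most one second; EAGAIN indicates the timeout expired */
    if (semtimedop(semid, &op, 1, &timeout) < 0) {
        perror("semtimedop");
    }
    semctl(semid, 0, IPC_RMID);
    return 0;
}
#endif
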
4666 /* kernel structure types definitions */
4667 
4668 #define STRUCT(name, ...) STRUCT_ ## name,
4669 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4670 enum {
4671 #include "syscall_types.h"
4672 STRUCT_MAX
4673 };
4674 #undef STRUCT
4675 #undef STRUCT_SPECIAL
4676 
4677 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4678 #define STRUCT_SPECIAL(name)
4679 #include "syscall_types.h"
4680 #undef STRUCT
4681 #undef STRUCT_SPECIAL
4682 
4683 #define MAX_STRUCT_SIZE 4096
4684 
4685 #ifdef CONFIG_FIEMAP
4686 /* So fiemap access checks don't overflow on 32 bit systems.
4687  * This is very slightly smaller than the limit imposed by
4688  * the underlying kernel.
4689  */
4690 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4691                             / sizeof(struct fiemap_extent))
4692 
4693 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4694                                        int fd, int cmd, abi_long arg)
4695 {
4696     /* The parameter for this ioctl is a struct fiemap followed
4697      * by an array of struct fiemap_extent whose size is set
4698      * in fiemap->fm_extent_count. The array is filled in by the
4699      * ioctl.
4700      */
4701     int target_size_in, target_size_out;
4702     struct fiemap *fm;
4703     const argtype *arg_type = ie->arg_type;
4704     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4705     void *argptr, *p;
4706     abi_long ret;
4707     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4708     uint32_t outbufsz;
4709     int free_fm = 0;
4710 
4711     assert(arg_type[0] == TYPE_PTR);
4712     assert(ie->access == IOC_RW);
4713     arg_type++;
4714     target_size_in = thunk_type_size(arg_type, 0);
4715     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4716     if (!argptr) {
4717         return -TARGET_EFAULT;
4718     }
4719     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4720     unlock_user(argptr, arg, 0);
4721     fm = (struct fiemap *)buf_temp;
4722     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4723         return -TARGET_EINVAL;
4724     }
4725 
4726     outbufsz = sizeof (*fm) +
4727         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4728 
4729     if (outbufsz > MAX_STRUCT_SIZE) {
4730         /* We can't fit all the extents into the fixed size buffer.
4731          * Allocate one that is large enough and use it instead.
4732          */
4733         fm = g_try_malloc(outbufsz);
4734         if (!fm) {
4735             return -TARGET_ENOMEM;
4736         }
4737         memcpy(fm, buf_temp, sizeof(struct fiemap));
4738         free_fm = 1;
4739     }
4740     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4741     if (!is_error(ret)) {
4742         target_size_out = target_size_in;
4743         /* An extent_count of 0 means we were only counting the extents
4744          * so there are no structs to copy
4745          */
4746         if (fm->fm_extent_count != 0) {
4747             target_size_out += fm->fm_mapped_extents * extent_size;
4748         }
4749         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4750         if (!argptr) {
4751             ret = -TARGET_EFAULT;
4752         } else {
4753             /* Convert the struct fiemap */
4754             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4755             if (fm->fm_extent_count != 0) {
4756                 p = argptr + target_size_in;
4757                 /* ...and then all the struct fiemap_extents */
4758                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4759                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4760                                   THUNK_TARGET);
4761                     p += extent_size;
4762                 }
4763             }
4764             unlock_user(argptr, arg, target_size_out);
4765         }
4766     }
4767     if (free_fm) {
4768         g_free(fm);
4769     }
4770     return ret;
4771 }
4772 #endif
4773 
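/*
 * Editor's illustration (not part of the original syscall.c): guest-side
 * FS_IOC_FIEMAP usage of the kind do_ioctl_fs_ioc_fiemap() above has to
 * marshal.  The caller-supplied fm_extent_count sizes the trailing extent
 * array (and the bounce buffer above), while the kernel-written
 * fm_mapped_extents says how many extents must be converted back.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
    unsigned int i, n = 32;        /* room for up to 32 extents */
    struct fiemap *fm;
    int fd;

    if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
        return 1;
    }
    fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
    if (!fm) {
        return 1;
    }
    fm->fm_start = 0;
    fm->fm_length = ~0ULL;         /* map the whole file */
    fm->fm_extent_count = n;       /* capacity of fm_extents[] */
    if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
        for (i = 0; i < fm->fm_mapped_extents; i++) {
            printf("extent %u: logical=%llu length=%llu\n", i,
                   (unsigned long long)fm->fm_extents[i].fe_logical,
                   (unsigned long long)fm->fm_extents[i].fe_length);
        }
    }
    free(fm);
    close(fd);
    return 0;
}
#endif
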
4774 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4775                                 int fd, int cmd, abi_long arg)
4776 {
4777     const argtype *arg_type = ie->arg_type;
4778     int target_size;
4779     void *argptr;
4780     int ret;
4781     struct ifconf *host_ifconf;
4782     uint32_t outbufsz;
4783     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4784     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4785     int target_ifreq_size;
4786     int nb_ifreq;
4787     int free_buf = 0;
4788     int i;
4789     int target_ifc_len;
4790     abi_long target_ifc_buf;
4791     int host_ifc_len;
4792     char *host_ifc_buf;
4793 
4794     assert(arg_type[0] == TYPE_PTR);
4795     assert(ie->access == IOC_RW);
4796 
4797     arg_type++;
4798     target_size = thunk_type_size(arg_type, 0);
4799 
4800     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4801     if (!argptr)
4802         return -TARGET_EFAULT;
4803     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4804     unlock_user(argptr, arg, 0);
4805 
4806     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4807     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4808     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4809 
4810     if (target_ifc_buf != 0) {
4811         target_ifc_len = host_ifconf->ifc_len;
4812         nb_ifreq = target_ifc_len / target_ifreq_size;
4813         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4814 
4815         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4816         if (outbufsz > MAX_STRUCT_SIZE) {
4817             /*
4818              * We can't fit all the ifreq entries into the fixed size buffer.
4819              * Allocate one that is large enough and use it instead.
4820              */
4821             host_ifconf = g_try_malloc(outbufsz);
4822             if (!host_ifconf) {
4823                 return -TARGET_ENOMEM;
4824             }
4825             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4826             free_buf = 1;
4827         }
4828         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4829 
4830         host_ifconf->ifc_len = host_ifc_len;
4831     } else {
4832         host_ifc_buf = NULL;
4833     }
4834     host_ifconf->ifc_buf = host_ifc_buf;
4835 
4836     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4837     if (!is_error(ret)) {
4838         /* convert host ifc_len to target ifc_len */
4839 
4840         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4841         target_ifc_len = nb_ifreq * target_ifreq_size;
4842         host_ifconf->ifc_len = target_ifc_len;
4843 
4844         /* restore target ifc_buf */
4845 
4846         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4847 
4848         /* copy struct ifconf to target user */
4849 
4850         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4851         if (!argptr)
4852             return -TARGET_EFAULT;
4853         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4854         unlock_user(argptr, arg, target_size);
4855 
4856         if (target_ifc_buf != 0) {
4857             /* copy ifreq[] to target user */
4858             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4859             for (i = 0; i < nb_ifreq ; i++) {
4860                 thunk_convert(argptr + i * target_ifreq_size,
4861                               host_ifc_buf + i * sizeof(struct ifreq),
4862                               ifreq_arg_type, THUNK_TARGET);
4863             }
4864             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4865         }
4866     }
4867 
4868     if (free_buf) {
4869         g_free(host_ifconf);
4870     }
4871 
4872     return ret;
4873 }
4874 
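/*
 * Editor's illustration (not part of the original syscall.c): a typical
 * guest-side SIOCGIFCONF call that do_ioctl_ifconf() above handles.  The
 * kernel rewrites ifc_len to the number of bytes actually used, which the
 * handler scales between host and target ifreq sizes; passing a NULL ifc_buf
 * (not shown) merely queries the required length, which is why the handler
 * special-cases a zero target buffer pointer.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
    struct ifreq reqs[16];
    struct ifconf ifc;
    int i, n, sock = socket(AF_INET, SOCK_DGRAM, 0);

    if (sock < 0) {
        return 1;
    }
    memset(&ifc, 0, sizeof(ifc));
    ifc.ifc_len = sizeof(reqs);    /* size of the caller-provided array */
    ifc.ifc_req = reqs;
    if (ioctl(sock, SIOCGIFCONF, &ifc) == 0) {
        n = ifc.ifc_len / sizeof(struct ifreq);
        for (i = 0; i < n; i++) {
            printf("%s\n", reqs[i].ifr_name);
        }
    }
    close(sock);
    return 0;
}
#endif
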
4875 #if defined(CONFIG_USBFS)
4876 #if HOST_LONG_BITS > 64
4877 #error USBDEVFS thunks do not support >64 bit hosts yet.
4878 #endif
4879 struct live_urb {
4880     uint64_t target_urb_adr;
4881     uint64_t target_buf_adr;
4882     char *target_buf_ptr;
4883     struct usbdevfs_urb host_urb;
4884 };
4885 
4886 static GHashTable *usbdevfs_urb_hashtable(void)
4887 {
4888     static GHashTable *urb_hashtable;
4889 
4890     if (!urb_hashtable) {
4891         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4892     }
4893     return urb_hashtable;
4894 }
4895 
4896 static void urb_hashtable_insert(struct live_urb *urb)
4897 {
4898     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4899     g_hash_table_insert(urb_hashtable, urb, urb);
4900 }
4901 
4902 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4903 {
4904     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4905     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4906 }
4907 
4908 static void urb_hashtable_remove(struct live_urb *urb)
4909 {
4910     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4911     g_hash_table_remove(urb_hashtable, urb);
4912 }
4913 
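/*
 * Editor's note (illustration, not part of the original syscall.c): the
 * hashtable above is keyed with g_int64_hash()/g_int64_equal(), which both
 * dereference the key as a 64-bit integer.  Passing the live_urb itself as
 * the key works only because target_urb_adr is its first member, so a
 * pointer to the struct is also a pointer to the key.  The sketch below
 * shows the same pattern with a hypothetical struct record.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

struct record {
    uint64_t key;                  /* must stay the first member */
    int payload;
};

int main(void)
{
    GHashTable *tbl = g_hash_table_new(g_int64_hash, g_int64_equal);
    struct record r = { .key = 0x1234, .payload = 42 };
    uint64_t wanted = 0x1234;
    struct record *found;

    g_hash_table_insert(tbl, &r, &r);          /* struct doubles as its key */
    found = g_hash_table_lookup(tbl, &wanted); /* looked up by plain uint64_t */
    printf("payload = %d\n", found ? found->payload : -1);
    g_hash_table_destroy(tbl);
    return 0;
}
#endif
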
4914 static abi_long
4915 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4916                           int fd, int cmd, abi_long arg)
4917 {
4918     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4919     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4920     struct live_urb *lurb;
4921     void *argptr;
4922     uint64_t hurb;
4923     int target_size;
4924     uintptr_t target_urb_adr;
4925     abi_long ret;
4926 
4927     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4928 
4929     memset(buf_temp, 0, sizeof(uint64_t));
4930     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4931     if (is_error(ret)) {
4932         return ret;
4933     }
4934 
4935     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4936     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4937     if (!lurb->target_urb_adr) {
4938         return -TARGET_EFAULT;
4939     }
4940     urb_hashtable_remove(lurb);
4941     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4942         lurb->host_urb.buffer_length);
4943     lurb->target_buf_ptr = NULL;
4944 
4945     /* restore the guest buffer pointer */
4946     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4947 
4948     /* update the guest urb struct */
4949     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4950     if (!argptr) {
4951         g_free(lurb);
4952         return -TARGET_EFAULT;
4953     }
4954     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4955     unlock_user(argptr, lurb->target_urb_adr, target_size);
4956 
4957     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4958     /* write back the urb handle */
4959     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4960     if (!argptr) {
4961         g_free(lurb);
4962         return -TARGET_EFAULT;
4963     }
4964 
4965     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4966     target_urb_adr = lurb->target_urb_adr;
4967     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4968     unlock_user(argptr, arg, target_size);
4969 
4970     g_free(lurb);
4971     return ret;
4972 }
4973 
4974 static abi_long
4975 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4976                              uint8_t *buf_temp __attribute__((unused)),
4977                              int fd, int cmd, abi_long arg)
4978 {
4979     struct live_urb *lurb;
4980 
4981     /* map target address back to host URB with metadata. */
4982     lurb = urb_hashtable_lookup(arg);
4983     if (!lurb) {
4984         return -TARGET_EFAULT;
4985     }
4986     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4987 }
4988 
4989 static abi_long
4990 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4991                             int fd, int cmd, abi_long arg)
4992 {
4993     const argtype *arg_type = ie->arg_type;
4994     int target_size;
4995     abi_long ret;
4996     void *argptr;
4997     int rw_dir;
4998     struct live_urb *lurb;
4999 
5000     /*
5001      * Each submitted URB needs to map to a unique ID for the
5002      * kernel, and that unique ID needs to be a pointer to
5003      * host memory.  Hence, we allocate a live_urb for each URB.
5004      * Isochronous transfers have a variable-length struct.
5005      */
5006     arg_type++;
5007     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5008 
5009     /* construct host copy of urb and metadata */
5010     lurb = g_try_new0(struct live_urb, 1);
5011     if (!lurb) {
5012         return -TARGET_ENOMEM;
5013     }
5014 
5015     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5016     if (!argptr) {
5017         g_free(lurb);
5018         return -TARGET_EFAULT;
5019     }
5020     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5021     unlock_user(argptr, arg, 0);
5022 
5023     lurb->target_urb_adr = arg;
5024     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5025 
5026     /* buffer space used depends on endpoint type so lock the entire buffer */
5027     /* control type urbs should check the buffer contents for true direction */
5028     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5029     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5030         lurb->host_urb.buffer_length, 1);
5031     if (lurb->target_buf_ptr == NULL) {
5032         g_free(lurb);
5033         return -TARGET_EFAULT;
5034     }
5035 
5036     /* update buffer pointer in host copy */
5037     lurb->host_urb.buffer = lurb->target_buf_ptr;
5038 
5039     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5040     if (is_error(ret)) {
5041         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5042         g_free(lurb);
5043     } else {
5044         urb_hashtable_insert(lurb);
5045     }
5046 
5047     return ret;
5048 }
5049 #endif /* CONFIG_USBFS */
5050 
5051 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5052                             int cmd, abi_long arg)
5053 {
5054     void *argptr;
5055     struct dm_ioctl *host_dm;
5056     abi_long guest_data;
5057     uint32_t guest_data_size;
5058     int target_size;
5059     const argtype *arg_type = ie->arg_type;
5060     abi_long ret;
5061     void *big_buf = NULL;
5062     char *host_data;
5063 
5064     arg_type++;
5065     target_size = thunk_type_size(arg_type, 0);
5066     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5067     if (!argptr) {
5068         ret = -TARGET_EFAULT;
5069         goto out;
5070     }
5071     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5072     unlock_user(argptr, arg, 0);
5073 
5074     /* buf_temp is too small, so fetch things into a bigger buffer */
5075     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5076     memcpy(big_buf, buf_temp, target_size);
5077     buf_temp = big_buf;
5078     host_dm = big_buf;
5079 
5080     guest_data = arg + host_dm->data_start;
5081     if ((guest_data - arg) < 0) {
5082         ret = -TARGET_EINVAL;
5083         goto out;
5084     }
5085     guest_data_size = host_dm->data_size - host_dm->data_start;
5086     host_data = (char*)host_dm + host_dm->data_start;
5087 
5088     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5089     if (!argptr) {
5090         ret = -TARGET_EFAULT;
5091         goto out;
5092     }
5093 
5094     switch (ie->host_cmd) {
5095     case DM_REMOVE_ALL:
5096     case DM_LIST_DEVICES:
5097     case DM_DEV_CREATE:
5098     case DM_DEV_REMOVE:
5099     case DM_DEV_SUSPEND:
5100     case DM_DEV_STATUS:
5101     case DM_DEV_WAIT:
5102     case DM_TABLE_STATUS:
5103     case DM_TABLE_CLEAR:
5104     case DM_TABLE_DEPS:
5105     case DM_LIST_VERSIONS:
5106         /* no input data */
5107         break;
5108     case DM_DEV_RENAME:
5109     case DM_DEV_SET_GEOMETRY:
5110         /* data contains only strings */
5111         memcpy(host_data, argptr, guest_data_size);
5112         break;
5113     case DM_TARGET_MSG:
5114         memcpy(host_data, argptr, guest_data_size);
5115         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5116         break;
5117     case DM_TABLE_LOAD:
5118     {
5119         void *gspec = argptr;
5120         void *cur_data = host_data;
5121         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5122         int spec_size = thunk_type_size(dm_arg_type, 0);
5123         int i;
5124 
5125         for (i = 0; i < host_dm->target_count; i++) {
5126             struct dm_target_spec *spec = cur_data;
5127             uint32_t next;
5128             int slen;
5129 
5130             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5131             slen = strlen((char*)gspec + spec_size) + 1;
5132             next = spec->next;
5133             spec->next = sizeof(*spec) + slen;
5134             strcpy((char*)&spec[1], gspec + spec_size);
5135             gspec += next;
5136             cur_data += spec->next;
5137         }
5138         break;
5139     }
5140     default:
5141         ret = -TARGET_EINVAL;
5142         unlock_user(argptr, guest_data, 0);
5143         goto out;
5144     }
5145     unlock_user(argptr, guest_data, 0);
5146 
5147     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5148     if (!is_error(ret)) {
5149         guest_data = arg + host_dm->data_start;
5150         guest_data_size = host_dm->data_size - host_dm->data_start;
5151         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5152         switch (ie->host_cmd) {
5153         case DM_REMOVE_ALL:
5154         case DM_DEV_CREATE:
5155         case DM_DEV_REMOVE:
5156         case DM_DEV_RENAME:
5157         case DM_DEV_SUSPEND:
5158         case DM_DEV_STATUS:
5159         case DM_TABLE_LOAD:
5160         case DM_TABLE_CLEAR:
5161         case DM_TARGET_MSG:
5162         case DM_DEV_SET_GEOMETRY:
5163             /* no return data */
5164             break;
5165         case DM_LIST_DEVICES:
5166         {
5167             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5168             uint32_t remaining_data = guest_data_size;
5169             void *cur_data = argptr;
5170             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5171             int nl_size = 12; /* can't use thunk_size due to alignment */
5172 
5173             while (1) {
5174                 uint32_t next = nl->next;
5175                 if (next) {
5176                     nl->next = nl_size + (strlen(nl->name) + 1);
5177                 }
5178                 if (remaining_data < nl->next) {
5179                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5180                     break;
5181                 }
5182                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5183                 strcpy(cur_data + nl_size, nl->name);
5184                 cur_data += nl->next;
5185                 remaining_data -= nl->next;
5186                 if (!next) {
5187                     break;
5188                 }
5189                 nl = (void*)nl + next;
5190             }
5191             break;
5192         }
5193         case DM_DEV_WAIT:
5194         case DM_TABLE_STATUS:
5195         {
5196             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5197             void *cur_data = argptr;
5198             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5199             int spec_size = thunk_type_size(dm_arg_type, 0);
5200             int i;
5201 
5202             for (i = 0; i < host_dm->target_count; i++) {
5203                 uint32_t next = spec->next;
5204                 int slen = strlen((char*)&spec[1]) + 1;
5205                 spec->next = (cur_data - argptr) + spec_size + slen;
5206                 if (guest_data_size < spec->next) {
5207                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5208                     break;
5209                 }
5210                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5211                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5212                 cur_data = argptr + spec->next;
5213                 spec = (void*)host_dm + host_dm->data_start + next;
5214             }
5215             break;
5216         }
5217         case DM_TABLE_DEPS:
5218         {
5219             void *hdata = (void*)host_dm + host_dm->data_start;
5220             int count = *(uint32_t*)hdata;
5221             uint64_t *hdev = hdata + 8;
5222             uint64_t *gdev = argptr + 8;
5223             int i;
5224 
5225             *(uint32_t*)argptr = tswap32(count);
5226             for (i = 0; i < count; i++) {
5227                 *gdev = tswap64(*hdev);
5228                 gdev++;
5229                 hdev++;
5230             }
5231             break;
5232         }
5233         case DM_LIST_VERSIONS:
5234         {
5235             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5236             uint32_t remaining_data = guest_data_size;
5237             void *cur_data = argptr;
5238             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5239             int vers_size = thunk_type_size(dm_arg_type, 0);
5240 
5241             while (1) {
5242                 uint32_t next = vers->next;
5243                 if (next) {
5244                     vers->next = vers_size + (strlen(vers->name) + 1);
5245                 }
5246                 if (remaining_data < vers->next) {
5247                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5248                     break;
5249                 }
5250                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5251                 strcpy(cur_data + vers_size, vers->name);
5252                 cur_data += vers->next;
5253                 remaining_data -= vers->next;
5254                 if (!next) {
5255                     break;
5256                 }
5257                 vers = (void*)vers + next;
5258             }
5259             break;
5260         }
5261         default:
5262             unlock_user(argptr, guest_data, 0);
5263             ret = -TARGET_EINVAL;
5264             goto out;
5265         }
5266         unlock_user(argptr, guest_data, guest_data_size);
5267 
5268         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5269         if (!argptr) {
5270             ret = -TARGET_EFAULT;
5271             goto out;
5272         }
5273         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5274         unlock_user(argptr, arg, target_size);
5275     }
5276 out:
5277     g_free(big_buf);
5278     return ret;
5279 }
5280 
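/*
 * Editor's illustration (not part of the original syscall.c): a rough
 * guest-side DM_LIST_DEVICES call of the kind do_ioctl_dm() above has to
 * repack.  The struct dm_ioctl header describes a variable-sized payload via
 * data_start/data_size, which is exactly why the handler copies everything
 * into a larger bounce buffer.  Version negotiation and flag checking as
 * done by libdevmapper are omitted, so treat this as a sketch only.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
    char buf[16384];
    struct dm_ioctl *dmi = (struct dm_ioctl *)buf;
    int fd = open("/dev/mapper/control", O_RDWR);

    if (fd < 0) {
        return 1;
    }
    memset(buf, 0, sizeof(buf));
    dmi->version[0] = DM_VERSION_MAJOR;
    dmi->version[1] = DM_VERSION_MINOR;
    dmi->version[2] = DM_VERSION_PATCHLEVEL;
    dmi->data_size = sizeof(buf);              /* whole buffer, header included */
    dmi->data_start = sizeof(*dmi);            /* payload begins after the header */
    if (ioctl(fd, DM_LIST_DEVICES, dmi) == 0) {
        struct dm_name_list *nl = (struct dm_name_list *)(buf + dmi->data_start);
        if (nl->dev) {                         /* dev == 0 means no devices */
            for (;;) {
                printf("%s\n", nl->name);
                if (!nl->next) {
                    break;
                }
                nl = (struct dm_name_list *)((char *)nl + nl->next);
            }
        }
    }
    close(fd);
    return 0;
}
#endif
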
5281 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5282                                int cmd, abi_long arg)
5283 {
5284     void *argptr;
5285     int target_size;
5286     const argtype *arg_type = ie->arg_type;
5287     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5288     abi_long ret;
5289 
5290     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5291     struct blkpg_partition host_part;
5292 
5293     /* Read and convert blkpg */
5294     arg_type++;
5295     target_size = thunk_type_size(arg_type, 0);
5296     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5297     if (!argptr) {
5298         ret = -TARGET_EFAULT;
5299         goto out;
5300     }
5301     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5302     unlock_user(argptr, arg, 0);
5303 
5304     switch (host_blkpg->op) {
5305     case BLKPG_ADD_PARTITION:
5306     case BLKPG_DEL_PARTITION:
5307         /* payload is struct blkpg_partition */
5308         break;
5309     default:
5310         /* Unknown opcode */
5311         ret = -TARGET_EINVAL;
5312         goto out;
5313     }
5314 
5315     /* Read and convert blkpg->data */
5316     arg = (abi_long)(uintptr_t)host_blkpg->data;
5317     target_size = thunk_type_size(part_arg_type, 0);
5318     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5319     if (!argptr) {
5320         ret = -TARGET_EFAULT;
5321         goto out;
5322     }
5323     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5324     unlock_user(argptr, arg, 0);
5325 
5326     /* Swizzle the data pointer to our local copy and call! */
5327     host_blkpg->data = &host_part;
5328     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5329 
5330 out:
5331     return ret;
5332 }
5333 
5334 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5335                                 int fd, int cmd, abi_long arg)
5336 {
5337     const argtype *arg_type = ie->arg_type;
5338     const StructEntry *se;
5339     const argtype *field_types;
5340     const int *dst_offsets, *src_offsets;
5341     int target_size;
5342     void *argptr;
5343     abi_ulong *target_rt_dev_ptr = NULL;
5344     unsigned long *host_rt_dev_ptr = NULL;
5345     abi_long ret;
5346     int i;
5347 
5348     assert(ie->access == IOC_W);
5349     assert(*arg_type == TYPE_PTR);
5350     arg_type++;
5351     assert(*arg_type == TYPE_STRUCT);
5352     target_size = thunk_type_size(arg_type, 0);
5353     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5354     if (!argptr) {
5355         return -TARGET_EFAULT;
5356     }
5357     arg_type++;
5358     assert(*arg_type == (int)STRUCT_rtentry);
5359     se = struct_entries + *arg_type++;
5360     assert(se->convert[0] == NULL);
5361     /* convert struct here to be able to catch rt_dev string */
5362     field_types = se->field_types;
5363     dst_offsets = se->field_offsets[THUNK_HOST];
5364     src_offsets = se->field_offsets[THUNK_TARGET];
5365     for (i = 0; i < se->nb_fields; i++) {
5366         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5367             assert(*field_types == TYPE_PTRVOID);
5368             target_rt_dev_ptr = argptr + src_offsets[i];
5369             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5370             if (*target_rt_dev_ptr != 0) {
5371                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5372                                                   tswapal(*target_rt_dev_ptr));
5373                 if (!*host_rt_dev_ptr) {
5374                     unlock_user(argptr, arg, 0);
5375                     return -TARGET_EFAULT;
5376                 }
5377             } else {
5378                 *host_rt_dev_ptr = 0;
5379             }
5380             field_types++;
5381             continue;
5382         }
5383         field_types = thunk_convert(buf_temp + dst_offsets[i],
5384                                     argptr + src_offsets[i],
5385                                     field_types, THUNK_HOST);
5386     }
5387     unlock_user(argptr, arg, 0);
5388 
5389     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5390 
5391     assert(host_rt_dev_ptr != NULL);
5392     assert(target_rt_dev_ptr != NULL);
5393     if (*host_rt_dev_ptr != 0) {
5394         unlock_user((void *)*host_rt_dev_ptr,
5395                     *target_rt_dev_ptr, 0);
5396     }
5397     return ret;
5398 }
5399 
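/*
 * Editor's illustration (not part of the original syscall.c): a guest-side
 * SIOCADDRT request of the kind do_ioctl_rt() above converts.  struct
 * rtentry embeds a pointer to a device-name string (rt_dev), which is why
 * the handler walks the fields by hand instead of using a plain thunk.  The
 * interface name "eth0" is hypothetical and the call needs CAP_NET_ADMIN.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/route.h>

int main(void)
{
    struct rtentry rt;
    struct sockaddr_in *dst = (struct sockaddr_in *)&rt.rt_dst;
    struct sockaddr_in *mask = (struct sockaddr_in *)&rt.rt_genmask;
    char dev[] = "eth0";
    int sock = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&rt, 0, sizeof(rt));
    dst->sin_family = AF_INET;
    dst->sin_addr.s_addr = inet_addr("192.168.7.0");
    mask->sin_family = AF_INET;
    mask->sin_addr.s_addr = inet_addr("255.255.255.0");
    rt.rt_flags = RTF_UP;
    rt.rt_dev = dev;               /* the embedded string pointer chased above */

    if (sock < 0 || ioctl(sock, SIOCADDRT, &rt) < 0) {
        perror("SIOCADDRT");
    }
    close(sock);
    return 0;
}
#endif
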
5400 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5401                                      int fd, int cmd, abi_long arg)
5402 {
5403     int sig = target_to_host_signal(arg);
5404     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5405 }
5406 
5407 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5408                                     int fd, int cmd, abi_long arg)
5409 {
5410     struct timeval tv;
5411     abi_long ret;
5412 
5413     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5414     if (is_error(ret)) {
5415         return ret;
5416     }
5417 
5418     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5419         if (copy_to_user_timeval(arg, &tv)) {
5420             return -TARGET_EFAULT;
5421         }
5422     } else {
5423         if (copy_to_user_timeval64(arg, &tv)) {
5424             return -TARGET_EFAULT;
5425         }
5426     }
5427 
5428     return ret;
5429 }
5430 
5431 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5432                                       int fd, int cmd, abi_long arg)
5433 {
5434     struct timespec ts;
5435     abi_long ret;
5436 
5437     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5438     if (is_error(ret)) {
5439         return ret;
5440     }
5441 
5442     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5443         if (host_to_target_timespec(arg, &ts)) {
5444             return -TARGET_EFAULT;
5445         }
5446     } else {
5447         if (host_to_target_timespec64(arg, &ts)) {
5448             return -TARGET_EFAULT;
5449         }
5450     }
5451 
5452     return ret;
5453 }
5454 
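/*
 * Editor's illustration (not part of the original syscall.c): classic
 * guest-side use of SIOCGSTAMP to read the receive timestamp of the last
 * datagram, which the two handlers above convert to either the old 32-bit
 * timeval/timespec layout or the 64-bit one, depending on which target ioctl
 * number was used.  Depending on the kernel headers, SIOCGSTAMP may come
 * from <linux/sockios.h> or <sys/ioctl.h>.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <linux/sockios.h>

int main(void)
{
    char buf[512];
    struct timeval tv;
    struct sockaddr_in addr = {
        .sin_family = AF_INET,
        .sin_port = htons(9999),
        .sin_addr.s_addr = htonl(INADDR_ANY),
    };
    int sock = socket(AF_INET, SOCK_DGRAM, 0);

    if (sock < 0 || bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        return 1;
    }
    if (recv(sock, buf, sizeof(buf), 0) > 0 &&
        ioctl(sock, SIOCGSTAMP, &tv) == 0) {
        printf("received at %lld.%06ld\n",
               (long long)tv.tv_sec, (long)tv.tv_usec);
    }
    close(sock);
    return 0;
}
#endif
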
5455 #ifdef TIOCGPTPEER
5456 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                                      int fd, int cmd, abi_long arg)
5458 {
5459     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5460     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5461 }
5462 #endif
5463 
5464 #ifdef HAVE_DRM_H
5465 
5466 static void unlock_drm_version(struct drm_version *host_ver,
5467                                struct target_drm_version *target_ver,
5468                                bool copy)
5469 {
5470     unlock_user(host_ver->name, target_ver->name,
5471                                 copy ? host_ver->name_len : 0);
5472     unlock_user(host_ver->date, target_ver->date,
5473                                 copy ? host_ver->date_len : 0);
5474     unlock_user(host_ver->desc, target_ver->desc,
5475                                 copy ? host_ver->desc_len : 0);
5476 }
5477 
5478 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5479                                           struct target_drm_version *target_ver)
5480 {
5481     memset(host_ver, 0, sizeof(*host_ver));
5482 
5483     __get_user(host_ver->name_len, &target_ver->name_len);
5484     if (host_ver->name_len) {
5485         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5486                                    target_ver->name_len, 0);
5487         if (!host_ver->name) {
5488             return -EFAULT;
5489         }
5490     }
5491 
5492     __get_user(host_ver->date_len, &target_ver->date_len);
5493     if (host_ver->date_len) {
5494         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5495                                    target_ver->date_len, 0);
5496         if (!host_ver->date) {
5497             goto err;
5498         }
5499     }
5500 
5501     __get_user(host_ver->desc_len, &target_ver->desc_len);
5502     if (host_ver->desc_len) {
5503         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5504                                    target_ver->desc_len, 0);
5505         if (!host_ver->desc) {
5506             goto err;
5507         }
5508     }
5509 
5510     return 0;
5511 err:
5512     unlock_drm_version(host_ver, target_ver, false);
5513     return -EFAULT;
5514 }
5515 
5516 static inline void host_to_target_drmversion(
5517                                           struct target_drm_version *target_ver,
5518                                           struct drm_version *host_ver)
5519 {
5520     __put_user(host_ver->version_major, &target_ver->version_major);
5521     __put_user(host_ver->version_minor, &target_ver->version_minor);
5522     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5523     __put_user(host_ver->name_len, &target_ver->name_len);
5524     __put_user(host_ver->date_len, &target_ver->date_len);
5525     __put_user(host_ver->desc_len, &target_ver->desc_len);
5526     unlock_drm_version(host_ver, target_ver, true);
5527 }
5528 
5529 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5530                              int fd, int cmd, abi_long arg)
5531 {
5532     struct drm_version *ver;
5533     struct target_drm_version *target_ver;
5534     abi_long ret;
5535 
5536     switch (ie->host_cmd) {
5537     case DRM_IOCTL_VERSION:
5538         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5539             return -TARGET_EFAULT;
5540         }
5541         ver = (struct drm_version *)buf_temp;
5542         ret = target_to_host_drmversion(ver, target_ver);
5543         if (!is_error(ret)) {
5544             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5545             if (is_error(ret)) {
5546                 unlock_drm_version(ver, target_ver, false);
5547             } else {
5548                 host_to_target_drmversion(target_ver, ver);
5549             }
5550         }
5551         unlock_user_struct(target_ver, arg, 0);
5552         return ret;
5553     }
5554     return -TARGET_ENOSYS;
5555 }
5556 
5557 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5558                                            struct drm_i915_getparam *gparam,
5559                                            int fd, abi_long arg)
5560 {
5561     abi_long ret;
5562     int value;
5563     struct target_drm_i915_getparam *target_gparam;
5564 
5565     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5566         return -TARGET_EFAULT;
5567     }
5568 
5569     __get_user(gparam->param, &target_gparam->param);
5570     gparam->value = &value;
5571     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5572     put_user_s32(value, target_gparam->value);
5573 
5574     unlock_user_struct(target_gparam, arg, 0);
5575     return ret;
5576 }
5577 
5578 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5579                                   int fd, int cmd, abi_long arg)
5580 {
5581     switch (ie->host_cmd) {
5582     case DRM_IOCTL_I915_GETPARAM:
5583         return do_ioctl_drm_i915_getparam(ie,
5584                                           (struct drm_i915_getparam *)buf_temp,
5585                                           fd, arg);
5586     default:
5587         return -TARGET_ENOSYS;
5588     }
5589 }
5590 
5591 #endif
5592 
5593 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5594                                         int fd, int cmd, abi_long arg)
5595 {
5596     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5597     struct tun_filter *target_filter;
5598     char *target_addr;
5599 
5600     assert(ie->access == IOC_W);
5601 
5602     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5603     if (!target_filter) {
5604         return -TARGET_EFAULT;
5605     }
5606     filter->flags = tswap16(target_filter->flags);
5607     filter->count = tswap16(target_filter->count);
5608     unlock_user(target_filter, arg, 0);
5609 
5610     if (filter->count) {
5611         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5612             MAX_STRUCT_SIZE) {
5613             return -TARGET_EFAULT;
5614         }
5615 
5616         target_addr = lock_user(VERIFY_READ,
5617                                 arg + offsetof(struct tun_filter, addr),
5618                                 filter->count * ETH_ALEN, 1);
5619         if (!target_addr) {
5620             return -TARGET_EFAULT;
5621         }
5622         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5623         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5624     }
5625 
5626     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5627 }
5628 
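/*
 * Editor's illustration (not part of the original syscall.c): guest-side
 * TUNSETTXFILTER usage.  struct tun_filter carries a count followed by a
 * variable-length array of Ethernet addresses, which is why the handler
 * above reads the header first and then locks the address array separately.
 * A TAP interface must already be attached to the fd via TUNSETIFF (not
 * shown); the MAC address here is made up.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

int main(void)
{
    char buf[sizeof(struct tun_filter) + ETH_ALEN];
    struct tun_filter *flt = (struct tun_filter *)buf;
    static const unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
    int fd = open("/dev/net/tun", O_RDWR);

    if (fd < 0) {
        return 1;
    }
    memset(buf, 0, sizeof(buf));
    flt->flags = 0;
    flt->count = 1;                /* number of addresses that follow */
    memcpy(flt->addr[0], mac, ETH_ALEN);
    if (ioctl(fd, TUNSETTXFILTER, flt) < 0) {
        perror("TUNSETTXFILTER");
    }
    close(fd);
    return 0;
}
#endif
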
5629 IOCTLEntry ioctl_entries[] = {
5630 #define IOCTL(cmd, access, ...) \
5631     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5632 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5633     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5634 #define IOCTL_IGNORE(cmd) \
5635     { TARGET_ ## cmd, 0, #cmd },
5636 #include "ioctls.h"
5637     { 0, 0, },
5638 };
5639 
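/*
 * Editor's note (illustration, not part of the original syscall.c): each
 * line of ioctls.h is one of the IOCTL*() invocations above.  For instance,
 * an entry along the lines of
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands, via the IOCTL() definition, to the table element
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * i.e. target number, host number, printable name, access mode, no special
 * handler, and the argument type description consumed by the thunk code.
 * The zero-filled terminator entry above is what do_ioctl() stops on.
 */
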
5640 /* ??? Implement proper locking for ioctls.  */
5641 /* do_ioctl() must return target values and target errnos. */
5642 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5643 {
5644     const IOCTLEntry *ie;
5645     const argtype *arg_type;
5646     abi_long ret;
5647     uint8_t buf_temp[MAX_STRUCT_SIZE];
5648     int target_size;
5649     void *argptr;
5650 
5651     ie = ioctl_entries;
5652     for(;;) {
5653         if (ie->target_cmd == 0) {
5654             qemu_log_mask(
5655                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5656             return -TARGET_ENOTTY;
5657         }
5658         if (ie->target_cmd == cmd)
5659             break;
5660         ie++;
5661     }
5662     arg_type = ie->arg_type;
5663     if (ie->do_ioctl) {
5664         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5665     } else if (!ie->host_cmd) {
5666         /* Some architectures define BSD ioctls in their headers
5667            that are not implemented in Linux.  */
5668         return -TARGET_ENOTTY;
5669     }
5670 
5671     switch(arg_type[0]) {
5672     case TYPE_NULL:
5673         /* no argument */
5674         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5675         break;
5676     case TYPE_PTRVOID:
5677     case TYPE_INT:
5678     case TYPE_LONG:
5679     case TYPE_ULONG:
5680         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5681         break;
5682     case TYPE_PTR:
5683         arg_type++;
5684         target_size = thunk_type_size(arg_type, 0);
5685         switch(ie->access) {
5686         case IOC_R:
5687             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5688             if (!is_error(ret)) {
5689                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5690                 if (!argptr)
5691                     return -TARGET_EFAULT;
5692                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5693                 unlock_user(argptr, arg, target_size);
5694             }
5695             break;
5696         case IOC_W:
5697             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5698             if (!argptr)
5699                 return -TARGET_EFAULT;
5700             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5701             unlock_user(argptr, arg, 0);
5702             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5703             break;
5704         default:
5705         case IOC_RW:
5706             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5707             if (!argptr)
5708                 return -TARGET_EFAULT;
5709             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5710             unlock_user(argptr, arg, 0);
5711             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5712             if (!is_error(ret)) {
5713                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5714                 if (!argptr)
5715                     return -TARGET_EFAULT;
5716                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5717                 unlock_user(argptr, arg, target_size);
5718             }
5719             break;
5720         }
5721         break;
5722     default:
5723         qemu_log_mask(LOG_UNIMP,
5724                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5725                       (long)cmd, arg_type[0]);
5726         ret = -TARGET_ENOTTY;
5727         break;
5728     }
5729     return ret;
5730 }
5731 
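/*
 * Editor's illustration (not part of the original syscall.c): an ioctl with
 * no special handler goes through the generic TYPE_PTR path in do_ioctl()
 * above.  Assuming TIOCGWINSZ is registered in ioctls.h as an IOC_R pointer
 * ioctl (a typical case), the call below makes the host ioctl fill the
 * bounce buffer and thunk_convert() write the target-layout winsize back to
 * guest memory.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
    struct winsize ws;

    if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0) {
        printf("%u rows x %u cols\n", ws.ws_row, ws.ws_col);
    }
    return 0;
}
#endif
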
5732 static const bitmask_transtbl iflag_tbl[] = {
5733         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5734         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5735         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5736         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5737         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5738         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5739         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5740         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5741         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5742         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5743         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5744         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5745         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5746         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5747         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5748 };
5749 
5750 static const bitmask_transtbl oflag_tbl[] = {
5751 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5752 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5753 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5754 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5755 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5756 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5757 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5758 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5759 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5760 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5761 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5762 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5763 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5764 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5765 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5766 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5767 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5768 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5769 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5770 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5771 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5772 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5773 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5774 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5775 };
5776 
5777 #if defined(TARGET_CIBAUD) && defined(CIBAUD)
5778 
5779 # define BAUD_TRANSTBL(baud) \
5780     { TARGET_CBAUD, TARGET_##baud, CBAUD, baud }, \
5781     { TARGET_CIBAUD, TARGET_##baud << TARGET_IBSHIFT, CIBAUD, baud << IBSHIFT },
5782 
5783 #else
5784 
5785 /* Alpha in particular does not have CIBAUD/IBSHIFT */
5786 
5787 # define BAUD_TRANSTBL(baud) \
5788     { TARGET_CBAUD, TARGET_##baud, CBAUD, baud },
5789 
5790 #endif
5791 
5792 static const bitmask_transtbl cflag_tbl[] = {
5793 	BAUD_TRANSTBL(B0)
5794 	BAUD_TRANSTBL(B50)
5795 	BAUD_TRANSTBL(B75)
5796 	BAUD_TRANSTBL(B110)
5797 	BAUD_TRANSTBL(B134)
5798 	BAUD_TRANSTBL(B150)
5799 	BAUD_TRANSTBL(B200)
5800 	BAUD_TRANSTBL(B300)
5801 	BAUD_TRANSTBL(B600)
5802 	BAUD_TRANSTBL(B1200)
5803 	BAUD_TRANSTBL(B1800)
5804 	BAUD_TRANSTBL(B2400)
5805 	BAUD_TRANSTBL(B4800)
5806 	BAUD_TRANSTBL(B9600)
5807 	BAUD_TRANSTBL(B19200)
5808 	BAUD_TRANSTBL(B38400)
5809 	BAUD_TRANSTBL(B57600)
5810 	BAUD_TRANSTBL(B115200)
5811 	BAUD_TRANSTBL(B230400)
5812 	BAUD_TRANSTBL(B460800)
5813 	BAUD_TRANSTBL(B500000)
5814 	BAUD_TRANSTBL(B576000)
5815 	BAUD_TRANSTBL(B921600)
5816 	BAUD_TRANSTBL(B1000000)
5817 	BAUD_TRANSTBL(B1152000)
5818 	BAUD_TRANSTBL(B1500000)
5819 	BAUD_TRANSTBL(B2000000)
5820 
5821 	BAUD_TRANSTBL(BOTHER)
5822 
5823 	/* SPARC in particular is missing these higher baud rates */
5824 
5825 #if defined(TARGET_B2500000) && defined(B2500000)
5826 	BAUD_TRANSTBL(B2500000)
5827 #endif
5828 
5829 #if defined(TARGET_B3000000) && defined(B3000000)
5830 	BAUD_TRANSTBL(B3000000)
5831 #endif
5832 
5833 #if defined(TARGET_B3500000) && defined(B3500000)
5834 	BAUD_TRANSTBL(B3500000)
5835 #endif
5836 
5837 #if defined(TARGET_B4000000) && defined(B4000000)
5838 	BAUD_TRANSTBL(B4000000)
5839 #endif
5840 
5841 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5842 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5843 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5844 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5845 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5846 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5847 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5848 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5849 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5850 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5851 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5852 };
5853 
5854 static const bitmask_transtbl lflag_tbl[] = {
5855   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5856   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5857   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5858   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5859   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5860   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5861   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5862   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5863   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5864   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5865   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5866   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5867   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5868   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5869   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5870   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5871 };
5872 
5873 static void target_to_host_termios (void *dst, const void *src)
5874 {
5875     struct host_termios *host = dst;
5876     const struct target_termios *target = src;
5877 
5878     host->c_iflag =
5879         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5880     host->c_oflag =
5881         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5882     host->c_cflag =
5883         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5884     host->c_lflag =
5885         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5886     host->c_line = target->c_line;
5887 
5888     memset(host->c_cc, 0, sizeof(host->c_cc));
5889     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5890     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5891     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5892     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5893     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5894     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5895     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5896     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5897     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5898     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5899     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5900     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5901     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5902     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5903     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5904     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5905     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5906 }
5907 
5908 static void host_to_target_termios (void *dst, const void *src)
5909 {
5910     struct target_termios *target = dst;
5911     const struct host_termios *host = src;
5912 
5913     target->c_iflag =
5914         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5915     target->c_oflag =
5916         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5917     target->c_cflag =
5918         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5919     target->c_lflag =
5920         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5921     target->c_line = host->c_line;
5922 
5923     memset(target->c_cc, 0, sizeof(target->c_cc));
5924     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5925     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5926     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5927     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5928     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5929     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5930     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5931     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5932     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5933     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5934     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5935     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5936     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5937     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5938     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5939     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5940     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5941 }
5942 
5943 static const StructEntry struct_termios_def = {
5944     .convert = { host_to_target_termios, target_to_host_termios },
5945     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5946     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5947     .print = print_termios,
5948 };
5949 
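/*
 * Editor's illustration (not part of the original syscall.c): an ordinary
 * tcgetattr()/tcsetattr() pair in the guest issues TCGETS/TCSETS, which are
 * converted with struct_termios_def above: every flag bit is translated
 * through the bitmask_transtbl tables and the c_cc[] indices are remapped
 * via the TARGET_V* constants.
 */
#if 0   /* illustrative sketch only -- never compiled */
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
    struct termios tio;

    if (tcgetattr(STDIN_FILENO, &tio) < 0) {          /* TCGETS: host -> target */
        return 1;
    }
    tio.c_lflag &= ~(ICANON | ECHO);                  /* raw-ish input */
    tio.c_cc[VMIN] = 1;
    tio.c_cc[VTIME] = 0;
    if (tcsetattr(STDIN_FILENO, TCSANOW, &tio) < 0) { /* TCSETS: target -> host */
        return 1;
    }
    printf("canonical mode and echo disabled\n");
    return 0;
}
#endif
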
5950 #ifdef TARGET_TCGETS2
5951 static void target_to_host_termios2 (void *dst, const void *src)
5952 {
5953     struct host_termios2 *host = dst;
5954     const struct target_termios2 *target = src;
5955 
5956     host->c_iflag =
5957         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5958     host->c_oflag =
5959         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5960     host->c_cflag =
5961         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5962     host->c_lflag =
5963         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5964     host->c_line = target->c_line;
5965     host->c_ispeed = tswap32(target->c_ispeed);
5966     host->c_ospeed = tswap32(target->c_ospeed);
5967 
5968     memset(host->c_cc, 0, sizeof(host->c_cc));
5969     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5970     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5971     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5972     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5973     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5974     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5975     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5976     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5977     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5978     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5979     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5980     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5981     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5982     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5983     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5984     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5985     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5986 }
5987 
5988 static void host_to_target_termios2 (void *dst, const void *src)
5989 {
5990     struct target_termios2 *target = dst;
5991     const struct host_termios2 *host = src;
5992 
5993     target->c_iflag =
5994         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5995     target->c_oflag =
5996         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5997     target->c_cflag =
5998         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5999     target->c_lflag =
6000         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6001     target->c_line = host->c_line;
6002     target->c_ispeed = tswap32(host->c_ispeed);
6003     target->c_ospeed = tswap32(host->c_ospeed);
6004 
6005     memset(target->c_cc, 0, sizeof(target->c_cc));
6006     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6007     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6008     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6009     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6010     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6011     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6012     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6013     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6014     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6015     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6016     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6017     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6018     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6019     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6020     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6021     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6022     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6023 }
6024 
6025 static const StructEntry struct_termios2_def = {
6026     .convert = { host_to_target_termios2, target_to_host_termios2 },
6027     .size = { sizeof(struct target_termios2), sizeof(struct host_termios2) },
6028     .align = { __alignof__(struct target_termios2), __alignof__(struct host_termios2) },
6029     .print = print_termios2,
6030 };
6031 #endif
6032 
6033 /* If the host does not provide these bits, they may be safely discarded. */
6034 #ifndef MAP_SYNC
6035 #define MAP_SYNC 0
6036 #endif
6037 #ifndef MAP_UNINITIALIZED
6038 #define MAP_UNINITIALIZED 0
6039 #endif
6040 
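/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() ORs in host_bits whenever the
 * masked target flags equal target_bits, and host_to_target_bitmask()
 * performs the reverse mapping.
 */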
6041 static const bitmask_transtbl mmap_flags_tbl[] = {
6042     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6043     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6044       MAP_ANONYMOUS, MAP_ANONYMOUS },
6045     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6046       MAP_GROWSDOWN, MAP_GROWSDOWN },
6047     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6048       MAP_DENYWRITE, MAP_DENYWRITE },
6049     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6050       MAP_EXECUTABLE, MAP_EXECUTABLE },
6051     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6052     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6053       MAP_NORESERVE, MAP_NORESERVE },
6054     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6055     /* MAP_STACK has been ignored by the kernel for quite some time.
6056        Recognize it for the target, but do not pass
6057        it through to the host.  */
6058     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6059     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
6060     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
6061     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
6062       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
6063     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
6064       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
6065 };
6066 
6067 /*
6068  * Arrange for legacy / undefined architecture specific flags to be
6069  * ignored by mmap handling code.
6070  */
6071 #ifndef TARGET_MAP_32BIT
6072 #define TARGET_MAP_32BIT 0
6073 #endif
6074 #ifndef TARGET_MAP_HUGE_2MB
6075 #define TARGET_MAP_HUGE_2MB 0
6076 #endif
6077 #ifndef TARGET_MAP_HUGE_1GB
6078 #define TARGET_MAP_HUGE_1GB 0
6079 #endif
6080 
6081 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
6082                         int target_flags, int fd, off_t offset)
6083 {
6084     /*
6085      * The historical set of flags that all mmap types implicitly support.
6086      */
6087     enum {
6088         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
6089                                | TARGET_MAP_PRIVATE
6090                                | TARGET_MAP_FIXED
6091                                | TARGET_MAP_ANONYMOUS
6092                                | TARGET_MAP_DENYWRITE
6093                                | TARGET_MAP_EXECUTABLE
6094                                | TARGET_MAP_UNINITIALIZED
6095                                | TARGET_MAP_GROWSDOWN
6096                                | TARGET_MAP_LOCKED
6097                                | TARGET_MAP_NORESERVE
6098                                | TARGET_MAP_POPULATE
6099                                | TARGET_MAP_NONBLOCK
6100                                | TARGET_MAP_STACK
6101                                | TARGET_MAP_HUGETLB
6102                                | TARGET_MAP_32BIT
6103                                | TARGET_MAP_HUGE_2MB
6104                                | TARGET_MAP_HUGE_1GB
6105     };
6106     int host_flags;
6107 
6108     switch (target_flags & TARGET_MAP_TYPE) {
6109     case TARGET_MAP_PRIVATE:
6110         host_flags = MAP_PRIVATE;
6111         break;
6112     case TARGET_MAP_SHARED:
6113         host_flags = MAP_SHARED;
6114         break;
6115     case TARGET_MAP_SHARED_VALIDATE:
6116         /*
6117          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
6118          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
6119          */
6120         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
6121             return -TARGET_EOPNOTSUPP;
6122         }
6123         host_flags = MAP_SHARED_VALIDATE;
6124         if (target_flags & TARGET_MAP_SYNC) {
6125             host_flags |= MAP_SYNC;
6126         }
6127         break;
6128     default:
6129         return -TARGET_EINVAL;
6130     }
6131     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
6132 
6133     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
6134 }
6135 
6136 /*
6137  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6138  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6139  */
6140 #if defined(TARGET_I386)
6141 
6142 /* NOTE: there is really only one LDT shared by all threads */
6143 static uint8_t *ldt_table;
6144 
6145 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6146 {
6147     int size;
6148     void *p;
6149 
6150     if (!ldt_table)
6151         return 0;
6152     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6153     if (size > bytecount)
6154         size = bytecount;
6155     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6156     if (!p)
6157         return -TARGET_EFAULT;
6158     /* ??? Should this by byteswapped?  */
6159     /* ??? Should this be byteswapped?  */
6160     unlock_user(p, ptr, size);
6161     return size;
6162 }
6163 
6164 /* XXX: add locking support */
6165 static abi_long write_ldt(CPUX86State *env,
6166                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6167 {
6168     struct target_modify_ldt_ldt_s ldt_info;
6169     struct target_modify_ldt_ldt_s *target_ldt_info;
6170     int seg_32bit, contents, read_exec_only, limit_in_pages;
6171     int seg_not_present, useable, lm;
6172     uint32_t *lp, entry_1, entry_2;
6173 
6174     if (bytecount != sizeof(ldt_info))
6175         return -TARGET_EINVAL;
6176     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6177         return -TARGET_EFAULT;
6178     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6179     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6180     ldt_info.limit = tswap32(target_ldt_info->limit);
6181     ldt_info.flags = tswap32(target_ldt_info->flags);
6182     unlock_user_struct(target_ldt_info, ptr, 0);
6183 
6184     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6185         return -TARGET_EINVAL;
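    /* Decode the user_desc-style flags word; the bit layout matches the
       Linux modify_ldt()/set_thread_area() ABI. */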
6186     seg_32bit = ldt_info.flags & 1;
6187     contents = (ldt_info.flags >> 1) & 3;
6188     read_exec_only = (ldt_info.flags >> 3) & 1;
6189     limit_in_pages = (ldt_info.flags >> 4) & 1;
6190     seg_not_present = (ldt_info.flags >> 5) & 1;
6191     useable = (ldt_info.flags >> 6) & 1;
6192 #ifdef TARGET_ABI32
6193     lm = 0;
6194 #else
6195     lm = (ldt_info.flags >> 7) & 1;
6196 #endif
6197     if (contents == 3) {
6198         if (oldmode)
6199             return -TARGET_EINVAL;
6200         if (seg_not_present == 0)
6201             return -TARGET_EINVAL;
6202     }
6203     /* allocate the LDT */
6204     if (!ldt_table) {
6205         env->ldt.base = target_mmap(0,
6206                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6207                                     PROT_READ|PROT_WRITE,
6208                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6209         if (env->ldt.base == -1)
6210             return -TARGET_ENOMEM;
6211         memset(g2h_untagged(env->ldt.base), 0,
6212                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6213         env->ldt.limit = 0xffff;
6214         ldt_table = g2h_untagged(env->ldt.base);
6215     }
6216 
6217     /* NOTE: same code as Linux kernel */
6218     /* Allow LDTs to be cleared by the user. */
6219     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6220         if (oldmode ||
6221             (contents == 0		&&
6222              read_exec_only == 1	&&
6223              seg_32bit == 0		&&
6224              limit_in_pages == 0	&&
6225              seg_not_present == 1	&&
6226              useable == 0 )) {
6227             entry_1 = 0;
6228             entry_2 = 0;
6229             goto install;
6230         }
6231     }
6232 
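    /*
     * Pack an x86 segment descriptor the same way the kernel does:
     * entry_1 holds base 15:0 and limit 15:0; entry_2 holds base 31:24,
     * base 23:16, limit 19:16 and the attribute bits, with 0x7000 setting
     * DPL=3 and marking the entry as a code/data segment.
     */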
6233     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6234         (ldt_info.limit & 0x0ffff);
6235     entry_2 = (ldt_info.base_addr & 0xff000000) |
6236         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6237         (ldt_info.limit & 0xf0000) |
6238         ((read_exec_only ^ 1) << 9) |
6239         (contents << 10) |
6240         ((seg_not_present ^ 1) << 15) |
6241         (seg_32bit << 22) |
6242         (limit_in_pages << 23) |
6243         (lm << 21) |
6244         0x7000;
6245     if (!oldmode)
6246         entry_2 |= (useable << 20);
6247 
6248     /* Install the new entry ...  */
6249 install:
6250     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6251     lp[0] = tswap32(entry_1);
6252     lp[1] = tswap32(entry_2);
6253     return 0;
6254 }
6255 
6256 /* specific and weird i386 syscalls */
6257 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6258                               unsigned long bytecount)
6259 {
6260     abi_long ret;
6261 
6262     switch (func) {
6263     case 0:
6264         ret = read_ldt(ptr, bytecount);
6265         break;
6266     case 1:
6267         ret = write_ldt(env, ptr, bytecount, 1);
6268         break;
6269     case 0x11:
6270         ret = write_ldt(env, ptr, bytecount, 0);
6271         break;
6272     default:
6273         ret = -TARGET_ENOSYS;
6274         break;
6275     }
6276     return ret;
6277 }
6278 
6279 #if defined(TARGET_ABI32)
6280 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6281 {
6282     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6283     struct target_modify_ldt_ldt_s ldt_info;
6284     struct target_modify_ldt_ldt_s *target_ldt_info;
6285     int seg_32bit, contents, read_exec_only, limit_in_pages;
6286     int seg_not_present, useable, lm;
6287     uint32_t *lp, entry_1, entry_2;
6288     int i;
6289 
6290     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6291     if (!target_ldt_info)
6292         return -TARGET_EFAULT;
6293     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6294     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6295     ldt_info.limit = tswap32(target_ldt_info->limit);
6296     ldt_info.flags = tswap32(target_ldt_info->flags);
6297     if (ldt_info.entry_number == -1) {
6298         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6299             if (gdt_table[i] == 0) {
6300                 ldt_info.entry_number = i;
6301                 target_ldt_info->entry_number = tswap32(i);
6302                 break;
6303             }
6304         }
6305     }
6306     unlock_user_struct(target_ldt_info, ptr, 1);
6307 
6308     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6309         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6310            return -TARGET_EINVAL;
6311     seg_32bit = ldt_info.flags & 1;
6312     contents = (ldt_info.flags >> 1) & 3;
6313     read_exec_only = (ldt_info.flags >> 3) & 1;
6314     limit_in_pages = (ldt_info.flags >> 4) & 1;
6315     seg_not_present = (ldt_info.flags >> 5) & 1;
6316     useable = (ldt_info.flags >> 6) & 1;
6317 #ifdef TARGET_ABI32
6318     lm = 0;
6319 #else
6320     lm = (ldt_info.flags >> 7) & 1;
6321 #endif
6322 
6323     if (contents == 3) {
6324         if (seg_not_present == 0)
6325             return -TARGET_EINVAL;
6326     }
6327 
6328     /* NOTE: same code as Linux kernel */
6329     /* Allow LDTs to be cleared by the user. */
6330     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6331         if ((contents == 0             &&
6332              read_exec_only == 1       &&
6333              seg_32bit == 0            &&
6334              limit_in_pages == 0       &&
6335              seg_not_present == 1      &&
6336              useable == 0 )) {
6337             entry_1 = 0;
6338             entry_2 = 0;
6339             goto install;
6340         }
6341     }
6342 
6343     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6344         (ldt_info.limit & 0x0ffff);
6345     entry_2 = (ldt_info.base_addr & 0xff000000) |
6346         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6347         (ldt_info.limit & 0xf0000) |
6348         ((read_exec_only ^ 1) << 9) |
6349         (contents << 10) |
6350         ((seg_not_present ^ 1) << 15) |
6351         (seg_32bit << 22) |
6352         (limit_in_pages << 23) |
6353         (useable << 20) |
6354         (lm << 21) |
6355         0x7000;
6356 
6357     /* Install the new entry ...  */
6358 install:
6359     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6360     lp[0] = tswap32(entry_1);
6361     lp[1] = tswap32(entry_2);
6362     return 0;
6363 }
6364 
6365 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6366 {
6367     struct target_modify_ldt_ldt_s *target_ldt_info;
6368     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6369     uint32_t base_addr, limit, flags;
6370     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6371     int seg_not_present, useable, lm;
6372     uint32_t *lp, entry_1, entry_2;
6373 
6374     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6375     if (!target_ldt_info)
6376         return -TARGET_EFAULT;
6377     idx = tswap32(target_ldt_info->entry_number);
6378     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6379         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6380         unlock_user_struct(target_ldt_info, ptr, 1);
6381         return -TARGET_EINVAL;
6382     }
6383     lp = (uint32_t *)(gdt_table + idx);
6384     entry_1 = tswap32(lp[0]);
6385     entry_2 = tswap32(lp[1]);
6386 
6387     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6388     contents = (entry_2 >> 10) & 3;
6389     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6390     seg_32bit = (entry_2 >> 22) & 1;
6391     limit_in_pages = (entry_2 >> 23) & 1;
6392     useable = (entry_2 >> 20) & 1;
6393 #ifdef TARGET_ABI32
6394     lm = 0;
6395 #else
6396     lm = (entry_2 >> 21) & 1;
6397 #endif
6398     flags = (seg_32bit << 0) | (contents << 1) |
6399         (read_exec_only << 3) | (limit_in_pages << 4) |
6400         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6401     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6402     base_addr = (entry_1 >> 16) |
6403         (entry_2 & 0xff000000) |
6404         ((entry_2 & 0xff) << 16);
6405     target_ldt_info->base_addr = tswapal(base_addr);
6406     target_ldt_info->limit = tswap32(limit);
6407     target_ldt_info->flags = tswap32(flags);
6408     unlock_user_struct(target_ldt_info, ptr, 1);
6409     return 0;
6410 }
6411 
6412 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6413 {
6414     return -TARGET_ENOSYS;
6415 }
6416 #else
6417 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6418 {
6419     abi_long ret = 0;
6420     abi_ulong val;
6421     int idx;
6422 
6423     switch(code) {
6424     case TARGET_ARCH_SET_GS:
6425     case TARGET_ARCH_SET_FS:
6426         if (code == TARGET_ARCH_SET_GS)
6427             idx = R_GS;
6428         else
6429             idx = R_FS;
6430         cpu_x86_load_seg(env, idx, 0);
6431         env->segs[idx].base = addr;
6432         break;
6433     case TARGET_ARCH_GET_GS:
6434     case TARGET_ARCH_GET_FS:
6435         if (code == TARGET_ARCH_GET_GS)
6436             idx = R_GS;
6437         else
6438             idx = R_FS;
6439         val = env->segs[idx].base;
6440         if (put_user(val, addr, abi_ulong))
6441             ret = -TARGET_EFAULT;
6442         break;
6443     default:
6444         ret = -TARGET_EINVAL;
6445         break;
6446     }
6447     return ret;
6448 }
6449 #endif /* defined(TARGET_ABI32) */
6450 #endif /* defined(TARGET_I386) */
6451 
6452 /*
6453  * These constants are generic.  Supply any that are missing from the host.
6454  */
6455 #ifndef PR_SET_NAME
6456 # define PR_SET_NAME    15
6457 # define PR_GET_NAME    16
6458 #endif
6459 #ifndef PR_SET_FP_MODE
6460 # define PR_SET_FP_MODE 45
6461 # define PR_GET_FP_MODE 46
6462 # define PR_FP_MODE_FR   (1 << 0)
6463 # define PR_FP_MODE_FRE  (1 << 1)
6464 #endif
6465 #ifndef PR_SVE_SET_VL
6466 # define PR_SVE_SET_VL  50
6467 # define PR_SVE_GET_VL  51
6468 # define PR_SVE_VL_LEN_MASK  0xffff
6469 # define PR_SVE_VL_INHERIT   (1 << 17)
6470 #endif
6471 #ifndef PR_PAC_RESET_KEYS
6472 # define PR_PAC_RESET_KEYS  54
6473 # define PR_PAC_APIAKEY   (1 << 0)
6474 # define PR_PAC_APIBKEY   (1 << 1)
6475 # define PR_PAC_APDAKEY   (1 << 2)
6476 # define PR_PAC_APDBKEY   (1 << 3)
6477 # define PR_PAC_APGAKEY   (1 << 4)
6478 #endif
6479 #ifndef PR_SET_TAGGED_ADDR_CTRL
6480 # define PR_SET_TAGGED_ADDR_CTRL 55
6481 # define PR_GET_TAGGED_ADDR_CTRL 56
6482 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6483 #endif
6484 #ifndef PR_SET_IO_FLUSHER
6485 # define PR_SET_IO_FLUSHER 57
6486 # define PR_GET_IO_FLUSHER 58
6487 #endif
6488 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6489 # define PR_SET_SYSCALL_USER_DISPATCH 59
6490 # define PR_SYS_DISPATCH_OFF 0
6491 # define PR_SYS_DISPATCH_ON 1
6492 # define SYSCALL_DISPATCH_FILTER_ALLOW 0
6493 # define SYSCALL_DISPATCH_FILTER_BLOCK 1
6494 #endif
6495 #ifndef PR_SME_SET_VL
6496 # define PR_SME_SET_VL  63
6497 # define PR_SME_GET_VL  64
6498 # define PR_SME_VL_LEN_MASK  0xffff
6499 # define PR_SME_VL_INHERIT   (1 << 17)
6500 #endif
6501 #ifndef PR_GET_SHADOW_STACK_STATUS
6502 # define PR_GET_SHADOW_STACK_STATUS  74
6503 # define PR_SET_SHADOW_STACK_STATUS  75
6504 # define PR_LOCK_SHADOW_STACK_STATUS 76
6505 #endif
6506 #ifndef SHADOW_STACK_SET_TOKEN
6507 # define SHADOW_STACK_SET_TOKEN  (1u << 0)
6508 #endif
6509 #ifndef SHADOW_STACK_SET_MARKER
6510 # define SHADOW_STACK_SET_MARKER (1u << 1)
6511 #endif
6512 
6513 #include "target_prctl.h"
6514 
6515 static abi_long do_prctl_inval0(CPUArchState *env)
6516 {
6517     return -TARGET_EINVAL;
6518 }
6519 
6520 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6521 {
6522     return -TARGET_EINVAL;
6523 }
6524 
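/*
 * Per-target implementations are provided by target_prctl.h; any hook a
 * target does not define falls back to the -TARGET_EINVAL stubs above.
 */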
6525 #ifndef do_prctl_get_fp_mode
6526 #define do_prctl_get_fp_mode do_prctl_inval0
6527 #endif
6528 #ifndef do_prctl_set_fp_mode
6529 #define do_prctl_set_fp_mode do_prctl_inval1
6530 #endif
6531 #ifndef do_prctl_sve_get_vl
6532 #define do_prctl_sve_get_vl do_prctl_inval0
6533 #endif
6534 #ifndef do_prctl_sve_set_vl
6535 #define do_prctl_sve_set_vl do_prctl_inval1
6536 #endif
6537 #ifndef do_prctl_reset_keys
6538 #define do_prctl_reset_keys do_prctl_inval1
6539 #endif
6540 #ifndef do_prctl_set_tagged_addr_ctrl
6541 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6542 #endif
6543 #ifndef do_prctl_get_tagged_addr_ctrl
6544 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6545 #endif
6546 #ifndef do_prctl_get_unalign
6547 #define do_prctl_get_unalign do_prctl_inval1
6548 #endif
6549 #ifndef do_prctl_set_unalign
6550 #define do_prctl_set_unalign do_prctl_inval1
6551 #endif
6552 #ifndef do_prctl_sme_get_vl
6553 #define do_prctl_sme_get_vl do_prctl_inval0
6554 #endif
6555 #ifndef do_prctl_sme_set_vl
6556 #define do_prctl_sme_set_vl do_prctl_inval1
6557 #endif
6558 #ifndef do_prctl_get_shadow_stack_status
6559 #define do_prctl_get_shadow_stack_status do_prctl_inval1
6560 #endif
6561 #ifndef do_prctl_set_shadow_stack_status
6562 #define do_prctl_set_shadow_stack_status do_prctl_inval1
6563 #endif
6564 #ifndef do_prctl_lock_shadow_stack_status
6565 #define do_prctl_lock_shadow_stack_status do_prctl_inval1
6566 #endif
6567 
6568 static abi_long do_prctl_syscall_user_dispatch(CPUArchState *env,
6569                                                abi_ulong arg2, abi_ulong arg3,
6570                                                abi_ulong arg4, abi_ulong arg5)
6571 {
6572     CPUState *cpu = env_cpu(env);
6573     TaskState *ts = get_task_state(cpu);
6574 
6575     switch (arg2) {
6576     case PR_SYS_DISPATCH_OFF:
6577         if (arg3 || arg4 || arg5) {
6578             return -TARGET_EINVAL;
6579         }
6580         ts->sys_dispatch_len = -1;
6581         return 0;
6582     case PR_SYS_DISPATCH_ON:
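        /* Reject an [arg3, arg3 + arg4) region whose end wraps around. */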
6583         if (arg3 && arg3 + arg4 <= arg3) {
6584             return -TARGET_EINVAL;
6585         }
6586         if (arg5 && !access_ok(cpu, VERIFY_READ, arg5, 1)) {
6587             return -TARGET_EFAULT;
6588         }
6589         ts->sys_dispatch = arg3;
6590         ts->sys_dispatch_len = arg4;
6591         ts->sys_dispatch_selector = arg5;
6592         return 0;
6593     default:
6594         return -TARGET_EINVAL;
6595     }
6596 }
6597 
6598 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6599                          abi_long arg3, abi_long arg4, abi_long arg5)
6600 {
6601     abi_long ret;
6602 
6603     switch (option) {
6604     case PR_GET_PDEATHSIG:
6605         {
6606             int deathsig;
6607             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6608                                   arg3, arg4, arg5));
6609             if (!is_error(ret) &&
6610                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6611                 return -TARGET_EFAULT;
6612             }
6613             return ret;
6614         }
6615     case PR_SET_PDEATHSIG:
6616         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6617                                arg3, arg4, arg5));
6618     case PR_GET_NAME:
6619         {
6620             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6621             if (!name) {
6622                 return -TARGET_EFAULT;
6623             }
6624             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6625                                   arg3, arg4, arg5));
6626             unlock_user(name, arg2, 16);
6627             return ret;
6628         }
6629     case PR_SET_NAME:
6630         {
6631             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6632             if (!name) {
6633                 return -TARGET_EFAULT;
6634             }
6635             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6636                                   arg3, arg4, arg5));
6637             unlock_user(name, arg2, 0);
6638             return ret;
6639         }
6640     case PR_GET_FP_MODE:
6641         return do_prctl_get_fp_mode(env);
6642     case PR_SET_FP_MODE:
6643         return do_prctl_set_fp_mode(env, arg2);
6644     case PR_SVE_GET_VL:
6645         return do_prctl_sve_get_vl(env);
6646     case PR_SVE_SET_VL:
6647         return do_prctl_sve_set_vl(env, arg2);
6648     case PR_SME_GET_VL:
6649         return do_prctl_sme_get_vl(env);
6650     case PR_SME_SET_VL:
6651         return do_prctl_sme_set_vl(env, arg2);
6652     case PR_PAC_RESET_KEYS:
6653         if (arg3 || arg4 || arg5) {
6654             return -TARGET_EINVAL;
6655         }
6656         return do_prctl_reset_keys(env, arg2);
6657     case PR_SET_TAGGED_ADDR_CTRL:
6658         if (arg3 || arg4 || arg5) {
6659             return -TARGET_EINVAL;
6660         }
6661         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6662     case PR_GET_TAGGED_ADDR_CTRL:
6663         if (arg2 || arg3 || arg4 || arg5) {
6664             return -TARGET_EINVAL;
6665         }
6666         return do_prctl_get_tagged_addr_ctrl(env);
6667     case PR_GET_SHADOW_STACK_STATUS:
6668         if (arg3 || arg4 || arg5) {
6669             return -TARGET_EINVAL;
6670         }
6671         return do_prctl_get_shadow_stack_status(env, arg2);
6672     case PR_SET_SHADOW_STACK_STATUS:
6673         if (arg3 || arg4 || arg5) {
6674             return -TARGET_EINVAL;
6675         }
6676         return do_prctl_set_shadow_stack_status(env, arg2);
6677     case PR_LOCK_SHADOW_STACK_STATUS:
6678         if (arg3 || arg4 || arg5) {
6679             return -TARGET_EINVAL;
6680         }
6681         return do_prctl_lock_shadow_stack_status(env, arg2);
6682 
6683     case PR_GET_UNALIGN:
6684         return do_prctl_get_unalign(env, arg2);
6685     case PR_SET_UNALIGN:
6686         return do_prctl_set_unalign(env, arg2);
6687 
6688     case PR_SET_SYSCALL_USER_DISPATCH:
6689         return do_prctl_syscall_user_dispatch(env, arg2, arg3, arg4, arg5);
6690 
6691     case PR_CAP_AMBIENT:
6692     case PR_CAPBSET_READ:
6693     case PR_CAPBSET_DROP:
6694     case PR_GET_DUMPABLE:
6695     case PR_SET_DUMPABLE:
6696     case PR_GET_KEEPCAPS:
6697     case PR_SET_KEEPCAPS:
6698     case PR_GET_SECUREBITS:
6699     case PR_SET_SECUREBITS:
6700     case PR_GET_TIMING:
6701     case PR_SET_TIMING:
6702     case PR_GET_TIMERSLACK:
6703     case PR_SET_TIMERSLACK:
6704     case PR_MCE_KILL:
6705     case PR_MCE_KILL_GET:
6706     case PR_GET_NO_NEW_PRIVS:
6707     case PR_SET_NO_NEW_PRIVS:
6708     case PR_GET_IO_FLUSHER:
6709     case PR_SET_IO_FLUSHER:
6710     case PR_SET_CHILD_SUBREAPER:
6711     case PR_GET_SPECULATION_CTRL:
6712     case PR_SET_SPECULATION_CTRL:
6713         /* Some prctl options have no pointer arguments; pass them straight through. */
6714         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6715 
6716     case PR_GET_CHILD_SUBREAPER:
6717         {
6718             int val;
6719             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6720                                   arg3, arg4, arg5));
6721             if (!is_error(ret) && put_user_s32(val, arg2)) {
6722                 return -TARGET_EFAULT;
6723             }
6724             return ret;
6725         }
6726 
6727     case PR_GET_TID_ADDRESS:
6728         {
6729             TaskState *ts = get_task_state(env_cpu(env));
6730             return put_user_ual(ts->child_tidptr, arg2);
6731         }
6732 
6733     case PR_GET_FPEXC:
6734     case PR_SET_FPEXC:
6735         /* Was used for SPE on PowerPC. */
6736         return -TARGET_EINVAL;
6737 
6738     case PR_GET_ENDIAN:
6739     case PR_SET_ENDIAN:
6740     case PR_GET_FPEMU:
6741     case PR_SET_FPEMU:
6742     case PR_SET_MM:
6743     case PR_GET_SECCOMP:
6744     case PR_SET_SECCOMP:
6745     case PR_GET_THP_DISABLE:
6746     case PR_SET_THP_DISABLE:
6747     case PR_GET_TSC:
6748     case PR_SET_TSC:
6749         /* Refuse these, to prevent the target disabling features we need. */
6750         return -TARGET_EINVAL;
6751 
6752     default:
6753         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6754                       option);
6755         return -TARGET_EINVAL;
6756     }
6757 }
6758 
6759 #ifdef TARGET_AARCH64
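/*
 * Emulation of map_shadow_stack() for the AArch64 Guarded Control Stack:
 * allocate a GCS region and, if requested, seed it with a cap token (and
 * room for a top-of-stack marker), mirroring the kernel's behaviour.
 */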
6760 static abi_long do_map_shadow_stack(CPUArchState *env, abi_ulong addr,
6761                                     abi_ulong size, abi_int flags)
6762 {
6763     ARMCPU *cpu = env_archcpu(env);
6764     abi_ulong alloc_size;
6765 
6766     if (!cpu_isar_feature(aa64_gcs, cpu)) {
6767         return -TARGET_EOPNOTSUPP;
6768     }
6769     if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER)) {
6770         return -TARGET_EINVAL;
6771     }
6772     if (addr & ~TARGET_PAGE_MASK) {
6773         return -TARGET_EINVAL;
6774     }
6775     if (size == 8 || !QEMU_IS_ALIGNED(size, 8)) {
6776         return -TARGET_EINVAL;
6777     }
6778 
6779     alloc_size = TARGET_PAGE_ALIGN(size);
6780     if (alloc_size < size) {
6781         return -TARGET_EOVERFLOW;
6782     }
6783 
6784     mmap_lock();
6785     addr = gcs_alloc(addr, alloc_size);
6786     if (addr != -1) {
6787         if (flags & SHADOW_STACK_SET_TOKEN) {
6788             abi_ptr cap_ptr = addr + size - 8;
6789             uint64_t cap_val;
6790 
6791             if (flags & SHADOW_STACK_SET_MARKER) {
6792                 /* Leave an extra empty frame at top-of-stack. */
6793                 cap_ptr -= 8;
6794             }
6795             cap_val = (cap_ptr & TARGET_PAGE_MASK) | 1;
6796             if (put_user_u64(cap_val, cap_ptr)) {
6797                 /* Allocation succeeded above. */
6798                 g_assert_not_reached();
6799             }
6800         }
6801     }
6802     mmap_unlock();
6803     return get_errno(addr);
6804 }
6805 #endif
6806 
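/* Host stack size for threads created via clone() with CLONE_VM (256 KiB). */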
6807 #define NEW_STACK_SIZE 0x40000
6808 
6809 
6810 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
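/* State handed to clone_func() while the parent waits for the new thread
   to start up and report its tid. */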
6811 typedef struct {
6812     CPUArchState *env;
6813     pthread_mutex_t mutex;
6814     pthread_cond_t cond;
6815     pthread_t thread;
6816     uint32_t tid;
6817     abi_ulong child_tidptr;
6818     abi_ulong parent_tidptr;
6819     sigset_t sigmask;
6820 } new_thread_info;
6821 
6822 static void *clone_func(void *arg)
6823 {
6824     new_thread_info *info = arg;
6825     CPUArchState *env;
6826     CPUState *cpu;
6827     TaskState *ts;
6828 
6829     rcu_register_thread();
6830     tcg_register_thread();
6831     env = info->env;
6832     cpu = env_cpu(env);
6833     thread_cpu = cpu;
6834     ts = get_task_state(cpu);
6835     info->tid = sys_gettid();
6836     task_settid(ts);
6837     if (info->child_tidptr)
6838         put_user_u32(info->tid, info->child_tidptr);
6839     if (info->parent_tidptr)
6840         put_user_u32(info->tid, info->parent_tidptr);
6841     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6842     /* Enable signals.  */
6843     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6844     /* Signal to the parent that we're ready.  */
6845     pthread_mutex_lock(&info->mutex);
6846     pthread_cond_broadcast(&info->cond);
6847     pthread_mutex_unlock(&info->mutex);
6848     /* Wait until the parent has finished initializing the tls state.  */
6849     pthread_mutex_lock(&clone_lock);
6850     pthread_mutex_unlock(&clone_lock);
6851     cpu_loop(env);
6852     /* never exits */
6853     return NULL;
6854 }
6855 
6856 void clone_fork_start(void)
6857 {
6858     pthread_mutex_lock(&clone_lock);
6859 }
6860 
6861 void clone_fork_end(bool child)
6862 {
6863     if (child) {
6864         pthread_mutex_init(&clone_lock, NULL);
6865     } else {
6866         pthread_mutex_unlock(&clone_lock);
6867     }
6868 }
6869 
6870 /* do_fork() must return host values and target errnos (unlike most
6871    do_*() functions). */
6872 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6873                    abi_ulong parent_tidptr, target_ulong newtls,
6874                    abi_ulong child_tidptr)
6875 {
6876     CPUState *cpu = env_cpu(env);
6877     int ret;
6878     TaskState *ts;
6879     CPUState *new_cpu;
6880     CPUArchState *new_env;
6881     sigset_t sigmask;
6882 
6883     flags &= ~CLONE_IGNORED_FLAGS;
6884 
6885     /* Emulate vfork() with fork() */
6886     if (flags & CLONE_VFORK)
6887         flags &= ~(CLONE_VFORK | CLONE_VM);
6888 
6889     if (flags & CLONE_VM) {
6890         TaskState *parent_ts = get_task_state(cpu);
6891         new_thread_info info;
6892         pthread_attr_t attr;
6893 
6894         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6895             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6896             return -TARGET_EINVAL;
6897         }
6898 
6899         ts = g_new0(TaskState, 1);
6900         init_task_state(ts);
6901 
6902 #ifdef TARGET_AARCH64
6903         /*
6904          * If GCS is enabled in the parent thread, it is also enabled
6905          * in the child thread, but with a newly allocated stack.
6906          */
6907         abi_long new_gcspr = 0;
6908         if (env->cp15.gcscr_el[0] & GCSCR_PCRSEL) {
6909             new_gcspr = gcs_new_stack(ts);
6910             if (new_gcspr == -1) {
6911                 g_free(ts);
6912                 return -TARGET_ENOMEM;
6913             }
6914         }
6915 #endif
6916 
6917         /* Grab a mutex so that thread setup appears atomic.  */
6918         pthread_mutex_lock(&clone_lock);
6919 
6920         /*
6921          * If this is our first additional thread, we need to ensure we
6922          * generate code for parallel execution and flush old translations.
6923          * Do this now so that the copy gets CF_PARALLEL too.
6924          */
6925         begin_parallel_context(cpu);
6926 
6927         /* we create a new CPU instance. */
6928         new_env = cpu_copy(env);
6929         /* Init regs that differ from the parent.  */
6930         cpu_clone_regs_child(new_env, newsp, flags);
6931         cpu_clone_regs_parent(env, flags);
6932         new_cpu = env_cpu(new_env);
6933         new_cpu->opaque = ts;
6934         ts->bprm = parent_ts->bprm;
6935         ts->info = parent_ts->info;
6936         ts->signal_mask = parent_ts->signal_mask;
6937 
6938 #ifdef TARGET_AARCH64
6939         ts->gcs_el0_locked = parent_ts->gcs_el0_locked;
6940         new_env->cp15.gcspr_el[0] = new_gcspr;
6941 #endif
6942 
6943         if (flags & CLONE_CHILD_CLEARTID) {
6944             ts->child_tidptr = child_tidptr;
6945         }
6946 
6947         if (flags & CLONE_SETTLS) {
6948             cpu_set_tls (new_env, newtls);
6949         }
6950 
6951         memset(&info, 0, sizeof(info));
6952         pthread_mutex_init(&info.mutex, NULL);
6953         pthread_mutex_lock(&info.mutex);
6954         pthread_cond_init(&info.cond, NULL);
6955         info.env = new_env;
6956         if (flags & CLONE_CHILD_SETTID) {
6957             info.child_tidptr = child_tidptr;
6958         }
6959         if (flags & CLONE_PARENT_SETTID) {
6960             info.parent_tidptr = parent_tidptr;
6961         }
6962 
6963         ret = pthread_attr_init(&attr);
6964         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6965         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6966         /* It is not safe to deliver signals until the child has finished
6967            initializing, so temporarily block all signals.  */
6968         sigfillset(&sigmask);
6969         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6970         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6971 
6972         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6973         /* TODO: Free new CPU state if thread creation failed.  */
6974 
6975         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6976         pthread_attr_destroy(&attr);
6977         if (ret == 0) {
6978             /* Wait for the child to initialize.  */
6979             pthread_cond_wait(&info.cond, &info.mutex);
6980             ret = info.tid;
6981         } else {
6982             ret = -1;
6983         }
6984         pthread_mutex_unlock(&info.mutex);
6985         pthread_cond_destroy(&info.cond);
6986         pthread_mutex_destroy(&info.mutex);
6987         pthread_mutex_unlock(&clone_lock);
6988     } else {
6989         /* Without CLONE_VM, we consider this to be a fork. */
6990         if (flags & CLONE_INVALID_FORK_FLAGS) {
6991             return -TARGET_EINVAL;
6992         }
6993 
6994         /* We can't support custom termination signals */
6995         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6996             return -TARGET_EINVAL;
6997         }
6998 
6999 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
7000         if (flags & CLONE_PIDFD) {
7001             return -TARGET_EINVAL;
7002         }
7003 #endif
7004 
7005         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */
7006         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
7007             return -TARGET_EINVAL;
7008         }
7009 
7010         if (block_signals()) {
7011             return -QEMU_ERESTARTSYS;
7012         }
7013 
7014         fork_start();
7015         ret = fork();
7016         if (ret == 0) {
7017             /* Child Process.  */
7018             cpu_clone_regs_child(env, newsp, flags);
7019             fork_end(ret);
7020             /* There is a race condition here.  The parent process could
7021                theoretically read the TID in the child process before the child
7022                tid is set.  This would require using either ptrace
7023                (not implemented) or having *_tidptr point at a shared memory
7024                mapping.  We can't repeat the spinlock hack used above because
7025                the child process gets its own copy of the lock.  */
7026             if (flags & CLONE_CHILD_SETTID)
7027                 put_user_u32(sys_gettid(), child_tidptr);
7028             if (flags & CLONE_PARENT_SETTID)
7029                 put_user_u32(sys_gettid(), parent_tidptr);
7030             ts = get_task_state(cpu);
7031             if (flags & CLONE_SETTLS)
7032                 cpu_set_tls (env, newtls);
7033             if (flags & CLONE_CHILD_CLEARTID)
7034                 ts->child_tidptr = child_tidptr;
7035         } else {
7036             cpu_clone_regs_parent(env, flags);
7037             if (flags & CLONE_PIDFD) {
7038                 int pid_fd = 0;
7039 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
7040                 int pid_child = ret;
7041                 pid_fd = pidfd_open(pid_child, 0);
7042                 if (pid_fd >= 0) {
7043                     qemu_set_cloexec(pid_fd);
7044                 } else {
7045                     pid_fd = 0;
7046                 }
7047 #endif
7048                 put_user_u32(pid_fd, parent_tidptr);
7049             }
7050             fork_end(ret);
7051         }
7052         g_assert(!cpu_in_exclusive_context(cpu));
7053     }
7054     return ret;
7055 }
7056 
7057 /* Warning: doesn't handle Linux-specific flags... */
7058 static int target_to_host_fcntl_cmd(int cmd)
7059 {
7060     int ret;
7061 
7062     switch(cmd) {
7063     case TARGET_F_DUPFD:
7064     case TARGET_F_GETFD:
7065     case TARGET_F_SETFD:
7066     case TARGET_F_GETFL:
7067     case TARGET_F_SETFL:
7068     case TARGET_F_OFD_GETLK:
7069     case TARGET_F_OFD_SETLK:
7070     case TARGET_F_OFD_SETLKW:
7071         ret = cmd;
7072         break;
7073     case TARGET_F_GETLK:
7074         ret = F_GETLK;
7075         break;
7076     case TARGET_F_SETLK:
7077         ret = F_SETLK;
7078         break;
7079     case TARGET_F_SETLKW:
7080         ret = F_SETLKW;
7081         break;
7082     case TARGET_F_GETOWN:
7083         ret = F_GETOWN;
7084         break;
7085     case TARGET_F_SETOWN:
7086         ret = F_SETOWN;
7087         break;
7088     case TARGET_F_GETSIG:
7089         ret = F_GETSIG;
7090         break;
7091     case TARGET_F_SETSIG:
7092         ret = F_SETSIG;
7093         break;
7094 #if TARGET_ABI_BITS == 32
7095     case TARGET_F_GETLK64:
7096         ret = F_GETLK;
7097         break;
7098     case TARGET_F_SETLK64:
7099         ret = F_SETLK;
7100         break;
7101     case TARGET_F_SETLKW64:
7102         ret = F_SETLKW;
7103         break;
7104 #endif
7105     case TARGET_F_SETLEASE:
7106         ret = F_SETLEASE;
7107         break;
7108     case TARGET_F_GETLEASE:
7109         ret = F_GETLEASE;
7110         break;
7111 #ifdef F_DUPFD_CLOEXEC
7112     case TARGET_F_DUPFD_CLOEXEC:
7113         ret = F_DUPFD_CLOEXEC;
7114         break;
7115 #endif
7116     case TARGET_F_NOTIFY:
7117         ret = F_NOTIFY;
7118         break;
7119 #ifdef F_GETOWN_EX
7120     case TARGET_F_GETOWN_EX:
7121         ret = F_GETOWN_EX;
7122         break;
7123 #endif
7124 #ifdef F_SETOWN_EX
7125     case TARGET_F_SETOWN_EX:
7126         ret = F_SETOWN_EX;
7127         break;
7128 #endif
7129 #ifdef F_SETPIPE_SZ
7130     case TARGET_F_SETPIPE_SZ:
7131         ret = F_SETPIPE_SZ;
7132         break;
7133     case TARGET_F_GETPIPE_SZ:
7134         ret = F_GETPIPE_SZ;
7135         break;
7136 #endif
7137 #ifdef F_ADD_SEALS
7138     case TARGET_F_ADD_SEALS:
7139         ret = F_ADD_SEALS;
7140         break;
7141     case TARGET_F_GET_SEALS:
7142         ret = F_GET_SEALS;
7143         break;
7144 #endif
7145     default:
7146         ret = -TARGET_EINVAL;
7147         break;
7148     }
7149 
7150 #if defined(__powerpc64__)
7151     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which are
7152      * not supported by the kernel. The glibc fcntl wrapper adjusts them to
7153      * 5, 6 and 7 before making the syscall(). Since we make the syscall
7154      * directly, adjust to what is supported by the kernel.
7155      */
7156     if (ret >= F_GETLK && ret <= F_SETLKW) {
7157         ret -= F_GETLK - 5;
7158     }
7159 #endif
7160 
7161     return ret;
7162 }
7163 
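/*
 * X-macro: FLOCK_TRANSTBL is expanded twice below with different
 * definitions of TRANSTBL_CONVERT, generating both directions of the
 * l_type conversion from a single list of lock types.
 */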
7164 #define FLOCK_TRANSTBL \
7165     switch (type) { \
7166     TRANSTBL_CONVERT(F_RDLCK); \
7167     TRANSTBL_CONVERT(F_WRLCK); \
7168     TRANSTBL_CONVERT(F_UNLCK); \
7169     }
7170 
7171 static int target_to_host_flock(int type)
7172 {
7173 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
7174     FLOCK_TRANSTBL
7175 #undef  TRANSTBL_CONVERT
7176     return -TARGET_EINVAL;
7177 }
7178 
7179 static int host_to_target_flock(int type)
7180 {
7181 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
7182     FLOCK_TRANSTBL
7183 #undef  TRANSTBL_CONVERT
7184     /* If we don't know how to convert the value coming
7185      * from the host, copy it to the target field as-is.
7186      */
7187     return type;
7188 }
7189 
7190 static inline abi_long copy_from_user_flock(struct flock *fl,
7191                                             abi_ulong target_flock_addr)
7192 {
7193     struct target_flock *target_fl;
7194     int l_type;
7195 
7196     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7197         return -TARGET_EFAULT;
7198     }
7199 
7200     __get_user(l_type, &target_fl->l_type);
7201     l_type = target_to_host_flock(l_type);
7202     if (l_type < 0) {
7203         return l_type;
7204     }
7205     fl->l_type = l_type;
7206     __get_user(fl->l_whence, &target_fl->l_whence);
7207     __get_user(fl->l_start, &target_fl->l_start);
7208     __get_user(fl->l_len, &target_fl->l_len);
7209     __get_user(fl->l_pid, &target_fl->l_pid);
7210     unlock_user_struct(target_fl, target_flock_addr, 0);
7211     return 0;
7212 }
7213 
7214 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
7215                                           const struct flock *fl)
7216 {
7217     struct target_flock *target_fl;
7218     short l_type;
7219 
7220     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7221         return -TARGET_EFAULT;
7222     }
7223 
7224     l_type = host_to_target_flock(fl->l_type);
7225     __put_user(l_type, &target_fl->l_type);
7226     __put_user(fl->l_whence, &target_fl->l_whence);
7227     __put_user(fl->l_start, &target_fl->l_start);
7228     __put_user(fl->l_len, &target_fl->l_len);
7229     __put_user(fl->l_pid, &target_fl->l_pid);
7230     unlock_user_struct(target_fl, target_flock_addr, 1);
7231     return 0;
7232 }
7233 
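/*
 * Hooks for copying the 64-bit flock layout; on 32-bit Arm the OABI
 * variant below differs only in packing, so callers can select the
 * appropriate pair of functions at run time.
 */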
7234 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
7235 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
7236 
7237 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
7238 struct target_oabi_flock64 {
7239     abi_short l_type;
7240     abi_short l_whence;
7241     abi_llong l_start;
7242     abi_llong l_len;
7243     abi_int   l_pid;
7244 } QEMU_PACKED;
7245 
7246 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
7247                                                    abi_ulong target_flock_addr)
7248 {
7249     struct target_oabi_flock64 *target_fl;
7250     int l_type;
7251 
7252     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7253         return -TARGET_EFAULT;
7254     }
7255 
7256     __get_user(l_type, &target_fl->l_type);
7257     l_type = target_to_host_flock(l_type);
7258     if (l_type < 0) {
7259         return l_type;
7260     }
7261     fl->l_type = l_type;
7262     __get_user(fl->l_whence, &target_fl->l_whence);
7263     __get_user(fl->l_start, &target_fl->l_start);
7264     __get_user(fl->l_len, &target_fl->l_len);
7265     __get_user(fl->l_pid, &target_fl->l_pid);
7266     unlock_user_struct(target_fl, target_flock_addr, 0);
7267     return 0;
7268 }
7269 
7270 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7271                                                  const struct flock *fl)
7272 {
7273     struct target_oabi_flock64 *target_fl;
7274     short l_type;
7275 
7276     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7277         return -TARGET_EFAULT;
7278     }
7279 
7280     l_type = host_to_target_flock(fl->l_type);
7281     __put_user(l_type, &target_fl->l_type);
7282     __put_user(fl->l_whence, &target_fl->l_whence);
7283     __put_user(fl->l_start, &target_fl->l_start);
7284     __put_user(fl->l_len, &target_fl->l_len);
7285     __put_user(fl->l_pid, &target_fl->l_pid);
7286     unlock_user_struct(target_fl, target_flock_addr, 1);
7287     return 0;
7288 }
7289 #endif
7290 
7291 static inline abi_long copy_from_user_flock64(struct flock *fl,
7292                                               abi_ulong target_flock_addr)
7293 {
7294     struct target_flock64 *target_fl;
7295     int l_type;
7296 
7297     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7298         return -TARGET_EFAULT;
7299     }
7300 
7301     __get_user(l_type, &target_fl->l_type);
7302     l_type = target_to_host_flock(l_type);
7303     if (l_type < 0) {
7304         return l_type;
7305     }
7306     fl->l_type = l_type;
7307     __get_user(fl->l_whence, &target_fl->l_whence);
7308     __get_user(fl->l_start, &target_fl->l_start);
7309     __get_user(fl->l_len, &target_fl->l_len);
7310     __get_user(fl->l_pid, &target_fl->l_pid);
7311     unlock_user_struct(target_fl, target_flock_addr, 0);
7312     return 0;
7313 }
7314 
7315 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7316                                             const struct flock *fl)
7317 {
7318     struct target_flock64 *target_fl;
7319     short l_type;
7320 
7321     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7322         return -TARGET_EFAULT;
7323     }
7324 
7325     l_type = host_to_target_flock(fl->l_type);
7326     __put_user(l_type, &target_fl->l_type);
7327     __put_user(fl->l_whence, &target_fl->l_whence);
7328     __put_user(fl->l_start, &target_fl->l_start);
7329     __put_user(fl->l_len, &target_fl->l_len);
7330     __put_user(fl->l_pid, &target_fl->l_pid);
7331     unlock_user_struct(target_fl, target_flock_addr, 1);
7332     return 0;
7333 }
7334 
7335 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7336 {
7337     struct flock fl;
7338 #ifdef F_GETOWN_EX
7339     struct f_owner_ex fox;
7340     struct target_f_owner_ex *target_fox;
7341 #endif
7342     abi_long ret;
7343     int host_cmd = target_to_host_fcntl_cmd(cmd);
7344 
7345     if (host_cmd == -TARGET_EINVAL)
7346 	    return host_cmd;
7347 
7348     switch(cmd) {
7349     case TARGET_F_GETLK:
7350         ret = copy_from_user_flock(&fl, arg);
7351         if (ret) {
7352             return ret;
7353         }
7354         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7355         if (ret == 0) {
7356             ret = copy_to_user_flock(arg, &fl);
7357         }
7358         break;
7359 
7360     case TARGET_F_SETLK:
7361     case TARGET_F_SETLKW:
7362         ret = copy_from_user_flock(&fl, arg);
7363         if (ret) {
7364             return ret;
7365         }
7366         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7367         break;
7368 
7369     case TARGET_F_GETLK64:
7370     case TARGET_F_OFD_GETLK:
7371         ret = copy_from_user_flock64(&fl, arg);
7372         if (ret) {
7373             return ret;
7374         }
7375         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7376         if (ret == 0) {
7377             ret = copy_to_user_flock64(arg, &fl);
7378         }
7379         break;
7380     case TARGET_F_SETLK64:
7381     case TARGET_F_SETLKW64:
7382     case TARGET_F_OFD_SETLK:
7383     case TARGET_F_OFD_SETLKW:
7384         ret = copy_from_user_flock64(&fl, arg);
7385         if (ret) {
7386             return ret;
7387         }
7388         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7389         break;
7390 
7391     case TARGET_F_GETFL:
7392         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7393         if (ret >= 0) {
7394             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7395             /* Tell 32-bit guests that the fd uses O_LARGEFILE on 64-bit hosts: */
7396             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7397                 ret |= TARGET_O_LARGEFILE;
7398             }
7399         }
7400         break;
7401 
7402     case TARGET_F_SETFL:
7403         ret = get_errno(safe_fcntl(fd, host_cmd,
7404                                    target_to_host_bitmask(arg,
7405                                                           fcntl_flags_tbl)));
7406         break;
7407 
7408 #ifdef F_GETOWN_EX
7409     case TARGET_F_GETOWN_EX:
7410         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7411         if (ret >= 0) {
7412             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7413                 return -TARGET_EFAULT;
7414             target_fox->type = tswap32(fox.type);
7415             target_fox->pid = tswap32(fox.pid);
7416             unlock_user_struct(target_fox, arg, 1);
7417         }
7418         break;
7419 #endif
7420 
7421 #ifdef F_SETOWN_EX
7422     case TARGET_F_SETOWN_EX:
7423         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7424             return -TARGET_EFAULT;
7425         fox.type = tswap32(target_fox->type);
7426         fox.pid = tswap32(target_fox->pid);
7427         unlock_user_struct(target_fox, arg, 0);
7428         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7429         break;
7430 #endif
7431 
7432     case TARGET_F_SETSIG:
7433         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7434         break;
7435 
7436     case TARGET_F_GETSIG:
7437         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7438         break;
7439 
7440     case TARGET_F_SETOWN:
7441     case TARGET_F_GETOWN:
7442     case TARGET_F_SETLEASE:
7443     case TARGET_F_GETLEASE:
7444     case TARGET_F_SETPIPE_SZ:
7445     case TARGET_F_GETPIPE_SZ:
7446     case TARGET_F_ADD_SEALS:
7447     case TARGET_F_GET_SEALS:
7448         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7449         break;
7450 
7451     default:
7452         ret = get_errno(safe_fcntl(fd, cmd, arg));
7453         break;
7454     }
7455     return ret;
7456 }
7457 
7458 #ifdef USE_UID16
7459 
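/*
 * The legacy 16-bit UID/GID ABI cannot represent IDs above 65535; clamp
 * such IDs to 65534 (the kernel's default overflow ID), and treat a
 * 16-bit -1 specially so that "no change" arguments are preserved.
 */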
7460 static inline int high2lowuid(int uid)
7461 {
7462     if (uid > 65535)
7463         return 65534;
7464     else
7465         return uid;
7466 }
7467 
7468 static inline int high2lowgid(int gid)
7469 {
7470     if (gid > 65535)
7471         return 65534;
7472     else
7473         return gid;
7474 }
7475 
7476 static inline int low2highuid(int uid)
7477 {
7478     if ((int16_t)uid == -1)
7479         return -1;
7480     else
7481         return uid;
7482 }
7483 
7484 static inline int low2highgid(int gid)
7485 {
7486     if ((int16_t)gid == -1)
7487         return -1;
7488     else
7489         return gid;
7490 }
7491 static inline int tswapid(int id)
7492 {
7493     return tswap16(id);
7494 }
7495 
7496 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7497 
7498 #else /* !USE_UID16 */
7499 static inline int high2lowuid(int uid)
7500 {
7501     return uid;
7502 }
7503 static inline int high2lowgid(int gid)
7504 {
7505     return gid;
7506 }
7507 static inline int low2highuid(int uid)
7508 {
7509     return uid;
7510 }
7511 static inline int low2highgid(int gid)
7512 {
7513     return gid;
7514 }
7515 static inline int tswapid(int id)
7516 {
7517     return tswap32(id);
7518 }
7519 
7520 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7521 
7522 #endif /* USE_UID16 */
7523 
7524 /* We must do direct syscalls for setting UID/GID, because we want to
7525  * implement the Linux system call semantics of "change only for this thread",
7526  * not the libc/POSIX semantics of "change for all threads in process".
7527  * (See http://ewontfix.com/17/ for more details.)
7528  * We use the 32-bit version of the syscalls if present; if it is not
7529  * then either the host architecture supports 32-bit UIDs natively with
7530  * the standard syscall, or the 16-bit UID is the best we can do.
7531  */
7532 #ifdef __NR_setuid32
7533 #define __NR_sys_setuid __NR_setuid32
7534 #else
7535 #define __NR_sys_setuid __NR_setuid
7536 #endif
7537 #ifdef __NR_setgid32
7538 #define __NR_sys_setgid __NR_setgid32
7539 #else
7540 #define __NR_sys_setgid __NR_setgid
7541 #endif
7542 #ifdef __NR_setresuid32
7543 #define __NR_sys_setresuid __NR_setresuid32
7544 #else
7545 #define __NR_sys_setresuid __NR_setresuid
7546 #endif
7547 #ifdef __NR_setresgid32
7548 #define __NR_sys_setresgid __NR_setresgid32
7549 #else
7550 #define __NR_sys_setresgid __NR_setresgid
7551 #endif
7552 #ifdef __NR_setgroups32
7553 #define __NR_sys_setgroups __NR_setgroups32
7554 #else
7555 #define __NR_sys_setgroups __NR_setgroups
7556 #endif
7557 #ifdef __NR_setreuid32
7558 #define __NR_sys_setreuid __NR_setreuid32
7559 #else
7560 #define __NR_sys_setreuid __NR_setreuid
7561 #endif
7562 #ifdef __NR_setregid32
7563 #define __NR_sys_setregid __NR_setregid32
7564 #else
7565 #define __NR_sys_setregid __NR_setregid
7566 #endif
7567 
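/*
 * These expand to thin wrappers that invoke the raw syscall numbers
 * selected above, bypassing the libc wrappers (see the comment above
 * about per-thread credential changes).
 */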
7568 _syscall1(int, sys_setuid, uid_t, uid)
7569 _syscall1(int, sys_setgid, gid_t, gid)
7570 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7571 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7572 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7573 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid)
7574 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid)
7575 
7576 void syscall_init(void)
7577 {
7578     IOCTLEntry *ie;
7579     const argtype *arg_type;
7580     int size;
7581 
7582     thunk_init(STRUCT_MAX);
7583 
7584 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7585 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7586 #include "syscall_types.h"
7587 #undef STRUCT
7588 #undef STRUCT_SPECIAL
7589 
7590     /* We patch the ioctl size if necessary, relying on the fact that
7591        no ioctl has all bits set to '1' in the size field. */
7592     ie = ioctl_entries;
7593     while (ie->target_cmd != 0) {
7594         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7595             TARGET_IOC_SIZEMASK) {
7596             arg_type = ie->arg_type;
7597             if (arg_type[0] != TYPE_PTR) {
7598                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7599                         ie->target_cmd);
7600                 exit(1);
7601             }
7602             arg_type++;
7603             size = thunk_type_size(arg_type, 0);
7604             ie->target_cmd = (ie->target_cmd &
7605                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7606                 (size << TARGET_IOC_SIZESHIFT);
7607         }
7608 
7609         /* automatic consistency check when host and target are the same arch */
7610 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7611     (defined(__x86_64__) && defined(TARGET_X86_64))
7612         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7613             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7614                     ie->name, ie->target_cmd, ie->host_cmd);
7615         }
7616 #endif
7617         ie++;
7618     }
7619 }
7620 
7621 #ifdef TARGET_NR_truncate64
7622 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7623                                          abi_long arg2,
7624                                          abi_long arg3,
7625                                          abi_long arg4)
7626 {
7627     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7628         arg2 = arg3;
7629         arg3 = arg4;
7630     }
7631     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7632 }
7633 #endif
7634 
7635 #ifdef TARGET_NR_ftruncate64
7636 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7637                                           abi_long arg2,
7638                                           abi_long arg3,
7639                                           abi_long arg4)
7640 {
7641     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7642         arg2 = arg3;
7643         arg3 = arg4;
7644     }
7645     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7646 }
7647 #endif
7648 
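/*
 * Converters between the guest's target_itimerspec / target__kernel_itimerspec
 * layouts and the host struct itimerspec, used by the timer_settime/gettime
 * and timerfd_settime/gettime emulation below.
 */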
7649 #if defined(TARGET_NR_timer_settime) || \
7650     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7651 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7652                                                  abi_ulong target_addr)
7653 {
7654     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7655                                 offsetof(struct target_itimerspec,
7656                                          it_interval)) ||
7657         target_to_host_timespec(&host_its->it_value, target_addr +
7658                                 offsetof(struct target_itimerspec,
7659                                          it_value))) {
7660         return -TARGET_EFAULT;
7661     }
7662 
7663     return 0;
7664 }
7665 #endif
7666 
7667 #if defined(TARGET_NR_timer_settime64) || \
7668     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7669 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7670                                                    abi_ulong target_addr)
7671 {
7672     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7673                                   offsetof(struct target__kernel_itimerspec,
7674                                            it_interval)) ||
7675         target_to_host_timespec64(&host_its->it_value, target_addr +
7676                                   offsetof(struct target__kernel_itimerspec,
7677                                            it_value))) {
7678         return -TARGET_EFAULT;
7679     }
7680 
7681     return 0;
7682 }
7683 #endif
7684 
7685 #if ((defined(TARGET_NR_timerfd_gettime) || \
7686       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7687       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7688 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7689                                                  struct itimerspec *host_its)
7690 {
7691     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7692                                                        it_interval),
7693                                 &host_its->it_interval) ||
7694         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7695                                                        it_value),
7696                                 &host_its->it_value)) {
7697         return -TARGET_EFAULT;
7698     }
7699     return 0;
7700 }
7701 #endif
7702 
7703 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7704       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7705       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7706 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7707                                                    struct itimerspec *host_its)
7708 {
7709     if (host_to_target_timespec64(target_addr +
7710                                   offsetof(struct target__kernel_itimerspec,
7711                                            it_interval),
7712                                   &host_its->it_interval) ||
7713         host_to_target_timespec64(target_addr +
7714                                   offsetof(struct target__kernel_itimerspec,
7715                                            it_value),
7716                                   &host_its->it_value)) {
7717         return -TARGET_EFAULT;
7718     }
7719     return 0;
7720 }
7721 #endif
7722 
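/*
 * Field-by-field conversion between the guest's struct target_timex and
 * the host struct timex, used by adjtimex() and clock_adjtime().
 */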
7723 #if defined(TARGET_NR_adjtimex) || \
7724     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7725 static inline abi_long target_to_host_timex(struct timex *host_tx,
7726                                             abi_long target_addr)
7727 {
7728     struct target_timex *target_tx;
7729 
7730     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7731         return -TARGET_EFAULT;
7732     }
7733 
7734     __get_user(host_tx->modes, &target_tx->modes);
7735     __get_user(host_tx->offset, &target_tx->offset);
7736     __get_user(host_tx->freq, &target_tx->freq);
7737     __get_user(host_tx->maxerror, &target_tx->maxerror);
7738     __get_user(host_tx->esterror, &target_tx->esterror);
7739     __get_user(host_tx->status, &target_tx->status);
7740     __get_user(host_tx->constant, &target_tx->constant);
7741     __get_user(host_tx->precision, &target_tx->precision);
7742     __get_user(host_tx->tolerance, &target_tx->tolerance);
7743     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7744     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7745     __get_user(host_tx->tick, &target_tx->tick);
7746     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7747     __get_user(host_tx->jitter, &target_tx->jitter);
7748     __get_user(host_tx->shift, &target_tx->shift);
7749     __get_user(host_tx->stabil, &target_tx->stabil);
7750     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7751     __get_user(host_tx->calcnt, &target_tx->calcnt);
7752     __get_user(host_tx->errcnt, &target_tx->errcnt);
7753     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7754     __get_user(host_tx->tai, &target_tx->tai);
7755 
7756     unlock_user_struct(target_tx, target_addr, 0);
7757     return 0;
7758 }
7759 
7760 static inline abi_long host_to_target_timex(abi_long target_addr,
7761                                             struct timex *host_tx)
7762 {
7763     struct target_timex *target_tx;
7764 
7765     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7766         return -TARGET_EFAULT;
7767     }
7768 
7769     __put_user(host_tx->modes, &target_tx->modes);
7770     __put_user(host_tx->offset, &target_tx->offset);
7771     __put_user(host_tx->freq, &target_tx->freq);
7772     __put_user(host_tx->maxerror, &target_tx->maxerror);
7773     __put_user(host_tx->esterror, &target_tx->esterror);
7774     __put_user(host_tx->status, &target_tx->status);
7775     __put_user(host_tx->constant, &target_tx->constant);
7776     __put_user(host_tx->precision, &target_tx->precision);
7777     __put_user(host_tx->tolerance, &target_tx->tolerance);
7778     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7779     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7780     __put_user(host_tx->tick, &target_tx->tick);
7781     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7782     __put_user(host_tx->jitter, &target_tx->jitter);
7783     __put_user(host_tx->shift, &target_tx->shift);
7784     __put_user(host_tx->stabil, &target_tx->stabil);
7785     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7786     __put_user(host_tx->calcnt, &target_tx->calcnt);
7787     __put_user(host_tx->errcnt, &target_tx->errcnt);
7788     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7789     __put_user(host_tx->tai, &target_tx->tai);
7790 
7791     unlock_user_struct(target_tx, target_addr, 1);
7792     return 0;
7793 }
7794 #endif
7795 
7796 
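/*
 * 64-bit time_t variant: the embedded time value is converted separately
 * with copy_{from,to}_user_timeval64() before the remaining fields.
 */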
7797 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7798 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7799                                               abi_long target_addr)
7800 {
7801     struct target__kernel_timex *target_tx;
7802 
7803     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7804                                  offsetof(struct target__kernel_timex,
7805                                           time))) {
7806         return -TARGET_EFAULT;
7807     }
7808 
7809     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7810         return -TARGET_EFAULT;
7811     }
7812 
7813     __get_user(host_tx->modes, &target_tx->modes);
7814     __get_user(host_tx->offset, &target_tx->offset);
7815     __get_user(host_tx->freq, &target_tx->freq);
7816     __get_user(host_tx->maxerror, &target_tx->maxerror);
7817     __get_user(host_tx->esterror, &target_tx->esterror);
7818     __get_user(host_tx->status, &target_tx->status);
7819     __get_user(host_tx->constant, &target_tx->constant);
7820     __get_user(host_tx->precision, &target_tx->precision);
7821     __get_user(host_tx->tolerance, &target_tx->tolerance);
7822     __get_user(host_tx->tick, &target_tx->tick);
7823     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7824     __get_user(host_tx->jitter, &target_tx->jitter);
7825     __get_user(host_tx->shift, &target_tx->shift);
7826     __get_user(host_tx->stabil, &target_tx->stabil);
7827     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7828     __get_user(host_tx->calcnt, &target_tx->calcnt);
7829     __get_user(host_tx->errcnt, &target_tx->errcnt);
7830     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7831     __get_user(host_tx->tai, &target_tx->tai);
7832 
7833     unlock_user_struct(target_tx, target_addr, 0);
7834     return 0;
7835 }
7836 
7837 static inline abi_long host_to_target_timex64(abi_long target_addr,
7838                                               struct timex *host_tx)
7839 {
7840     struct target__kernel_timex *target_tx;
7841 
7842     if (copy_to_user_timeval64(target_addr +
7843                                offsetof(struct target__kernel_timex, time),
7844                                &host_tx->time)) {
7845         return -TARGET_EFAULT;
7846     }
7847 
7848     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7849         return -TARGET_EFAULT;
7850     }
7851 
7852     __put_user(host_tx->modes, &target_tx->modes);
7853     __put_user(host_tx->offset, &target_tx->offset);
7854     __put_user(host_tx->freq, &target_tx->freq);
7855     __put_user(host_tx->maxerror, &target_tx->maxerror);
7856     __put_user(host_tx->esterror, &target_tx->esterror);
7857     __put_user(host_tx->status, &target_tx->status);
7858     __put_user(host_tx->constant, &target_tx->constant);
7859     __put_user(host_tx->precision, &target_tx->precision);
7860     __put_user(host_tx->tolerance, &target_tx->tolerance);
7861     __put_user(host_tx->tick, &target_tx->tick);
7862     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7863     __put_user(host_tx->jitter, &target_tx->jitter);
7864     __put_user(host_tx->shift, &target_tx->shift);
7865     __put_user(host_tx->stabil, &target_tx->stabil);
7866     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7867     __put_user(host_tx->calcnt, &target_tx->calcnt);
7868     __put_user(host_tx->errcnt, &target_tx->errcnt);
7869     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7870     __put_user(host_tx->tai, &target_tx->tai);
7871 
7872     unlock_user_struct(target_tx, target_addr, 1);
7873     return 0;
7874 }
7875 #endif
7876 
7877 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7878 #define sigev_notify_thread_id _sigev_un._tid
7879 #endif
7880 
7881 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7882                                                abi_ulong target_addr)
7883 {
7884     struct target_sigevent *target_sevp;
7885 
7886     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7887         return -TARGET_EFAULT;
7888     }
7889 
7890     /* This union is awkward on 64 bit systems because it has a 32 bit
7891      * integer and a pointer in it; we follow the conversion approach
7892      * used for handling sigval types in signal.c so the guest should get
7893      * the correct value back even if we did a 64 bit byteswap and it's
7894      * using the 32 bit integer.
7895      */
7896     host_sevp->sigev_value.sival_ptr =
7897         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7898     host_sevp->sigev_signo =
7899         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7900     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7901     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7902 
7903     unlock_user_struct(target_sevp, target_addr, 1);
7904     return 0;
7905 }
7906 
7907 #if defined(TARGET_NR_mlockall)
7908 static inline int target_to_host_mlockall_arg(int arg)
7909 {
7910     int result = 0;
7911 
7912     if (arg & TARGET_MCL_CURRENT) {
7913         result |= MCL_CURRENT;
7914     }
7915     if (arg & TARGET_MCL_FUTURE) {
7916         result |= MCL_FUTURE;
7917     }
7918 #ifdef MCL_ONFAULT
7919     if (arg & TARGET_MCL_ONFAULT) {
7920         result |= MCL_ONFAULT;
7921     }
7922 #endif
7923 
7924     return result;
7925 }
7926 #endif
7927 
7928 static inline int target_to_host_msync_arg(abi_long arg)
7929 {
7930     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7931            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7932            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7933            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7934 }
7935 
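/*
 * Marshal a host struct stat into the guest's stat64 (or plain stat)
 * layout; 32-bit ARM EABI guests use the distinct target_eabi_stat64.
 */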
7936 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7937      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7938      defined(TARGET_NR_newfstatat))
7939 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7940                                              abi_ulong target_addr,
7941                                              struct stat *host_st)
7942 {
7943 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7944     if (cpu_env->eabi) {
7945         struct target_eabi_stat64 *target_st;
7946 
7947         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7948             return -TARGET_EFAULT;
7949         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7950         __put_user(host_st->st_dev, &target_st->st_dev);
7951         __put_user(host_st->st_ino, &target_st->st_ino);
7952 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7953         __put_user(host_st->st_ino, &target_st->__st_ino);
7954 #endif
7955         __put_user(host_st->st_mode, &target_st->st_mode);
7956         __put_user(host_st->st_nlink, &target_st->st_nlink);
7957         __put_user(host_st->st_uid, &target_st->st_uid);
7958         __put_user(host_st->st_gid, &target_st->st_gid);
7959         __put_user(host_st->st_rdev, &target_st->st_rdev);
7960         __put_user(host_st->st_size, &target_st->st_size);
7961         __put_user(host_st->st_blksize, &target_st->st_blksize);
7962         __put_user(host_st->st_blocks, &target_st->st_blocks);
7963         __put_user(host_st->st_atime, &target_st->target_st_atime);
7964         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7965         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7966 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7967         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7968         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7969         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7970 #endif
7971         unlock_user_struct(target_st, target_addr, 1);
7972     } else
7973 #endif
7974     {
7975 #if defined(TARGET_HAS_STRUCT_STAT64)
7976         struct target_stat64 *target_st;
7977 #else
7978         struct target_stat *target_st;
7979 #endif
7980 
7981         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7982             return -TARGET_EFAULT;
7983         memset(target_st, 0, sizeof(*target_st));
7984         __put_user(host_st->st_dev, &target_st->st_dev);
7985         __put_user(host_st->st_ino, &target_st->st_ino);
7986 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7987         __put_user(host_st->st_ino, &target_st->__st_ino);
7988 #endif
7989         __put_user(host_st->st_mode, &target_st->st_mode);
7990         __put_user(host_st->st_nlink, &target_st->st_nlink);
7991         __put_user(host_st->st_uid, &target_st->st_uid);
7992         __put_user(host_st->st_gid, &target_st->st_gid);
7993         __put_user(host_st->st_rdev, &target_st->st_rdev);
7994         /* XXX: better use of kernel struct */
7995         __put_user(host_st->st_size, &target_st->st_size);
7996         __put_user(host_st->st_blksize, &target_st->st_blksize);
7997         __put_user(host_st->st_blocks, &target_st->st_blocks);
7998         __put_user(host_st->st_atime, &target_st->target_st_atime);
7999         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
8000         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
8001 #ifdef HAVE_STRUCT_STAT_ST_ATIM
8002         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
8003         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
8004         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
8005 #endif
8006         unlock_user_struct(target_st, target_addr, 1);
8007     }
8008 
8009     return 0;
8010 }
8011 #endif
8012 
8013 #if defined(TARGET_NR_statx) && defined(__NR_statx)
8014 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
8015                                             abi_ulong target_addr)
8016 {
8017     struct target_statx *target_stx;
8018 
8019     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
8020         return -TARGET_EFAULT;
8021     }
8022     memset(target_stx, 0, sizeof(*target_stx));
8023 
8024     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
8025     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
8026     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
8027     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
8028     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
8029     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
8030     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
8031     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
8032     __put_user(host_stx->stx_size, &target_stx->stx_size);
8033     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
8034     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
8035     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
8036     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
8037     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
8038     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
8039     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
8040     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
8041     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
8042     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
8043     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
8044     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
8045     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
8046     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
8047 
8048     unlock_user_struct(target_stx, target_addr, 1);
8049 
8050     return 0;
8051 }
8052 #endif
8053 
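/*
 * Dispatch to the host futex syscall that matches the width of the host
 * timespec: __NR_futex on 64-bit hosts; on 32-bit hosts, __NR_futex_time64
 * when the host timespec is 64-bit, otherwise the old __NR_futex.
 */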
8054 static int do_sys_futex(int *uaddr, int op, int val,
8055                          const struct timespec *timeout, int *uaddr2,
8056                          int val3)
8057 {
8058 #if HOST_LONG_BITS == 64
8059 #if defined(__NR_futex)
8060     /* The host always has a 64-bit time_t and defines no _time64 variant. */
8061     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
8062 
8063 #endif
8064 #else /* HOST_LONG_BITS == 64 */
8065 #if defined(__NR_futex_time64)
8066     if (sizeof(timeout->tv_sec) == 8) {
8067         /* _time64 function on 32bit arch */
8068         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
8069     }
8070 #endif
8071 #if defined(__NR_futex)
8072     /* old function on 32bit arch */
8073     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
8074 #endif
8075 #endif /* HOST_LONG_BITS == 64 */
8076     g_assert_not_reached();
8077 }
8078 
8079 static int do_safe_futex(int *uaddr, int op, int val,
8080                          const struct timespec *timeout, int *uaddr2,
8081                          int val3)
8082 {
8083 #if HOST_LONG_BITS == 64
8084 #if defined(__NR_futex)
8085     /* The host always has a 64-bit time_t and defines no _time64 variant. */
8086     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
8087 #endif
8088 #else /* HOST_LONG_BITS == 64 */
8089 #if defined(__NR_futex_time64)
8090     if (sizeof(timeout->tv_sec) == 8) {
8091         /* _time64 function on 32bit arch */
8092         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
8093                                            val3));
8094     }
8095 #endif
8096 #if defined(__NR_futex)
8097     /* old function on 32bit arch */
8098     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
8099 #endif
8100 #endif /* HOST_LONG_BITS == 64 */
8101     return -TARGET_ENOSYS;
8102 }
8103 
8104 /* ??? Using host futex calls even when target atomic operations
8105    are not really atomic probably breaks things.  However, implementing
8106    futexes locally would make futexes shared between multiple processes
8107    tricky; then again, such futexes are probably useless anyway, because
8108    guest atomic operations won't work either.  */
8109 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
8110 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
8111                     int op, int val, target_ulong timeout,
8112                     target_ulong uaddr2, int val3)
8113 {
8114     struct timespec ts, *pts = NULL;
8115     void *haddr2 = NULL;
8116     int base_op;
8117 
8118     /* We assume FUTEX_* constants are the same on both host and target. */
8119 #ifdef FUTEX_CMD_MASK
8120     base_op = op & FUTEX_CMD_MASK;
8121 #else
8122     base_op = op;
8123 #endif
8124     switch (base_op) {
8125     case FUTEX_WAIT:
8126     case FUTEX_WAIT_BITSET:
8127         val = tswap32(val);
8128         break;
8129     case FUTEX_WAIT_REQUEUE_PI:
8130         val = tswap32(val);
8131         haddr2 = g2h(cpu, uaddr2);
8132         break;
8133     case FUTEX_LOCK_PI:
8134     case FUTEX_LOCK_PI2:
8135         break;
8136     case FUTEX_WAKE:
8137     case FUTEX_WAKE_BITSET:
8138     case FUTEX_TRYLOCK_PI:
8139     case FUTEX_UNLOCK_PI:
8140         timeout = 0;
8141         break;
8142     case FUTEX_FD:
8143         val = target_to_host_signal(val);
8144         timeout = 0;
8145         break;
8146     case FUTEX_CMP_REQUEUE:
8147     case FUTEX_CMP_REQUEUE_PI:
8148         val3 = tswap32(val3);
8149         /* fall through */
8150     case FUTEX_REQUEUE:
8151     case FUTEX_WAKE_OP:
8152         /*
8153          * For these, the 4th argument is not TIMEOUT, but VAL2.
8154          * But the prototype of do_safe_futex takes a pointer, so
8155          * insert casts to satisfy the compiler.  We do not need
8156          * to tswap VAL2 since it's not compared to guest memory.
8157          */
8158         pts = (struct timespec *)(uintptr_t)timeout;
8159         timeout = 0;
8160         haddr2 = g2h(cpu, uaddr2);
8161         break;
8162     default:
8163         return -TARGET_ENOSYS;
8164     }
8165     if (timeout) {
8166         pts = &ts;
8167         if (time64
8168             ? target_to_host_timespec64(pts, timeout)
8169             : target_to_host_timespec(pts, timeout)) {
8170             return -TARGET_EFAULT;
8171         }
8172     }
8173     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
8174 }
8175 #endif
8176 
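/*
 * name_to_handle_at(): the guest supplies handle_bytes up front; the opaque
 * handle data is copied back verbatim while handle_bytes and handle_type
 * are byte-swapped for the guest.
 */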
8177 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8178 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
8179                                      abi_long handle, abi_long mount_id,
8180                                      abi_long flags)
8181 {
8182     struct file_handle *target_fh;
8183     struct file_handle *fh;
8184     int mid = 0;
8185     abi_long ret;
8186     char *name;
8187     unsigned int size, total_size;
8188 
8189     if (get_user_s32(size, handle)) {
8190         return -TARGET_EFAULT;
8191     }
8192 
8193     name = lock_user_string(pathname);
8194     if (!name) {
8195         return -TARGET_EFAULT;
8196     }
8197 
8198     total_size = sizeof(struct file_handle) + size;
8199     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
8200     if (!target_fh) {
8201         unlock_user(name, pathname, 0);
8202         return -TARGET_EFAULT;
8203     }
8204 
8205     fh = g_malloc0(total_size);
8206     fh->handle_bytes = size;
8207 
8208     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
8209     unlock_user(name, pathname, 0);
8210 
8211     /* man name_to_handle_at(2):
8212      * Other than the use of the handle_bytes field, the caller should treat
8213      * the file_handle structure as an opaque data type
8214      */
8215 
8216     memcpy(target_fh, fh, total_size);
8217     target_fh->handle_bytes = tswap32(fh->handle_bytes);
8218     target_fh->handle_type = tswap32(fh->handle_type);
8219     g_free(fh);
8220     unlock_user(target_fh, handle, total_size);
8221 
8222     if (put_user_s32(mid, mount_id)) {
8223         return -TARGET_EFAULT;
8224     }
8225 
8226     return ret;
8227 
8228 }
8229 #endif
8230 
8231 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8232 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
8233                                      abi_long flags)
8234 {
8235     struct file_handle *target_fh;
8236     struct file_handle *fh;
8237     unsigned int size, total_size;
8238     abi_long ret;
8239 
8240     if (get_user_s32(size, handle)) {
8241         return -TARGET_EFAULT;
8242     }
8243 
8244     total_size = sizeof(struct file_handle) + size;
8245     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
8246     if (!target_fh) {
8247         return -TARGET_EFAULT;
8248     }
8249 
8250     fh = g_memdup(target_fh, total_size);
8251     fh->handle_bytes = size;
8252     fh->handle_type = tswap32(target_fh->handle_type);
8253 
8254     ret = get_errno(open_by_handle_at(mount_fd, fh,
8255                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
8256 
8257     g_free(fh);
8258 
8259     unlock_user(target_fh, handle, total_size);
8260 
8261     return ret;
8262 }
8263 #endif
8264 
8265 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
8266 
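/*
 * signalfd()/signalfd4(): convert the guest sigset and the
 * O_NONBLOCK/O_CLOEXEC flags, then register an fd translator so that
 * siginfo data read from the fd can be converted to the guest layout.
 */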
8267 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8268 {
8269     int host_flags;
8270     target_sigset_t *target_mask;
8271     sigset_t host_mask;
8272     abi_long ret;
8273 
8274     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8275         return -TARGET_EINVAL;
8276     }
8277     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8278         return -TARGET_EFAULT;
8279     }
8280 
8281     target_to_host_sigset(&host_mask, target_mask);
8282 
8283     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8284 
8285     ret = get_errno(signalfd(fd, &host_mask, host_flags));
8286     if (ret >= 0) {
8287         fd_trans_register(ret, &target_signalfd_trans);
8288     }
8289 
8290     unlock_user_struct(target_mask, mask, 0);
8291 
8292     return ret;
8293 }
8294 #endif
8295 
8296 /* Map host to target signal numbers for the wait family of syscalls.
8297    Assume all other status bits are the same.  */
8298 int host_to_target_waitstatus(int status)
8299 {
8300     if (WIFSIGNALED(status)) {
8301         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8302     }
8303     if (WIFSTOPPED(status)) {
8304         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8305                | (status & 0xff);
8306     }
8307     return status;
8308 }
8309 
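/*
 * Emulate /proc/self/cmdline: write the guest's saved argv[] strings,
 * each including its terminating NUL, to fd.
 */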
8310 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8311 {
8312     CPUState *cpu = env_cpu(cpu_env);
8313     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8314     int i;
8315 
8316     for (i = 0; i < bprm->argc; i++) {
8317         size_t len = strlen(bprm->argv[i]) + 1;
8318 
8319         if (write(fd, bprm->argv[i], len) != len) {
8320             return -1;
8321         }
8322     }
8323 
8324     return 0;
8325 }
8326 
8327 struct open_self_maps_data {
8328     TaskState *ts;
8329     IntervalTreeRoot *host_maps;
8330     int fd;
8331     bool smaps;
8332 };
8333 
8334 /*
8335  * Subroutine to output one line of /proc/self/maps,
8336  * or one region of /proc/self/smaps.
8337  */
8338 
8339 #ifdef TARGET_HPPA
8340 # define test_stack(S, E, L)  (E == L)
8341 #else
8342 # define test_stack(S, E, L)  (S == L)
8343 #endif
8344 
8345 static void open_self_maps_4(const struct open_self_maps_data *d,
8346                              const MapInfo *mi, abi_ptr start,
8347                              abi_ptr end, unsigned flags)
8348 {
8349     const struct image_info *info = d->ts->info;
8350     const char *path = mi->path;
8351     uint64_t offset;
8352     int fd = d->fd;
8353     int count;
8354 
8355     if (test_stack(start, end, info->stack_limit)) {
8356         path = "[stack]";
8357     } else if (start == info->brk) {
8358         path = "[heap]";
8359     } else if (start == info->vdso) {
8360         path = "[vdso]";
8361 #ifdef TARGET_X86_64
8362     } else if (start == TARGET_VSYSCALL_PAGE) {
8363         path = "[vsyscall]";
8364 #endif
8365     }
8366 
8367     /* Except for the null device (MAP_ANON), adjust offset for this fragment. */
8368     offset = mi->offset;
8369     if (mi->dev) {
8370         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8371         offset += hstart - mi->itree.start;
8372     }
8373 
8374     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8375                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8376                     start, end,
8377                     (flags & PAGE_READ) ? 'r' : '-',
8378                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8379                     (flags & PAGE_EXEC) ? 'x' : '-',
8380                     mi->is_priv ? 'p' : 's',
8381                     offset, major(mi->dev), minor(mi->dev),
8382                     (uint64_t)mi->inode);
8383     if (path) {
8384         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8385     } else {
8386         dprintf(fd, "\n");
8387     }
8388 
8389     if (d->smaps) {
8390         unsigned long size = end - start;
8391         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8392         unsigned long size_kb = size >> 10;
8393 
8394         dprintf(fd, "Size:                  %lu kB\n"
8395                 "KernelPageSize:        %lu kB\n"
8396                 "MMUPageSize:           %lu kB\n"
8397                 "Rss:                   0 kB\n"
8398                 "Pss:                   0 kB\n"
8399                 "Pss_Dirty:             0 kB\n"
8400                 "Shared_Clean:          0 kB\n"
8401                 "Shared_Dirty:          0 kB\n"
8402                 "Private_Clean:         0 kB\n"
8403                 "Private_Dirty:         0 kB\n"
8404                 "Referenced:            0 kB\n"
8405                 "Anonymous:             %lu kB\n"
8406                 "LazyFree:              0 kB\n"
8407                 "AnonHugePages:         0 kB\n"
8408                 "ShmemPmdMapped:        0 kB\n"
8409                 "FilePmdMapped:         0 kB\n"
8410                 "Shared_Hugetlb:        0 kB\n"
8411                 "Private_Hugetlb:       0 kB\n"
8412                 "Swap:                  0 kB\n"
8413                 "SwapPss:               0 kB\n"
8414                 "Locked:                0 kB\n"
8415                 "THPeligible:    0\n"
8416                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8417                 size_kb, page_size_kb, page_size_kb,
8418                 (flags & PAGE_ANON ? size_kb : 0),
8419                 (flags & PAGE_READ) ? " rd" : "",
8420                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8421                 (flags & PAGE_EXEC) ? " ex" : "",
8422                 mi->is_priv ? "" : " sh",
8423                 (flags & PAGE_READ) ? " mr" : "",
8424                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8425                 (flags & PAGE_EXEC) ? " me" : "",
8426                 mi->is_priv ? "" : " ms");
8427     }
8428 }
8429 
8430 /*
8431  * Callback for walk_memory_regions, when read_self_maps() fails.
8432  * Proceed without the benefit of host /proc/self/maps cross-check.
8433  */
8434 static int open_self_maps_3(void *opaque, vaddr guest_start,
8435                             vaddr guest_end, int flags)
8436 {
8437     static const MapInfo mi = { .is_priv = true };
8438 
8439     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8440     return 0;
8441 }
8442 
8443 /*
8444  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8445  */
8446 static int open_self_maps_2(void *opaque, vaddr guest_start,
8447                             vaddr guest_end, int flags)
8448 {
8449     const struct open_self_maps_data *d = opaque;
8450     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8451     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8452 
8453 #ifdef TARGET_X86_64
8454     /*
8455      * Because of the extremely high position of the page within the guest
8456      * virtual address space, this is not backed by host memory at all.
8457      * Therefore the loop below would fail.  This is the only instance
8458      * of not having host backing memory.
8459      */
8460     if (guest_start == TARGET_VSYSCALL_PAGE) {
8461         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8462     }
8463 #endif
8464 
8465     while (1) {
8466         IntervalTreeNode *n =
8467             interval_tree_iter_first(d->host_maps, host_start, host_start);
8468         MapInfo *mi = container_of(n, MapInfo, itree);
8469         uintptr_t this_hlast = MIN(host_last, n->last);
8470         target_ulong this_gend = h2g(this_hlast) + 1;
8471 
8472         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8473 
8474         if (this_hlast == host_last) {
8475             return 0;
8476         }
8477         host_start = this_hlast + 1;
8478         guest_start = h2g(host_start);
8479     }
8480 }
8481 
8482 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8483 {
8484     struct open_self_maps_data d = {
8485         .ts = get_task_state(env_cpu(env)),
8486         .fd = fd,
8487         .smaps = smaps
8488     };
8489 
8490     mmap_lock();
8491     d.host_maps = read_self_maps();
8492     if (d.host_maps) {
8493         walk_memory_regions(&d, open_self_maps_2);
8494         free_self_maps(d.host_maps);
8495     } else {
8496         walk_memory_regions(&d, open_self_maps_3);
8497     }
8498     mmap_unlock();
8499     return 0;
8500 }
8501 
8502 static int open_self_maps(CPUArchState *cpu_env, int fd)
8503 {
8504     return open_self_maps_1(cpu_env, fd, false);
8505 }
8506 
8507 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8508 {
8509     return open_self_maps_1(cpu_env, fd, true);
8510 }
8511 
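/*
 * Emulate /proc/self/stat: only pid, comm, state, ppid, pgrp, the thread
 * count, starttime and the stack start address are filled in; all other
 * fields read as 0.
 */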
8512 static int open_self_stat(CPUArchState *cpu_env, int fd)
8513 {
8514     CPUState *cpu = env_cpu(cpu_env);
8515     TaskState *ts = get_task_state(cpu);
8516     g_autoptr(GString) buf = g_string_new(NULL);
8517     int i;
8518 
8519     for (i = 0; i < 44; i++) {
8520         if (i == 0) {
8521             /* pid */
8522             g_string_printf(buf, FMT_pid " ", getpid());
8523         } else if (i == 1) {
8524             /* app name */
8525             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8526             bin = bin ? bin + 1 : ts->bprm->argv[0];
8527             g_string_printf(buf, "(%.15s) ", bin);
8528         } else if (i == 2) {
8529             /* task state */
8530             g_string_assign(buf, "R "); /* we are running right now */
8531         } else if (i == 3) {
8532             /* ppid */
8533             g_string_printf(buf, FMT_pid " ", getppid());
8534         } else if (i == 4) {
8535             /* pgid */
8536             g_string_printf(buf, FMT_pid " ", getpgrp());
8537         } else if (i == 19) {
8538             /* num_threads */
8539             int cpus = 0;
8540             WITH_RCU_READ_LOCK_GUARD() {
8541                 CPUState *cpu_iter;
8542                 CPU_FOREACH(cpu_iter) {
8543                     cpus++;
8544                 }
8545             }
8546             g_string_printf(buf, "%d ", cpus);
8547         } else if (i == 21) {
8548             /* starttime */
8549             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8550         } else if (i == 27) {
8551             /* stack bottom */
8552             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8553         } else {
8554             /* for the rest, there is MasterCard */
8555             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8556         }
8557 
8558         if (write(fd, buf->str, buf->len) != buf->len) {
8559             return -1;
8560         }
8561     }
8562 
8563     return 0;
8564 }
8565 
8566 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8567 {
8568     CPUState *cpu = env_cpu(cpu_env);
8569     TaskState *ts = get_task_state(cpu);
8570     abi_ulong auxv = ts->info->saved_auxv;
8571     abi_ulong len = ts->info->auxv_len;
8572     char *ptr;
8573 
8574     /*
8575      * The auxiliary vector is stored on the target process's stack;
8576      * read in the whole auxv vector and copy it to the file.
8577      */
8578     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8579     if (ptr != NULL) {
8580         while (len > 0) {
8581             ssize_t r;
8582             r = write(fd, ptr, len);
8583             if (r <= 0) {
8584                 break;
8585             }
8586             len -= r;
8587             ptr += r;
8588         }
8589         lseek(fd, 0, SEEK_SET);
8590         unlock_user(ptr, auxv, len);
8591     }
8592 
8593     return 0;
8594 }
8595 
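/*
 * Return 1 if filename names /proc/self/<entry> or /proc/<pid>/<entry>
 * for our own pid, otherwise 0.
 */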
8596 static int is_proc_myself(const char *filename, const char *entry)
8597 {
8598     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8599         filename += strlen("/proc/");
8600         if (!strncmp(filename, "self/", strlen("self/"))) {
8601             filename += strlen("self/");
8602         } else if (*filename >= '1' && *filename <= '9') {
8603             char myself[80];
8604             snprintf(myself, sizeof(myself), "%d/", getpid());
8605             if (!strncmp(filename, myself, strlen(myself))) {
8606                 filename += strlen(myself);
8607             } else {
8608                 return 0;
8609             }
8610         } else {
8611             return 0;
8612         }
8613         if (!strcmp(filename, entry)) {
8614             return 1;
8615         }
8616     }
8617     return 0;
8618 }
8619 
8620 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8621                       const char *fmt, int code)
8622 {
8623     if (logfile) {
8624         CPUState *cs = env_cpu(env);
8625 
8626         fprintf(logfile, fmt, code);
8627         fprintf(logfile, "Failing executable: %s\n", exec_path);
8628         cpu_dump_state(cs, logfile, 0);
8629         open_self_maps(env, fileno(logfile));
8630     }
8631 }
8632 
8633 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8634 {
8635     /* dump to console */
8636     excp_dump_file(stderr, env, fmt, code);
8637 
8638     /* dump to log file */
8639     if (qemu_log_separate()) {
8640         FILE *logfile = qemu_log_trylock();
8641 
8642         excp_dump_file(logfile, env, fmt, code);
8643         qemu_log_unlock(logfile);
8644     }
8645 }
8646 
8647 #include "target_proc.h"
8648 
8649 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8650     defined(HAVE_ARCH_PROC_CPUINFO) || \
8651     defined(HAVE_ARCH_PROC_HARDWARE)
8652 static int is_proc(const char *filename, const char *entry)
8653 {
8654     return strcmp(filename, entry) == 0;
8655 }
8656 #endif
8657 
8658 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8659 static int open_net_route(CPUArchState *cpu_env, int fd)
8660 {
8661     FILE *fp;
8662     char *line = NULL;
8663     size_t len = 0;
8664     ssize_t read;
8665 
8666     fp = fopen("/proc/net/route", "r");
8667     if (fp == NULL) {
8668         return -1;
8669     }
8670 
8671     /* read header */
8672 
8673     read = getline(&line, &len, fp);
8674     dprintf(fd, "%s", line);
8675 
8676     /* read routes */
8677 
8678     while ((read = getline(&line, &len, fp)) != -1) {
8679         char iface[16];
8680         uint32_t dest, gw, mask;
8681         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8682         int fields;
8683 
8684         fields = sscanf(line,
8685                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8686                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8687                         &mask, &mtu, &window, &irtt);
8688         if (fields != 11) {
8689             continue;
8690         }
8691         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8692                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8693                 metric, tswap32(mask), mtu, window, irtt);
8694     }
8695 
8696     free(line);
8697     fclose(fp);
8698 
8699     return 0;
8700 }
8701 #endif
8702 
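/*
 * Intercept opens of emulated /proc files.  Returns a host fd for a faked
 * or special-cased path (or -1 with errno set on failure), or -2 if the
 * path is not emulated and the caller should fall back to a real open.
 */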
8703 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8704                               const char *fname, int flags, mode_t mode,
8705                               int openat2_resolve, bool safe)
8706 {
8707     g_autofree char *proc_name = NULL;
8708     const char *pathname;
8709     struct fake_open {
8710         const char *filename;
8711         int (*fill)(CPUArchState *cpu_env, int fd);
8712         int (*cmp)(const char *s1, const char *s2);
8713     };
8714     const struct fake_open *fake_open;
8715     static const struct fake_open fakes[] = {
8716         { "maps", open_self_maps, is_proc_myself },
8717         { "smaps", open_self_smaps, is_proc_myself },
8718         { "stat", open_self_stat, is_proc_myself },
8719         { "auxv", open_self_auxv, is_proc_myself },
8720         { "cmdline", open_self_cmdline, is_proc_myself },
8721 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8722         { "/proc/net/route", open_net_route, is_proc },
8723 #endif
8724 #if defined(HAVE_ARCH_PROC_CPUINFO)
8725         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8726 #endif
8727 #if defined(HAVE_ARCH_PROC_HARDWARE)
8728         { "/proc/hardware", open_hardware, is_proc },
8729 #endif
8730         { NULL, NULL, NULL }
8731     };
8732 
8733     /* If this is a file from the /proc/ filesystem, expand its full name. */
8734     proc_name = realpath(fname, NULL);
8735     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8736         pathname = proc_name;
8737     } else {
8738         pathname = fname;
8739     }
8740 
8741     if (is_proc_myself(pathname, "exe")) {
8742         /* Honor openat2 resolve flags */
8743         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8744             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8745             errno = ELOOP;
8746             return -1;
8747         }
8748         if (safe) {
8749             return safe_openat(dirfd, exec_path, flags, mode);
8750         } else {
8751             return openat(dirfd, exec_path, flags, mode);
8752         }
8753     }
8754 
8755     for (fake_open = fakes; fake_open->filename; fake_open++) {
8756         if (fake_open->cmp(pathname, fake_open->filename)) {
8757             break;
8758         }
8759     }
8760 
8761     if (fake_open->filename) {
8762         const char *tmpdir;
8763         char filename[PATH_MAX];
8764         int fd, r;
8765 
8766         fd = memfd_create("qemu-open", 0);
8767         if (fd < 0) {
8768             if (errno != ENOSYS) {
8769                 return fd;
8770             }
8771             /* create temporary file to map stat to */
8772             tmpdir = getenv("TMPDIR");
8773             if (!tmpdir)
8774                 tmpdir = "/tmp";
8775             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8776             fd = mkstemp(filename);
8777             if (fd < 0) {
8778                 return fd;
8779             }
8780             unlink(filename);
8781         }
8782 
8783         if ((r = fake_open->fill(cpu_env, fd))) {
8784             int e = errno;
8785             close(fd);
8786             errno = e;
8787             return r;
8788         }
8789         lseek(fd, 0, SEEK_SET);
8790 
8791         return fd;
8792     }
8793 
8794     return -2;
8795 }
8796 
8797 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8798                     int flags, mode_t mode, bool safe)
8799 {
8800     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8801     if (fd > -2) {
8802         return fd;
8803     }
8804 
8805     if (safe) {
8806         return safe_openat(dirfd, path(pathname), flags, mode);
8807     } else {
8808         return openat(dirfd, path(pathname), flags, mode);
8809     }
8810 }
8811 
8812 
8813 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8814                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8815                       abi_ulong guest_size)
8816 {
8817     struct open_how_ver0 how = {0};
8818     char *pathname;
8819     int ret;
8820 
8821     if (guest_size < sizeof(struct target_open_how_ver0)) {
8822         return -TARGET_EINVAL;
8823     }
8824     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8825     if (ret) {
8826         if (ret == -TARGET_E2BIG) {
8827             qemu_log_mask(LOG_UNIMP,
8828                           "Unimplemented openat2 open_how size: "
8829                           TARGET_ABI_FMT_lu "\n", guest_size);
8830         }
8831         return ret;
8832     }
8833     pathname = lock_user_string(guest_pathname);
8834     if (!pathname) {
8835         return -TARGET_EFAULT;
8836     }
8837 
8838     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8839     how.mode = tswap64(how.mode);
8840     how.resolve = tswap64(how.resolve);
8841     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8842                                 how.resolve, true);
8843     if (fd > -2) {
8844         ret = get_errno(fd);
8845     } else {
8846         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8847                                      sizeof(struct open_how_ver0)));
8848     }
8849 
8850     fd_trans_unregister(ret);
8851     unlock_user(pathname, guest_pathname, 0);
8852     return ret;
8853 }
8854 
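/*
 * readlink()/readlinkat() helper: /proc/self/exe is special-cased to
 * report QEMU's exec_path; like the kernel, the result is not
 * NUL-terminated.
 */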
8855 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8856 {
8857     ssize_t ret;
8858 
8859     if (!pathname || !buf) {
8860         errno = EFAULT;
8861         return -1;
8862     }
8863 
8864     if (!bufsiz) {
8865         /* Short circuit this for the magic exe check. */
8866         errno = EINVAL;
8867         return -1;
8868     }
8869 
8870     if (is_proc_myself((const char *)pathname, "exe")) {
8871         /*
8872          * Don't worry about sign mismatch as earlier mapping
8873          * logic would have thrown a bad address error.
8874          */
8875         ret = MIN(strlen(exec_path), bufsiz);
8876         /* We cannot NUL terminate the string. */
8877         memcpy(buf, exec_path, ret);
8878     } else {
8879         ret = readlink(path(pathname), buf, bufsiz);
8880     }
8881 
8882     return ret;
8883 }
8884 
8885 static int do_execv(CPUArchState *cpu_env, int dirfd,
8886                     abi_long pathname, abi_long guest_argp,
8887                     abi_long guest_envp, int flags, bool is_execveat)
8888 {
8889     int ret;
8890     char **argp, **envp;
8891     int argc, envc;
8892     abi_ulong gp;
8893     abi_ulong addr;
8894     char **q;
8895     void *p;
8896 
8897     argc = 0;
8898 
8899     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8900         if (get_user_ual(addr, gp)) {
8901             return -TARGET_EFAULT;
8902         }
8903         if (!addr) {
8904             break;
8905         }
8906         argc++;
8907     }
8908     envc = 0;
8909     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8910         if (get_user_ual(addr, gp)) {
8911             return -TARGET_EFAULT;
8912         }
8913         if (!addr) {
8914             break;
8915         }
8916         envc++;
8917     }
8918 
8919     argp = g_new0(char *, argc + 1);
8920     envp = g_new0(char *, envc + 1);
8921 
8922     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8923         if (get_user_ual(addr, gp)) {
8924             goto execve_efault;
8925         }
8926         if (!addr) {
8927             break;
8928         }
8929         *q = lock_user_string(addr);
8930         if (!*q) {
8931             goto execve_efault;
8932         }
8933     }
8934     *q = NULL;
8935 
8936     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8937         if (get_user_ual(addr, gp)) {
8938             goto execve_efault;
8939         }
8940         if (!addr) {
8941             break;
8942         }
8943         *q = lock_user_string(addr);
8944         if (!*q) {
8945             goto execve_efault;
8946         }
8947     }
8948     *q = NULL;
8949 
8950     /*
8951      * Although execve() is not an interruptible syscall it is
8952      * a special case where we must use the safe_syscall wrapper:
8953      * if we allow a signal to happen before we make the host
8954      * syscall then we will 'lose' it, because at the point of
8955      * execve the process leaves QEMU's control. So we use the
8956      * safe syscall wrapper to ensure that we either take the
8957      * signal as a guest signal, or else it does not happen
8958      * before the execve completes and makes it the other
8959      * program's problem.
8960      */
8961     p = lock_user_string(pathname);
8962     if (!p) {
8963         goto execve_efault;
8964     }
8965 
8966     const char *exe = p;
8967     if (is_proc_myself(p, "exe")) {
8968         exe = exec_path;
8969     }
8970     ret = is_execveat
8971         ? safe_execveat(dirfd, exe, argp, envp, flags)
8972         : safe_execve(exe, argp, envp);
8973     ret = get_errno(ret);
8974 
8975     unlock_user(p, pathname, 0);
8976 
8977     goto execve_end;
8978 
8979 execve_efault:
8980     ret = -TARGET_EFAULT;
8981 
8982 execve_end:
8983     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8984         if (get_user_ual(addr, gp) || !addr) {
8985             break;
8986         }
8987         unlock_user(*q, addr, 0);
8988     }
8989     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8990         if (get_user_ual(addr, gp) || !addr) {
8991             break;
8992         }
8993         unlock_user(*q, addr, 0);
8994     }
8995 
8996     g_free(argp);
8997     g_free(envp);
8998     return ret;
8999 }
9000 
9001 #define TIMER_MAGIC 0x0caf0000
9002 #define TIMER_MAGIC_MASK 0xffff0000
9003 
9004 /* Convert QEMU provided timer ID back to internal 16bit index format */
9005 static target_timer_t get_timer_id(abi_long arg)
9006 {
9007     target_timer_t timerid = arg;
9008 
9009     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
9010         return -TARGET_EINVAL;
9011     }
9012 
9013     timerid &= 0xffff;
9014 
9015     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
9016         return -TARGET_EINVAL;
9017     }
9018 
9019     return timerid;
9020 }
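
/*
 * Illustration: the inverse mapping, i.e. how a 16-bit timer slot index
 * would be wrapped into the guest-visible ID format that the mask check
 * above accepts (sketch only):
 *
 *     abi_ulong guest_id = TIMER_MAGIC | (slot & 0xffff);
 */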
9021 
9022 static int target_to_host_cpu_mask(unsigned long *host_mask,
9023                                    size_t host_size,
9024                                    abi_ulong target_addr,
9025                                    size_t target_size)
9026 {
9027     unsigned target_bits = sizeof(abi_ulong) * 8;
9028     unsigned host_bits = sizeof(*host_mask) * 8;
9029     abi_ulong *target_mask;
9030     unsigned i, j;
9031 
9032     assert(host_size >= target_size);
9033 
9034     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
9035     if (!target_mask) {
9036         return -TARGET_EFAULT;
9037     }
9038     memset(host_mask, 0, host_size);
9039 
9040     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
9041         unsigned bit = i * target_bits;
9042         abi_ulong val;
9043 
9044         __get_user(val, &target_mask[i]);
9045         for (j = 0; j < target_bits; j++, bit++) {
9046             if (val & (1UL << j)) {
9047                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
9048             }
9049         }
9050     }
9051 
9052     unlock_user(target_mask, target_addr, 0);
9053     return 0;
9054 }
9055 
9056 static int host_to_target_cpu_mask(const unsigned long *host_mask,
9057                                    size_t host_size,
9058                                    abi_ulong target_addr,
9059                                    size_t target_size)
9060 {
9061     unsigned target_bits = sizeof(abi_ulong) * 8;
9062     unsigned host_bits = sizeof(*host_mask) * 8;
9063     abi_ulong *target_mask;
9064     unsigned i, j;
9065 
9066     assert(host_size >= target_size);
9067 
9068     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
9069     if (!target_mask) {
9070         return -TARGET_EFAULT;
9071     }
9072 
9073     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
9074         unsigned bit = i * target_bits;
9075         abi_ulong val = 0;
9076 
9077         for (j = 0; j < target_bits; j++, bit++) {
9078             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
9079                 val |= 1UL << j;
9080             }
9081         }
9082         __put_user(val, &target_mask[i]);
9083     }
9084 
9085     unlock_user(target_mask, target_addr, target_size);
9086     return 0;
9087 }
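
/*
 * Note: the repartitioning done by the two converters above only matters
 * when the guest word is narrower than the host word, e.g. a 32-bit guest
 * on a 64-bit host.  There, guest word i, bit j lands in host word
 * (i * 32 + j) / 64 at bit (i * 32 + j) % 64, which is the bit / host_bits
 * arithmetic in the loops.
 */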
9088 
9089 #ifdef TARGET_NR_getdents
9090 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
9091 {
9092     g_autofree void *hdirp = NULL;
9093     void *tdirp;
9094     int hlen, hoff, toff;
9095     int hreclen, treclen;
9096     off_t prev_diroff = 0;
9097 
9098     hdirp = g_try_malloc(count);
9099     if (!hdirp) {
9100         return -TARGET_ENOMEM;
9101     }
9102 
9103 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9104     hlen = sys_getdents(dirfd, hdirp, count);
9105 #else
9106     hlen = sys_getdents64(dirfd, hdirp, count);
9107 #endif
9108 
9109     hlen = get_errno(hlen);
9110     if (is_error(hlen)) {
9111         return hlen;
9112     }
9113 
9114     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9115     if (!tdirp) {
9116         return -TARGET_EFAULT;
9117     }
9118 
9119     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
9120 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9121         struct linux_dirent *hde = hdirp + hoff;
9122 #else
9123         struct linux_dirent64 *hde = hdirp + hoff;
9124 #endif
9125         struct target_dirent *tde = tdirp + toff;
9126         int namelen;
9127         uint8_t type;
9128 
9129         namelen = strlen(hde->d_name);
9130         hreclen = hde->d_reclen;
9131         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
9132         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
9133 
9134         if (toff + treclen > count) {
9135             /*
9136              * If the host struct is smaller than the target struct, or
9137              * requires less alignment and thus packs into less space,
9138              * then the host can return more entries than we can pass
9139              * on to the guest.
9140              */
9141             if (toff == 0) {
9142                 toff = -TARGET_EINVAL; /* result buffer is too small */
9143                 break;
9144             }
9145             /*
9146              * Return what we have, resetting the file pointer to the
9147              * location of the first record not returned.
9148              */
9149             lseek(dirfd, prev_diroff, SEEK_SET);
9150             break;
9151         }
9152 
9153         prev_diroff = hde->d_off;
9154         tde->d_ino = tswapal(hde->d_ino);
9155         tde->d_off = tswapal(hde->d_off);
9156         tde->d_reclen = tswap16(treclen);
9157         memcpy(tde->d_name, hde->d_name, namelen + 1);
9158 
9159         /*
9160          * The getdents type is in what was formerly a padding byte at the
9161          * end of the structure.
9162          */
9163 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9164         type = *((uint8_t *)hde + hreclen - 1);
9165 #else
9166         type = hde->d_type;
9167 #endif
9168         *((uint8_t *)tde + treclen - 1) = type;
9169     }
9170 
9171     unlock_user(tdirp, arg2, toff);
9172     return toff;
9173 }
9174 #endif /* TARGET_NR_getdents */
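
/*
 * Layout note: the legacy linux_dirent record converted above stores
 * d_type in the final byte of the record, after the name and a pad byte:
 *
 *     d_ino | d_off | d_reclen | d_name '\0' [pad] d_type
 *
 * which is why the code reads and writes it at reclen - 1 when the legacy
 * format is in use.
 */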
9175 
9176 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9177 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
9178 {
9179     g_autofree void *hdirp = NULL;
9180     void *tdirp;
9181     int hlen, hoff, toff;
9182     int hreclen, treclen;
9183     off_t prev_diroff = 0;
9184 
9185     hdirp = g_try_malloc(count);
9186     if (!hdirp) {
9187         return -TARGET_ENOMEM;
9188     }
9189 
9190     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
9191     if (is_error(hlen)) {
9192         return hlen;
9193     }
9194 
9195     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9196     if (!tdirp) {
9197         return -TARGET_EFAULT;
9198     }
9199 
9200     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
9201         struct linux_dirent64 *hde = hdirp + hoff;
9202         struct target_dirent64 *tde = tdirp + toff;
9203         int namelen;
9204 
9205         namelen = strlen(hde->d_name) + 1;
9206         hreclen = hde->d_reclen;
9207         treclen = offsetof(struct target_dirent64, d_name) + namelen;
9208         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
9209 
9210         if (toff + treclen > count) {
9211             /*
9212              * If the host struct is smaller than the target struct, or
9213              * requires less alignment and thus packs into less space,
9214              * then the host can return more entries than we can pass
9215              * on to the guest.
9216              */
9217             if (toff == 0) {
9218                 toff = -TARGET_EINVAL; /* result buffer is too small */
9219                 break;
9220             }
9221             /*
9222              * Return what we have, resetting the file pointer to the
9223              * location of the first record not returned.
9224              */
9225             lseek(dirfd, prev_diroff, SEEK_SET);
9226             break;
9227         }
9228 
9229         prev_diroff = hde->d_off;
9230         tde->d_ino = tswap64(hde->d_ino);
9231         tde->d_off = tswap64(hde->d_off);
9232         tde->d_reclen = tswap16(treclen);
9233         tde->d_type = hde->d_type;
9234         memcpy(tde->d_name, hde->d_name, namelen);
9235     }
9236 
9237     unlock_user(tdirp, arg2, toff);
9238     return toff;
9239 }
9240 #endif /* TARGET_NR_getdents64 */
9241 
9242 #if defined(TARGET_NR_riscv_hwprobe)
9243 
9244 #define RISCV_HWPROBE_KEY_MVENDORID     0
9245 #define RISCV_HWPROBE_KEY_MARCHID       1
9246 #define RISCV_HWPROBE_KEY_MIMPID        2
9247 
9248 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9249 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9250 
9251 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
9252 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
9253 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
9254 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
9255 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
9256 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
9257 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
9258 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
9259 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
9260 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
9261 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
9262 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
9263 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
9264 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
9265 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
9266 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
9267 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
9268 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
9269 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
9270 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
9271 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
9272 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
9273 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
9274 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
9275 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
9276 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
9277 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
9278 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
9279 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
9280 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
9281 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
9282 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
9283 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
9284 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
9285 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
9286 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
9287 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
9288 #define     RISCV_HWPROBE_EXT_ZIHINTPAUSE   (1ULL << 36)
9289 #define     RISCV_HWPROBE_EXT_ZVE32X        (1ULL << 37)
9290 #define     RISCV_HWPROBE_EXT_ZVE32F        (1ULL << 38)
9291 #define     RISCV_HWPROBE_EXT_ZVE64X        (1ULL << 39)
9292 #define     RISCV_HWPROBE_EXT_ZVE64F        (1ULL << 40)
9293 #define     RISCV_HWPROBE_EXT_ZVE64D        (1ULL << 41)
9294 #define     RISCV_HWPROBE_EXT_ZIMOP         (1ULL << 42)
9295 #define     RISCV_HWPROBE_EXT_ZCA           (1ULL << 43)
9296 #define     RISCV_HWPROBE_EXT_ZCB           (1ULL << 44)
9297 #define     RISCV_HWPROBE_EXT_ZCD           (1ULL << 45)
9298 #define     RISCV_HWPROBE_EXT_ZCF           (1ULL << 46)
9299 #define     RISCV_HWPROBE_EXT_ZCMOP         (1ULL << 47)
9300 #define     RISCV_HWPROBE_EXT_ZAWRS         (1ULL << 48)
9301 #define     RISCV_HWPROBE_EXT_SUPM          (1ULL << 49)
9302 #define     RISCV_HWPROBE_EXT_ZICNTR        (1ULL << 50)
9303 #define     RISCV_HWPROBE_EXT_ZIHPM         (1ULL << 51)
9304 #define     RISCV_HWPROBE_EXT_ZFBFMIN       (1ULL << 52)
9305 #define     RISCV_HWPROBE_EXT_ZVFBFMIN      (1ULL << 53)
9306 #define     RISCV_HWPROBE_EXT_ZVFBFWMA      (1ULL << 54)
9307 #define     RISCV_HWPROBE_EXT_ZICBOM        (1ULL << 55)
9308 #define     RISCV_HWPROBE_EXT_ZAAMO         (1ULL << 56)
9309 #define     RISCV_HWPROBE_EXT_ZALRSC        (1ULL << 57)
9310 #define     RISCV_HWPROBE_EXT_ZABHA         (1ULL << 58)
9311 
9312 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9313 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9314 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9315 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9316 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9317 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9318 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9319 
9320 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9321 #define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7
9322 #define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8
9323 #define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF        9
9324 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN     0
9325 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED    1
9326 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW        2
9327 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_FAST        3
9328 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4
9329 #define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10
9330 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN     0
9331 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW        2
9332 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_FAST        3
9333 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
9334 #define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0     11
9335 #define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE      12
9336 #define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0    13
9337 
9338 struct riscv_hwprobe {
9339     abi_llong  key;
9340     abi_ullong value;
9341 };
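
/*
 * Illustration: roughly what a guest-side riscv_hwprobe() query looks
 * like.  Each requested pair.key gets a pair.value filled in, or the key
 * is rewritten to -1 if it is unknown (sketch only):
 *
 *     struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *     syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 */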
9342 
9343 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9344                                     struct riscv_hwprobe *pair,
9345                                     size_t pair_count)
9346 {
9347     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9348 
9349     for (; pair_count > 0; pair_count--, pair++) {
9350         abi_llong key;
9351         abi_ullong value;
9352         __put_user(0, &pair->value);
9353         __get_user(key, &pair->key);
9354         switch (key) {
9355         case RISCV_HWPROBE_KEY_MVENDORID:
9356             __put_user(cfg->mvendorid, &pair->value);
9357             break;
9358         case RISCV_HWPROBE_KEY_MARCHID:
9359             __put_user(cfg->marchid, &pair->value);
9360             break;
9361         case RISCV_HWPROBE_KEY_MIMPID:
9362             __put_user(cfg->mimpid, &pair->value);
9363             break;
9364         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9365             value = riscv_has_ext(env, RVI) &&
9366                     riscv_has_ext(env, RVM) &&
9367                     riscv_has_ext(env, RVA) ?
9368                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9369             __put_user(value, &pair->value);
9370             break;
9371         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9372             value = riscv_has_ext(env, RVF) &&
9373                     riscv_has_ext(env, RVD) ?
9374                     RISCV_HWPROBE_IMA_FD : 0;
9375             value |= riscv_has_ext(env, RVC) ?
9376                      RISCV_HWPROBE_IMA_C : 0;
9377             value |= riscv_has_ext(env, RVV) ?
9378                      RISCV_HWPROBE_IMA_V : 0;
9379             value |= cfg->ext_zba ?
9380                      RISCV_HWPROBE_EXT_ZBA : 0;
9381             value |= cfg->ext_zbb ?
9382                      RISCV_HWPROBE_EXT_ZBB : 0;
9383             value |= cfg->ext_zbs ?
9384                      RISCV_HWPROBE_EXT_ZBS : 0;
9385             value |= cfg->ext_zicboz ?
9386                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9387             value |= cfg->ext_zbc ?
9388                      RISCV_HWPROBE_EXT_ZBC : 0;
9389             value |= cfg->ext_zbkb ?
9390                      RISCV_HWPROBE_EXT_ZBKB : 0;
9391             value |= cfg->ext_zbkc ?
9392                      RISCV_HWPROBE_EXT_ZBKC : 0;
9393             value |= cfg->ext_zbkx ?
9394                      RISCV_HWPROBE_EXT_ZBKX : 0;
9395             value |= cfg->ext_zknd ?
9396                      RISCV_HWPROBE_EXT_ZKND : 0;
9397             value |= cfg->ext_zkne ?
9398                      RISCV_HWPROBE_EXT_ZKNE : 0;
9399             value |= cfg->ext_zknh ?
9400                      RISCV_HWPROBE_EXT_ZKNH : 0;
9401             value |= cfg->ext_zksed ?
9402                      RISCV_HWPROBE_EXT_ZKSED : 0;
9403             value |= cfg->ext_zksh ?
9404                      RISCV_HWPROBE_EXT_ZKSH : 0;
9405             value |= cfg->ext_zkt ?
9406                      RISCV_HWPROBE_EXT_ZKT : 0;
9407             value |= cfg->ext_zvbb ?
9408                      RISCV_HWPROBE_EXT_ZVBB : 0;
9409             value |= cfg->ext_zvbc ?
9410                      RISCV_HWPROBE_EXT_ZVBC : 0;
9411             value |= cfg->ext_zvkb ?
9412                      RISCV_HWPROBE_EXT_ZVKB : 0;
9413             value |= cfg->ext_zvkg ?
9414                      RISCV_HWPROBE_EXT_ZVKG : 0;
9415             value |= cfg->ext_zvkned ?
9416                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9417             value |= cfg->ext_zvknha ?
9418                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9419             value |= cfg->ext_zvknhb ?
9420                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9421             value |= cfg->ext_zvksed ?
9422                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9423             value |= cfg->ext_zvksh ?
9424                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9425             value |= cfg->ext_zvkt ?
9426                      RISCV_HWPROBE_EXT_ZVKT : 0;
9427             value |= cfg->ext_zfh ?
9428                      RISCV_HWPROBE_EXT_ZFH : 0;
9429             value |= cfg->ext_zfhmin ?
9430                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9431             value |= cfg->ext_zihintntl ?
9432                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9433             value |= cfg->ext_zvfh ?
9434                      RISCV_HWPROBE_EXT_ZVFH : 0;
9435             value |= cfg->ext_zvfhmin ?
9436                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9437             value |= cfg->ext_zfa ?
9438                      RISCV_HWPROBE_EXT_ZFA : 0;
9439             value |= cfg->ext_ztso ?
9440                      RISCV_HWPROBE_EXT_ZTSO : 0;
9441             value |= cfg->ext_zacas ?
9442                      RISCV_HWPROBE_EXT_ZACAS : 0;
9443             value |= cfg->ext_zicond ?
9444                      RISCV_HWPROBE_EXT_ZICOND : 0;
9445             value |= cfg->ext_zihintpause ?
9446                      RISCV_HWPROBE_EXT_ZIHINTPAUSE : 0;
9447             value |= cfg->ext_zve32x ?
9448                      RISCV_HWPROBE_EXT_ZVE32X : 0;
9449             value |= cfg->ext_zve32f ?
9450                      RISCV_HWPROBE_EXT_ZVE32F : 0;
9451             value |= cfg->ext_zve64x ?
9452                      RISCV_HWPROBE_EXT_ZVE64X : 0;
9453             value |= cfg->ext_zve64f ?
9454                      RISCV_HWPROBE_EXT_ZVE64F : 0;
9455             value |= cfg->ext_zve64d ?
9456                      RISCV_HWPROBE_EXT_ZVE64D : 0;
9457             value |= cfg->ext_zimop ?
9458                      RISCV_HWPROBE_EXT_ZIMOP : 0;
9459             value |= cfg->ext_zca ?
9460                      RISCV_HWPROBE_EXT_ZCA : 0;
9461             value |= cfg->ext_zcb ?
9462                      RISCV_HWPROBE_EXT_ZCB : 0;
9463             value |= cfg->ext_zcd ?
9464                      RISCV_HWPROBE_EXT_ZCD : 0;
9465             value |= cfg->ext_zcf ?
9466                      RISCV_HWPROBE_EXT_ZCF : 0;
9467             value |= cfg->ext_zcmop ?
9468                      RISCV_HWPROBE_EXT_ZCMOP : 0;
9469             value |= cfg->ext_zawrs ?
9470                      RISCV_HWPROBE_EXT_ZAWRS : 0;
9471             value |= cfg->ext_supm ?
9472                      RISCV_HWPROBE_EXT_SUPM : 0;
9473             value |= cfg->ext_zicntr ?
9474                      RISCV_HWPROBE_EXT_ZICNTR : 0;
9475             value |= cfg->ext_zihpm ?
9476                      RISCV_HWPROBE_EXT_ZIHPM : 0;
9477             value |= cfg->ext_zfbfmin ?
9478                      RISCV_HWPROBE_EXT_ZFBFMIN : 0;
9479             value |= cfg->ext_zvfbfmin ?
9480                      RISCV_HWPROBE_EXT_ZVFBFMIN : 0;
9481             value |= cfg->ext_zvfbfwma ?
9482                      RISCV_HWPROBE_EXT_ZVFBFWMA : 0;
9483             value |= cfg->ext_zicbom ?
9484                      RISCV_HWPROBE_EXT_ZICBOM : 0;
9485             value |= cfg->ext_zaamo ?
9486                      RISCV_HWPROBE_EXT_ZAAMO : 0;
9487             value |= cfg->ext_zalrsc ?
9488                      RISCV_HWPROBE_EXT_ZALRSC : 0;
9489             value |= cfg->ext_zabha ?
9490                      RISCV_HWPROBE_EXT_ZABHA : 0;
9491             __put_user(value, &pair->value);
9492             break;
9493         case RISCV_HWPROBE_KEY_CPUPERF_0:
9494             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9495             break;
9496         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9497             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9498             __put_user(value, &pair->value);
9499             break;
9500         case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
9501             value = cfg->ext_zicbom ? cfg->cbom_blocksize : 0;
9502             __put_user(value, &pair->value);
9503             break;
9504         default:
9505             __put_user(-1, &pair->key);
9506             break;
9507         }
9508     }
9509 }
9510 
9511 /*
9512  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9513  * If the cpumask_t has no bits set: -EINVAL.
9514  * Otherwise the cpumask_t contains some bit set: 0.
9515  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9516  * nor bound the search by cpumask_size().
9517  */
9518 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9519 {
9520     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9521     int ret = -TARGET_EFAULT;
9522 
9523     if (p) {
9524         ret = -TARGET_EINVAL;
9525         /*
9526          * Since we only care about the empty/non-empty state of the cpumask_t
9527          * not the individual bits, we do not need to repartition the bits
9528          * from target abi_ulong to host unsigned long.
9529          *
9530          * Note that the kernel does not round up cpusetsize to a multiple of
9531          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9532          * it copies exactly cpusetsize bytes into a zeroed buffer.
9533          */
9534         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9535             if (p[i]) {
9536                 ret = 0;
9537                 break;
9538             }
9539         }
9540         unlock_user(p, target_cpus, 0);
9541     }
9542     return ret;
9543 }
9544 
9545 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9546                                  abi_long arg2, abi_long arg3,
9547                                  abi_long arg4, abi_long arg5)
9548 {
9549     int ret;
9550     struct riscv_hwprobe *host_pairs;
9551 
9552     /* flags must be 0 */
9553     if (arg5 != 0) {
9554         return -TARGET_EINVAL;
9555     }
9556 
9557     /* check cpu_set */
9558     if (arg3 != 0) {
9559         ret = nonempty_cpu_set(arg3, arg4);
9560         if (ret != 0) {
9561             return ret;
9562         }
9563     } else if (arg4 != 0) {
9564         return -TARGET_EINVAL;
9565     }
9566 
9567     /* no pairs */
9568     if (arg2 == 0) {
9569         return 0;
9570     }
9571 
9572     host_pairs = lock_user(VERIFY_WRITE, arg1,
9573                            sizeof(*host_pairs) * (size_t)arg2, 0);
9574     if (host_pairs == NULL) {
9575         return -TARGET_EFAULT;
9576     }
9577     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9578     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9579     return 0;
9580 }
9581 #endif /* TARGET_NR_riscv_hwprobe */
9582 
9583 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9584 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9585 #endif
9586 
9587 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9588 #define __NR_sys_open_tree __NR_open_tree
9589 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9590           unsigned int, __flags)
9591 #endif
9592 
9593 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9594 #define __NR_sys_move_mount __NR_move_mount
9595 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9596           int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9597 #endif
9598 
9599 /* This is an internal helper for do_syscall so that it is easier
9600  * to have a single return point, which in turn allows actions such as
9601  * logging of syscall results to be performed.
9602  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9603  */
9604 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9605                             abi_long arg2, abi_long arg3, abi_long arg4,
9606                             abi_long arg5, abi_long arg6, abi_long arg7,
9607                             abi_long arg8)
9608 {
9609     CPUState *cpu = env_cpu(cpu_env);
9610     abi_long ret;
9611 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9612     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9613     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9614     || defined(TARGET_NR_statx)
9615     struct stat st;
9616 #endif
9617 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9618     || defined(TARGET_NR_fstatfs)
9619     struct statfs stfs;
9620 #endif
9621     void *p;
9622 
9623     switch(num) {
9624     case TARGET_NR_exit:
9625         /* In old applications this may be used to implement _exit(2).
9626            However in threaded applications it is used for thread termination,
9627            and _exit_group is used for application termination.
9628            Do thread termination if we have more than one thread.  */
9629 
9630         if (block_signals()) {
9631             return -QEMU_ERESTARTSYS;
9632         }
9633 
9634         pthread_mutex_lock(&clone_lock);
9635 
9636         if (CPU_NEXT(first_cpu)) {
9637             TaskState *ts = get_task_state(cpu);
9638 
9639             if (ts->child_tidptr) {
9640                 put_user_u32(0, ts->child_tidptr);
9641                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9642                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9643             }
9644 
9645 #ifdef TARGET_AARCH64
9646             if (ts->gcs_base) {
9647                 target_munmap(ts->gcs_base, ts->gcs_size);
9648             }
9649 #endif
9650 
9651             object_unparent(OBJECT(cpu));
9652             object_unref(OBJECT(cpu));
9653             /*
9654              * At this point the CPU should be unrealized and removed
9655              * from cpu lists. We can clean up the rest of the thread
9656              * data without the lock held.
9657              */
9658 
9659             pthread_mutex_unlock(&clone_lock);
9660 
9661             thread_cpu = NULL;
9662             g_free(ts);
9663             rcu_unregister_thread();
9664             pthread_exit(NULL);
9665         }
9666 
9667         pthread_mutex_unlock(&clone_lock);
9668         preexit_cleanup(cpu_env, arg1);
9669         _exit(arg1);
9670         return 0; /* avoid warning */
9671     case TARGET_NR_read:
9672         if (arg2 == 0 && arg3 == 0) {
9673             return get_errno(safe_read(arg1, 0, 0));
9674         } else {
9675             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9676                 return -TARGET_EFAULT;
9677             ret = get_errno(safe_read(arg1, p, arg3));
9678             if (ret >= 0 &&
9679                 fd_trans_host_to_target_data(arg1)) {
9680                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9681             }
9682             unlock_user(p, arg2, ret);
9683         }
9684         return ret;
9685     case TARGET_NR_write:
9686         if (arg2 == 0 && arg3 == 0) {
9687             return get_errno(safe_write(arg1, 0, 0));
9688         }
9689         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9690             return -TARGET_EFAULT;
9691         if (fd_trans_target_to_host_data(arg1)) {
9692             void *copy = g_malloc(arg3);
9693             memcpy(copy, p, arg3);
9694             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9695             if (ret >= 0) {
9696                 ret = get_errno(safe_write(arg1, copy, ret));
9697             }
9698             g_free(copy);
9699         } else {
9700             ret = get_errno(safe_write(arg1, p, arg3));
9701         }
9702         unlock_user(p, arg2, 0);
9703         return ret;
9704 
9705 #ifdef TARGET_NR_open
9706     case TARGET_NR_open:
9707         if (!(p = lock_user_string(arg1)))
9708             return -TARGET_EFAULT;
9709         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9710                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9711                                   arg3, true));
9712         fd_trans_unregister(ret);
9713         unlock_user(p, arg1, 0);
9714         return ret;
9715 #endif
9716     case TARGET_NR_openat:
9717         if (!(p = lock_user_string(arg2)))
9718             return -TARGET_EFAULT;
9719         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9720                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9721                                   arg4, true));
9722         fd_trans_unregister(ret);
9723         unlock_user(p, arg2, 0);
9724         return ret;
9725     case TARGET_NR_openat2:
9726         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9727         return ret;
9728 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9729     case TARGET_NR_name_to_handle_at:
9730         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9731         return ret;
9732 #endif
9733 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9734     case TARGET_NR_open_by_handle_at:
9735         ret = do_open_by_handle_at(arg1, arg2, arg3);
9736         fd_trans_unregister(ret);
9737         return ret;
9738 #endif
9739 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9740     case TARGET_NR_pidfd_open:
9741         return get_errno(pidfd_open(arg1, arg2));
9742 #endif
9743 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9744     case TARGET_NR_pidfd_send_signal:
9745         {
9746             siginfo_t uinfo, *puinfo;
9747 
9748             if (arg3) {
9749                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9750                 if (!p) {
9751                     return -TARGET_EFAULT;
9752                 }
9753                 target_to_host_siginfo(&uinfo, p);
9754                 unlock_user(p, arg3, 0);
9755                 puinfo = &uinfo;
9756             } else {
9757                 puinfo = NULL;
9758             }
9759             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9760                                               puinfo, arg4));
9761         }
9762         return ret;
9763 #endif
9764 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9765     case TARGET_NR_pidfd_getfd:
9766         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9767 #endif
9768     case TARGET_NR_close:
9769         fd_trans_unregister(arg1);
9770         return get_errno(close(arg1));
9771 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9772     case TARGET_NR_close_range:
9773         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9774         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9775             abi_long fd, maxfd;
9776             maxfd = MIN(arg2, target_fd_max);
9777             for (fd = arg1; fd < maxfd; fd++) {
9778                 fd_trans_unregister(fd);
9779             }
9780         }
9781         return ret;
9782 #endif
9783 
9784     case TARGET_NR_brk:
9785         return do_brk(arg1);
9786 #ifdef TARGET_NR_fork
9787     case TARGET_NR_fork:
9788         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9789 #endif
9790 #ifdef TARGET_NR_waitpid
9791     case TARGET_NR_waitpid:
9792         {
9793             int status;
9794             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9795             if (!is_error(ret) && arg2 && ret
9796                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9797                 return -TARGET_EFAULT;
9798         }
9799         return ret;
9800 #endif
9801 #ifdef TARGET_NR_waitid
9802     case TARGET_NR_waitid:
9803         {
9804             struct rusage ru;
9805             siginfo_t info;
9806 
9807             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9808                                         arg4, (arg5 ? &ru : NULL)));
9809             if (!is_error(ret)) {
9810                 if (arg3) {
9811                     p = lock_user(VERIFY_WRITE, arg3,
9812                                   sizeof(target_siginfo_t), 0);
9813                     if (!p) {
9814                         return -TARGET_EFAULT;
9815                     }
9816                     host_to_target_siginfo(p, &info);
9817                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9818                 }
9819                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9820                     return -TARGET_EFAULT;
9821                 }
9822             }
9823         }
9824         return ret;
9825 #endif
9826 #ifdef TARGET_NR_creat /* not on alpha */
9827     case TARGET_NR_creat:
9828         if (!(p = lock_user_string(arg1)))
9829             return -TARGET_EFAULT;
9830         ret = get_errno(creat(p, arg2));
9831         fd_trans_unregister(ret);
9832         unlock_user(p, arg1, 0);
9833         return ret;
9834 #endif
9835 #ifdef TARGET_NR_link
9836     case TARGET_NR_link:
9837         {
9838             void * p2;
9839             p = lock_user_string(arg1);
9840             p2 = lock_user_string(arg2);
9841             if (!p || !p2)
9842                 ret = -TARGET_EFAULT;
9843             else
9844                 ret = get_errno(link(p, p2));
9845             unlock_user(p2, arg2, 0);
9846             unlock_user(p, arg1, 0);
9847         }
9848         return ret;
9849 #endif
9850 #if defined(TARGET_NR_linkat)
9851     case TARGET_NR_linkat:
9852         {
9853             void * p2 = NULL;
9854             if (!arg2 || !arg4)
9855                 return -TARGET_EFAULT;
9856             p  = lock_user_string(arg2);
9857             p2 = lock_user_string(arg4);
9858             if (!p || !p2)
9859                 ret = -TARGET_EFAULT;
9860             else
9861                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9862             unlock_user(p, arg2, 0);
9863             unlock_user(p2, arg4, 0);
9864         }
9865         return ret;
9866 #endif
9867 #ifdef TARGET_NR_unlink
9868     case TARGET_NR_unlink:
9869         if (!(p = lock_user_string(arg1)))
9870             return -TARGET_EFAULT;
9871         ret = get_errno(unlink(p));
9872         unlock_user(p, arg1, 0);
9873         return ret;
9874 #endif
9875 #if defined(TARGET_NR_unlinkat)
9876     case TARGET_NR_unlinkat:
9877         if (!(p = lock_user_string(arg2)))
9878             return -TARGET_EFAULT;
9879         ret = get_errno(unlinkat(arg1, p, arg3));
9880         unlock_user(p, arg2, 0);
9881         return ret;
9882 #endif
9883     case TARGET_NR_execveat:
9884         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9885     case TARGET_NR_execve:
9886         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9887     case TARGET_NR_chdir:
9888         if (!(p = lock_user_string(arg1)))
9889             return -TARGET_EFAULT;
9890         ret = get_errno(chdir(p));
9891         unlock_user(p, arg1, 0);
9892         return ret;
9893 #ifdef TARGET_NR_time
9894     case TARGET_NR_time:
9895         {
9896             time_t host_time;
9897             ret = get_errno(time(&host_time));
9898             if (!is_error(ret)
9899                 && arg1
9900                 && put_user_sal(host_time, arg1))
9901                 return -TARGET_EFAULT;
9902         }
9903         return ret;
9904 #endif
9905 #ifdef TARGET_NR_mknod
9906     case TARGET_NR_mknod:
9907         if (!(p = lock_user_string(arg1)))
9908             return -TARGET_EFAULT;
9909         ret = get_errno(mknod(p, arg2, arg3));
9910         unlock_user(p, arg1, 0);
9911         return ret;
9912 #endif
9913 #if defined(TARGET_NR_mknodat)
9914     case TARGET_NR_mknodat:
9915         if (!(p = lock_user_string(arg2)))
9916             return -TARGET_EFAULT;
9917         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9918         unlock_user(p, arg2, 0);
9919         return ret;
9920 #endif
9921 #ifdef TARGET_NR_chmod
9922     case TARGET_NR_chmod:
9923         if (!(p = lock_user_string(arg1)))
9924             return -TARGET_EFAULT;
9925         ret = get_errno(chmod(p, arg2));
9926         unlock_user(p, arg1, 0);
9927         return ret;
9928 #endif
9929 #ifdef TARGET_NR_lseek
9930     case TARGET_NR_lseek:
9931         return get_errno(lseek(arg1, arg2, arg3));
9932 #endif
9933 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9934     /* Alpha specific */
9935     case TARGET_NR_getxpid:
9936         cpu_env->ir[IR_A4] = getppid();
9937         return get_errno(getpid());
9938 #endif
9939 #ifdef TARGET_NR_getpid
9940     case TARGET_NR_getpid:
9941         return get_errno(getpid());
9942 #endif
9943     case TARGET_NR_mount:
9944         {
9945             /* need to look at the data field */
9946             void *p2, *p3;
9947 
9948             if (arg1) {
9949                 p = lock_user_string(arg1);
9950                 if (!p) {
9951                     return -TARGET_EFAULT;
9952                 }
9953             } else {
9954                 p = NULL;
9955             }
9956 
9957             p2 = lock_user_string(arg2);
9958             if (!p2) {
9959                 if (arg1) {
9960                     unlock_user(p, arg1, 0);
9961                 }
9962                 return -TARGET_EFAULT;
9963             }
9964 
9965             if (arg3) {
9966                 p3 = lock_user_string(arg3);
9967                 if (!p3) {
9968                     if (arg1) {
9969                         unlock_user(p, arg1, 0);
9970                     }
9971                     unlock_user(p2, arg2, 0);
9972                     return -TARGET_EFAULT;
9973                 }
9974             } else {
9975                 p3 = NULL;
9976             }
9977 
9978             /* FIXME - arg5 should be locked, but it isn't clear how to
9979              * do that since it's not guaranteed to be a NULL-terminated
9980              * string.
9981              */
9982             if (!arg5) {
9983                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9984             } else {
9985                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9986             }
9987             ret = get_errno(ret);
9988 
9989             if (arg1) {
9990                 unlock_user(p, arg1, 0);
9991             }
9992             unlock_user(p2, arg2, 0);
9993             if (arg3) {
9994                 unlock_user(p3, arg3, 0);
9995             }
9996         }
9997         return ret;
9998 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9999 #if defined(TARGET_NR_umount)
10000     case TARGET_NR_umount:
10001 #endif
10002 #if defined(TARGET_NR_oldumount)
10003     case TARGET_NR_oldumount:
10004 #endif
10005         if (!(p = lock_user_string(arg1)))
10006             return -TARGET_EFAULT;
10007         ret = get_errno(umount(p));
10008         unlock_user(p, arg1, 0);
10009         return ret;
10010 #endif
10011 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
10012     case TARGET_NR_move_mount:
10013         {
10014             void *p2, *p4;
10015 
10016             if (!arg2 || !arg4) {
10017                 return -TARGET_EFAULT;
10018             }
10019 
10020             p2 = lock_user_string(arg2);
10021             if (!p2) {
10022                 return -TARGET_EFAULT;
10023             }
10024 
10025             p4 = lock_user_string(arg4);
10026             if (!p4) {
10027                 unlock_user(p2, arg2, 0);
10028                 return -TARGET_EFAULT;
10029             }
10030             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
10031 
10032             unlock_user(p2, arg2, 0);
10033             unlock_user(p4, arg4, 0);
10034 
10035             return ret;
10036         }
10037 #endif
10038 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
10039     case TARGET_NR_open_tree:
10040         {
10041             void *p2;
10042             int host_flags;
10043 
10044             if (!arg2) {
10045                 return -TARGET_EFAULT;
10046             }
10047 
10048             p2 = lock_user_string(arg2);
10049             if (!p2) {
10050                 return -TARGET_EFAULT;
10051             }
10052 
10053             host_flags = arg3 & ~TARGET_O_CLOEXEC;
10054             if (arg3 & TARGET_O_CLOEXEC) {
10055                 host_flags |= O_CLOEXEC;
10056             }
10057 
10058             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
10059 
10060             unlock_user(p2, arg2, 0);
10061 
10062             return ret;
10063         }
10064 #endif
10065 #ifdef TARGET_NR_stime /* not on alpha */
10066     case TARGET_NR_stime:
10067         {
10068             struct timespec ts;
10069             ts.tv_nsec = 0;
10070             if (get_user_sal(ts.tv_sec, arg1)) {
10071                 return -TARGET_EFAULT;
10072             }
10073             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
10074         }
10075 #endif
10076 #ifdef TARGET_NR_alarm /* not on alpha */
10077     case TARGET_NR_alarm:
10078         return alarm(arg1);
10079 #endif
10080 #ifdef TARGET_NR_pause /* not on alpha */
10081     case TARGET_NR_pause:
10082         if (!block_signals()) {
10083             sigsuspend(&get_task_state(cpu)->signal_mask);
10084         }
10085         return -TARGET_EINTR;
10086 #endif
10087 #ifdef TARGET_NR_utime
10088     case TARGET_NR_utime:
10089         {
10090             struct utimbuf tbuf, *host_tbuf;
10091             struct target_utimbuf *target_tbuf;
10092             if (arg2) {
10093                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
10094                     return -TARGET_EFAULT;
10095                 tbuf.actime = tswapal(target_tbuf->actime);
10096                 tbuf.modtime = tswapal(target_tbuf->modtime);
10097                 unlock_user_struct(target_tbuf, arg2, 0);
10098                 host_tbuf = &tbuf;
10099             } else {
10100                 host_tbuf = NULL;
10101             }
10102             if (!(p = lock_user_string(arg1)))
10103                 return -TARGET_EFAULT;
10104             ret = get_errno(utime(p, host_tbuf));
10105             unlock_user(p, arg1, 0);
10106         }
10107         return ret;
10108 #endif
10109 #ifdef TARGET_NR_utimes
10110     case TARGET_NR_utimes:
10111         {
10112             struct timeval *tvp, tv[2];
10113             if (arg2) {
10114                 if (copy_from_user_timeval(&tv[0], arg2)
10115                     || copy_from_user_timeval(&tv[1],
10116                                               arg2 + sizeof(struct target_timeval)))
10117                     return -TARGET_EFAULT;
10118                 tvp = tv;
10119             } else {
10120                 tvp = NULL;
10121             }
10122             if (!(p = lock_user_string(arg1)))
10123                 return -TARGET_EFAULT;
10124             ret = get_errno(utimes(p, tvp));
10125             unlock_user(p, arg1, 0);
10126         }
10127         return ret;
10128 #endif
10129 #if defined(TARGET_NR_futimesat)
10130     case TARGET_NR_futimesat:
10131         {
10132             struct timeval *tvp, tv[2];
10133             if (arg3) {
10134                 if (copy_from_user_timeval(&tv[0], arg3)
10135                     || copy_from_user_timeval(&tv[1],
10136                                               arg3 + sizeof(struct target_timeval)))
10137                     return -TARGET_EFAULT;
10138                 tvp = tv;
10139             } else {
10140                 tvp = NULL;
10141             }
10142             if (!(p = lock_user_string(arg2))) {
10143                 return -TARGET_EFAULT;
10144             }
10145             ret = get_errno(futimesat(arg1, path(p), tvp));
10146             unlock_user(p, arg2, 0);
10147         }
10148         return ret;
10149 #endif
10150 #ifdef TARGET_NR_access
10151     case TARGET_NR_access:
10152         if (!(p = lock_user_string(arg1))) {
10153             return -TARGET_EFAULT;
10154         }
10155         ret = get_errno(access(path(p), arg2));
10156         unlock_user(p, arg1, 0);
10157         return ret;
10158 #endif
10159 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
10160     case TARGET_NR_faccessat:
10161         if (!(p = lock_user_string(arg2))) {
10162             return -TARGET_EFAULT;
10163         }
10164         ret = get_errno(faccessat(arg1, p, arg3, 0));
10165         unlock_user(p, arg2, 0);
10166         return ret;
10167 #endif
10168 #if defined(TARGET_NR_faccessat2)
10169     case TARGET_NR_faccessat2:
10170         if (!(p = lock_user_string(arg2))) {
10171             return -TARGET_EFAULT;
10172         }
10173         ret = get_errno(faccessat(arg1, p, arg3, arg4));
10174         unlock_user(p, arg2, 0);
10175         return ret;
10176 #endif
10177 #ifdef TARGET_NR_nice /* not on alpha */
10178     case TARGET_NR_nice:
10179         return get_errno(nice(arg1));
10180 #endif
10181     case TARGET_NR_sync:
10182         sync();
10183         return 0;
10184 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
10185     case TARGET_NR_syncfs:
10186         return get_errno(syncfs(arg1));
10187 #endif
10188     case TARGET_NR_kill:
10189         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
10190 #ifdef TARGET_NR_rename
10191     case TARGET_NR_rename:
10192         {
10193             void *p2;
10194             p = lock_user_string(arg1);
10195             p2 = lock_user_string(arg2);
10196             if (!p || !p2)
10197                 ret = -TARGET_EFAULT;
10198             else
10199                 ret = get_errno(rename(p, p2));
10200             unlock_user(p2, arg2, 0);
10201             unlock_user(p, arg1, 0);
10202         }
10203         return ret;
10204 #endif
10205 #if defined(TARGET_NR_renameat)
10206     case TARGET_NR_renameat:
10207         {
10208             void *p2;
10209             p  = lock_user_string(arg2);
10210             p2 = lock_user_string(arg4);
10211             if (!p || !p2)
10212                 ret = -TARGET_EFAULT;
10213             else
10214                 ret = get_errno(renameat(arg1, p, arg3, p2));
10215             unlock_user(p2, arg4, 0);
10216             unlock_user(p, arg2, 0);
10217         }
10218         return ret;
10219 #endif
10220 #if defined(TARGET_NR_renameat2)
10221     case TARGET_NR_renameat2:
10222         {
10223             void *p2;
10224             p  = lock_user_string(arg2);
10225             p2 = lock_user_string(arg4);
10226             if (!p || !p2) {
10227                 ret = -TARGET_EFAULT;
10228             } else {
10229                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
10230             }
10231             unlock_user(p2, arg4, 0);
10232             unlock_user(p, arg2, 0);
10233         }
10234         return ret;
10235 #endif
10236 #ifdef TARGET_NR_mkdir
10237     case TARGET_NR_mkdir:
10238         if (!(p = lock_user_string(arg1)))
10239             return -TARGET_EFAULT;
10240         ret = get_errno(mkdir(p, arg2));
10241         unlock_user(p, arg1, 0);
10242         return ret;
10243 #endif
10244 #if defined(TARGET_NR_mkdirat)
10245     case TARGET_NR_mkdirat:
10246         if (!(p = lock_user_string(arg2)))
10247             return -TARGET_EFAULT;
10248         ret = get_errno(mkdirat(arg1, p, arg3));
10249         unlock_user(p, arg2, 0);
10250         return ret;
10251 #endif
10252 #ifdef TARGET_NR_rmdir
10253     case TARGET_NR_rmdir:
10254         if (!(p = lock_user_string(arg1)))
10255             return -TARGET_EFAULT;
10256         ret = get_errno(rmdir(p));
10257         unlock_user(p, arg1, 0);
10258         return ret;
10259 #endif
10260     case TARGET_NR_dup:
10261         ret = get_errno(dup(arg1));
10262         if (ret >= 0) {
10263             fd_trans_dup(arg1, ret);
10264         }
10265         return ret;
10266 #ifdef TARGET_NR_pipe
10267     case TARGET_NR_pipe:
10268         return do_pipe(cpu_env, arg1, 0, 0);
10269 #endif
10270 #ifdef TARGET_NR_pipe2
10271     case TARGET_NR_pipe2:
10272         return do_pipe(cpu_env, arg1,
10273                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
10274 #endif
10275     case TARGET_NR_times:
10276         {
10277             struct target_tms *tmsp;
10278             struct tms tms;
10279             ret = get_errno(times(&tms));
10280             if (arg1) {
10281                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
10282                 if (!tmsp)
10283                     return -TARGET_EFAULT;
10284                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
10285                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
10286                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
10287                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
10288             }
10289             if (!is_error(ret))
10290                 ret = host_to_target_clock_t(ret);
10291         }
10292         return ret;
10293     case TARGET_NR_acct:
10294         if (arg1 == 0) {
10295             ret = get_errno(acct(NULL));
10296         } else {
10297             if (!(p = lock_user_string(arg1))) {
10298                 return -TARGET_EFAULT;
10299             }
10300             ret = get_errno(acct(path(p)));
10301             unlock_user(p, arg1, 0);
10302         }
10303         return ret;
10304 #ifdef TARGET_NR_umount2
10305     case TARGET_NR_umount2:
10306         if (!(p = lock_user_string(arg1)))
10307             return -TARGET_EFAULT;
10308         ret = get_errno(umount2(p, arg2));
10309         unlock_user(p, arg1, 0);
10310         return ret;
10311 #endif
10312     case TARGET_NR_ioctl:
10313         return do_ioctl(arg1, arg2, arg3);
10314 #ifdef TARGET_NR_fcntl
10315     case TARGET_NR_fcntl:
10316         return do_fcntl(arg1, arg2, arg3);
10317 #endif
10318     case TARGET_NR_setpgid:
10319         return get_errno(setpgid(arg1, arg2));
10320     case TARGET_NR_umask:
10321         return get_errno(umask(arg1));
10322     case TARGET_NR_chroot:
10323         if (!(p = lock_user_string(arg1)))
10324             return -TARGET_EFAULT;
10325         ret = get_errno(chroot(p));
10326         unlock_user(p, arg1, 0);
10327         return ret;
10328 #ifdef TARGET_NR_dup2
10329     case TARGET_NR_dup2:
10330         ret = get_errno(dup2(arg1, arg2));
10331         if (ret >= 0) {
10332             fd_trans_dup(arg1, arg2);
10333         }
10334         return ret;
10335 #endif
10336 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
10337     case TARGET_NR_dup3:
10338     {
10339         int host_flags;
10340 
10341         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
10342             return -TARGET_EINVAL;
10343         }
10344         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
10345         ret = get_errno(dup3(arg1, arg2, host_flags));
10346         if (ret >= 0) {
10347             fd_trans_dup(arg1, arg2);
10348         }
10349         return ret;
10350     }
10351 #endif
10352 #ifdef TARGET_NR_getppid /* not on alpha */
10353     case TARGET_NR_getppid:
10354         return get_errno(getppid());
10355 #endif
10356 #ifdef TARGET_NR_getpgrp
10357     case TARGET_NR_getpgrp:
10358         return get_errno(getpgrp());
10359 #endif
10360     case TARGET_NR_setsid:
10361         return get_errno(setsid());
10362 #ifdef TARGET_NR_sigaction
10363     case TARGET_NR_sigaction:
10364         {
10365 #if defined(TARGET_MIPS)
10366             struct target_sigaction act, oact, *pact, *old_act;
10367 
10368             if (arg2) {
10369                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10370                     return -TARGET_EFAULT;
10371                 act._sa_handler = old_act->_sa_handler;
10372                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
10373                 act.sa_flags = old_act->sa_flags;
10374                 unlock_user_struct(old_act, arg2, 0);
10375                 pact = &act;
10376             } else {
10377                 pact = NULL;
10378             }
10379 
10380             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10381 
10382             if (!is_error(ret) && arg3) {
10383                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10384                     return -TARGET_EFAULT;
10385                 old_act->_sa_handler = oact._sa_handler;
10386                 old_act->sa_flags = oact.sa_flags;
10387                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
10388                 old_act->sa_mask.sig[1] = 0;
10389                 old_act->sa_mask.sig[2] = 0;
10390                 old_act->sa_mask.sig[3] = 0;
10391                 unlock_user_struct(old_act, arg3, 1);
10392             }
10393 #else
10394             struct target_old_sigaction *old_act;
10395             struct target_sigaction act, oact, *pact;
10396             if (arg2) {
10397                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10398                     return -TARGET_EFAULT;
10399                 act._sa_handler = old_act->_sa_handler;
10400                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10401                 act.sa_flags = old_act->sa_flags;
10402 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10403                 act.sa_restorer = old_act->sa_restorer;
10404 #endif
10405                 unlock_user_struct(old_act, arg2, 0);
10406                 pact = &act;
10407             } else {
10408                 pact = NULL;
10409             }
10410             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10411             if (!is_error(ret) && arg3) {
10412                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10413                     return -TARGET_EFAULT;
10414                 old_act->_sa_handler = oact._sa_handler;
10415                 old_act->sa_mask = oact.sa_mask.sig[0];
10416                 old_act->sa_flags = oact.sa_flags;
10417 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10418                 old_act->sa_restorer = oact.sa_restorer;
10419 #endif
10420                 unlock_user_struct(old_act, arg3, 1);
10421             }
10422 #endif
10423         }
10424         return ret;
10425 #endif
10426     case TARGET_NR_rt_sigaction:
10427         {
10428             /*
10429              * For Alpha and SPARC this is a 5 argument syscall, with
10430              * a 'restorer' parameter which must be copied into the
10431              * sa_restorer field of the sigaction struct.
10432              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10433              * and arg5 is the sigsetsize.
10434              */
10435 #if defined(TARGET_ALPHA)
10436             target_ulong sigsetsize = arg4;
10437             target_ulong restorer = arg5;
10438 #elif defined(TARGET_SPARC)
10439             target_ulong restorer = arg4;
10440             target_ulong sigsetsize = arg5;
10441 #else
10442             target_ulong sigsetsize = arg4;
10443             target_ulong restorer = 0;
10444 #endif
10445             struct target_sigaction *act = NULL;
10446             struct target_sigaction *oact = NULL;
10447 
10448             if (sigsetsize != sizeof(target_sigset_t)) {
10449                 return -TARGET_EINVAL;
10450             }
10451             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10452                 return -TARGET_EFAULT;
10453             }
10454             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10455                 ret = -TARGET_EFAULT;
10456             } else {
10457                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10458                 if (oact) {
10459                     unlock_user_struct(oact, arg3, 1);
10460                 }
10461             }
10462             if (act) {
10463                 unlock_user_struct(act, arg2, 0);
10464             }
10465         }
10466         return ret;
10467 #ifdef TARGET_NR_sgetmask /* not on alpha */
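          /*
           * sgetmask/ssetmask are legacy calls that use the old single-word
           * signal mask; the previous mask is returned as the syscall result
           * rather than written through a user pointer.
           */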
10468     case TARGET_NR_sgetmask:
10469         {
10470             sigset_t cur_set;
10471             abi_ulong target_set;
10472             ret = do_sigprocmask(0, NULL, &cur_set);
10473             if (!ret) {
10474                 host_to_target_old_sigset(&target_set, &cur_set);
10475                 ret = target_set;
10476             }
10477         }
10478         return ret;
10479 #endif
10480 #ifdef TARGET_NR_ssetmask /* not on alpha */
10481     case TARGET_NR_ssetmask:
10482         {
10483             sigset_t set, oset;
10484             abi_ulong target_set = arg1;
10485             target_to_host_old_sigset(&set, &target_set);
10486             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10487             if (!ret) {
10488                 host_to_target_old_sigset(&target_set, &oset);
10489                 ret = target_set;
10490             }
10491         }
10492         return ret;
10493 #endif
10494 #ifdef TARGET_NR_sigprocmask
10495     case TARGET_NR_sigprocmask:
10496         {
10497 #if defined(TARGET_ALPHA)
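                  /*
                   * On Alpha the old sigprocmask takes the new mask by value
                   * in arg2 and hands the previous mask back as the syscall
                   * result, so there is no user pointer to write through.
                   */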
10498             sigset_t set, oldset;
10499             abi_ulong mask;
10500             int how;
10501 
10502             switch (arg1) {
10503             case TARGET_SIG_BLOCK:
10504                 how = SIG_BLOCK;
10505                 break;
10506             case TARGET_SIG_UNBLOCK:
10507                 how = SIG_UNBLOCK;
10508                 break;
10509             case TARGET_SIG_SETMASK:
10510                 how = SIG_SETMASK;
10511                 break;
10512             default:
10513                 return -TARGET_EINVAL;
10514             }
10515             mask = arg2;
10516             target_to_host_old_sigset(&set, &mask);
10517 
10518             ret = do_sigprocmask(how, &set, &oldset);
10519             if (!is_error(ret)) {
10520                 host_to_target_old_sigset(&mask, &oldset);
10521                 ret = mask;
10522                 cpu_env->ir[IR_V0] = 0; /* force no error */
10523             }
10524 #else
10525             sigset_t set, oldset, *set_ptr;
10526             int how;
10527 
10528             if (arg2) {
10529                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10530                 if (!p) {
10531                     return -TARGET_EFAULT;
10532                 }
10533                 target_to_host_old_sigset(&set, p);
10534                 unlock_user(p, arg2, 0);
10535                 set_ptr = &set;
10536                 switch (arg1) {
10537                 case TARGET_SIG_BLOCK:
10538                     how = SIG_BLOCK;
10539                     break;
10540                 case TARGET_SIG_UNBLOCK:
10541                     how = SIG_UNBLOCK;
10542                     break;
10543                 case TARGET_SIG_SETMASK:
10544                     how = SIG_SETMASK;
10545                     break;
10546                 default:
10547                     return -TARGET_EINVAL;
10548                 }
10549             } else {
10550                 how = 0;
10551                 set_ptr = NULL;
10552             }
10553             ret = do_sigprocmask(how, set_ptr, &oldset);
10554             if (!is_error(ret) && arg3) {
10555                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10556                     return -TARGET_EFAULT;
10557                 host_to_target_old_sigset(p, &oldset);
10558                 unlock_user(p, arg3, sizeof(target_sigset_t));
10559             }
10560 #endif
10561         }
10562         return ret;
10563 #endif
10564     case TARGET_NR_rt_sigprocmask:
10565         {
10566             int how = arg1;
10567             sigset_t set, oldset, *set_ptr;
10568 
10569             if (arg4 != sizeof(target_sigset_t)) {
10570                 return -TARGET_EINVAL;
10571             }
10572 
10573             if (arg2) {
10574                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10575                 if (!p) {
10576                     return -TARGET_EFAULT;
10577                 }
10578                 target_to_host_sigset(&set, p);
10579                 unlock_user(p, arg2, 0);
10580                 set_ptr = &set;
10581                 switch(how) {
10582                 case TARGET_SIG_BLOCK:
10583                     how = SIG_BLOCK;
10584                     break;
10585                 case TARGET_SIG_UNBLOCK:
10586                     how = SIG_UNBLOCK;
10587                     break;
10588                 case TARGET_SIG_SETMASK:
10589                     how = SIG_SETMASK;
10590                     break;
10591                 default:
10592                     return -TARGET_EINVAL;
10593                 }
10594             } else {
10595                 how = 0;
10596                 set_ptr = NULL;
10597             }
10598             ret = do_sigprocmask(how, set_ptr, &oldset);
10599             if (!is_error(ret) && arg3) {
10600                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10601                     return -TARGET_EFAULT;
10602                 host_to_target_sigset(p, &oldset);
10603                 unlock_user(p, arg3, sizeof(target_sigset_t));
10604             }
10605         }
10606         return ret;
10607 #ifdef TARGET_NR_sigpending
10608     case TARGET_NR_sigpending:
10609         {
10610             sigset_t set;
10611             ret = get_errno(sigpending(&set));
10612             if (!is_error(ret)) {
10613                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10614                     return -TARGET_EFAULT;
10615                 host_to_target_old_sigset(p, &set);
10616                 unlock_user(p, arg1, sizeof(target_sigset_t));
10617             }
10618         }
10619         return ret;
10620 #endif
10621     case TARGET_NR_rt_sigpending:
10622         {
10623             sigset_t set;
10624 
10625             /* Yes, this check is >, not != like most. We follow the kernel's
10626              * logic here: it implements NR_sigpending through the same code
10627              * path, and in that case the old_sigset_t is smaller in size, so
10628              * only a larger-than-expected size is rejected.
10629              */
10630             if (arg2 > sizeof(target_sigset_t)) {
10631                 return -TARGET_EINVAL;
10632             }
10633 
10634             ret = get_errno(sigpending(&set));
10635             if (!is_error(ret)) {
10636                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10637                     return -TARGET_EFAULT;
10638                 host_to_target_sigset(p, &set);
10639                 unlock_user(p, arg1, sizeof(target_sigset_t));
10640             }
10641         }
10642         return ret;
10643 #ifdef TARGET_NR_sigsuspend
10644     case TARGET_NR_sigsuspend:
10645         {
10646             sigset_t *set;
10647 
10648 #if defined(TARGET_ALPHA)
10649             TaskState *ts = get_task_state(cpu);
10650             /* target_to_host_old_sigset will bswap back */
10651             abi_ulong mask = tswapal(arg1);
10652             set = &ts->sigsuspend_mask;
10653             target_to_host_old_sigset(set, &mask);
10654 #else
10655             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10656             if (ret != 0) {
10657                 return ret;
10658             }
10659 #endif
10660             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10661             finish_sigsuspend_mask(ret);
10662         }
10663         return ret;
10664 #endif
10665     case TARGET_NR_rt_sigsuspend:
10666         {
10667             sigset_t *set;
10668 
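                  /*
                   * The guest mask is validated, converted and stashed in the
                   * TaskState by process_sigsuspend_mask(), and released again
                   * by finish_sigsuspend_mask() once the suspend completes.
                   */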
10669             ret = process_sigsuspend_mask(&set, arg1, arg2);
10670             if (ret != 0) {
10671                 return ret;
10672             }
10673             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10674             finish_sigsuspend_mask(ret);
10675         }
10676         return ret;
10677 #ifdef TARGET_NR_rt_sigtimedwait
10678     case TARGET_NR_rt_sigtimedwait:
10679         {
10680             sigset_t set;
10681             struct timespec uts, *puts;
10682             siginfo_t uinfo;
10683 
10684             if (arg4 != sizeof(target_sigset_t)) {
10685                 return -TARGET_EINVAL;
10686             }
10687 
10688             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10689                 return -TARGET_EFAULT;
10690             target_to_host_sigset(&set, p);
10691             unlock_user(p, arg1, 0);
10692             if (arg3) {
10693                 puts = &uts;
10694                 if (target_to_host_timespec(puts, arg3)) {
10695                     return -TARGET_EFAULT;
10696                 }
10697             } else {
10698                 puts = NULL;
10699             }
10700             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10701                                                  SIGSET_T_SIZE));
10702             if (!is_error(ret)) {
10703                 if (arg2) {
10704                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10705                                   0);
10706                     if (!p) {
10707                         return -TARGET_EFAULT;
10708                     }
10709                     host_to_target_siginfo(p, &uinfo);
10710                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10711                 }
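                      /*
                       * On success the return value is the host number of the
                       * signal that was delivered; convert it to the target's
                       * signal numbering.
                       */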
10712                 ret = host_to_target_signal(ret);
10713             }
10714         }
10715         return ret;
10716 #endif
10717 #ifdef TARGET_NR_rt_sigtimedwait_time64
10718     case TARGET_NR_rt_sigtimedwait_time64:
10719         {
10720             sigset_t set;
10721             struct timespec uts, *puts;
10722             siginfo_t uinfo;
10723 
10724             if (arg4 != sizeof(target_sigset_t)) {
10725                 return -TARGET_EINVAL;
10726             }
10727 
10728             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10729             if (!p) {
10730                 return -TARGET_EFAULT;
10731             }
10732             target_to_host_sigset(&set, p);
10733             unlock_user(p, arg1, 0);
10734             if (arg3) {
10735                 puts = &uts;
10736                 if (target_to_host_timespec64(puts, arg3)) {
10737                     return -TARGET_EFAULT;
10738                 }
10739             } else {
10740                 puts = NULL;
10741             }
10742             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10743                                                  SIGSET_T_SIZE));
10744             if (!is_error(ret)) {
10745                 if (arg2) {
10746                     p = lock_user(VERIFY_WRITE, arg2,
10747                                   sizeof(target_siginfo_t), 0);
10748                     if (!p) {
10749                         return -TARGET_EFAULT;
10750                     }
10751                     host_to_target_siginfo(p, &uinfo);
10752                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10753                 }
10754                 ret = host_to_target_signal(ret);
10755             }
10756         }
10757         return ret;
10758 #endif
10759     case TARGET_NR_rt_sigqueueinfo:
10760         {
10761             siginfo_t uinfo;
10762 
10763             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10764             if (!p) {
10765                 return -TARGET_EFAULT;
10766             }
10767             target_to_host_siginfo(&uinfo, p);
10768             unlock_user(p, arg3, 0);
10769             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10770         }
10771         return ret;
10772     case TARGET_NR_rt_tgsigqueueinfo:
10773         {
10774             siginfo_t uinfo;
10775 
10776             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10777             if (!p) {
10778                 return -TARGET_EFAULT;
10779             }
10780             target_to_host_siginfo(&uinfo, p);
10781             unlock_user(p, arg4, 0);
10782             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10783         }
10784         return ret;
10785 #ifdef TARGET_NR_sigreturn
10786     case TARGET_NR_sigreturn:
10787         if (block_signals()) {
10788             return -QEMU_ERESTARTSYS;
10789         }
10790         return do_sigreturn(cpu_env);
10791 #endif
10792     case TARGET_NR_rt_sigreturn:
10793         if (block_signals()) {
10794             return -QEMU_ERESTARTSYS;
10795         }
10796         return do_rt_sigreturn(cpu_env);
10797     case TARGET_NR_sethostname:
10798         if (!(p = lock_user_string(arg1)))
10799             return -TARGET_EFAULT;
10800         ret = get_errno(sethostname(p, arg2));
10801         unlock_user(p, arg1, 0);
10802         return ret;
10803 #ifdef TARGET_NR_setrlimit
10804     case TARGET_NR_setrlimit:
10805         {
10806             int resource = target_to_host_resource(arg1);
10807             struct target_rlimit *target_rlim;
10808             struct rlimit rlim;
10809             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10810                 return -TARGET_EFAULT;
10811             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10812             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10813             unlock_user_struct(target_rlim, arg2, 0);
10814             /*
10815              * If we just passed through resource limit settings for memory
10816              * then they would also apply to QEMU's own allocations, and QEMU
10817              * would crash or hang if its allocations failed. Ideally we would
10818              * track the guest's allocations in QEMU and apply the limits
10819              * ourselves. For now, just tell the guest the call succeeded but
10820              * don't actually limit anything.
10821              */
10822             if (resource != RLIMIT_AS &&
10823                 resource != RLIMIT_DATA &&
10824                 resource != RLIMIT_STACK) {
10825                 return get_errno(setrlimit(resource, &rlim));
10826             } else {
10827                 return 0;
10828             }
10829         }
10830 #endif
10831 #ifdef TARGET_NR_getrlimit
10832     case TARGET_NR_getrlimit:
10833         {
10834             int resource = target_to_host_resource(arg1);
10835             struct target_rlimit *target_rlim;
10836             struct rlimit rlim;
10837 
10838             ret = get_errno(getrlimit(resource, &rlim));
10839             if (!is_error(ret)) {
10840                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10841                     return -TARGET_EFAULT;
10842                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10843                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10844                 unlock_user_struct(target_rlim, arg2, 1);
10845             }
10846         }
10847         return ret;
10848 #endif
10849     case TARGET_NR_getrusage:
10850         {
10851             struct rusage rusage;
10852             ret = get_errno(getrusage(arg1, &rusage));
10853             if (!is_error(ret)) {
10854                 ret = host_to_target_rusage(arg2, &rusage);
10855             }
10856         }
10857         return ret;
10858 #if defined(TARGET_NR_gettimeofday)
10859     case TARGET_NR_gettimeofday:
10860         {
10861             struct timeval tv;
10862             struct timezone tz;
10863 
10864             ret = get_errno(gettimeofday(&tv, &tz));
10865             if (!is_error(ret)) {
10866                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10867                     return -TARGET_EFAULT;
10868                 }
10869                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10870                     return -TARGET_EFAULT;
10871                 }
10872             }
10873         }
10874         return ret;
10875 #endif
10876 #if defined(TARGET_NR_settimeofday)
10877     case TARGET_NR_settimeofday:
10878         {
10879             struct timeval tv, *ptv = NULL;
10880             struct timezone tz, *ptz = NULL;
10881 
10882             if (arg1) {
10883                 if (copy_from_user_timeval(&tv, arg1)) {
10884                     return -TARGET_EFAULT;
10885                 }
10886                 ptv = &tv;
10887             }
10888 
10889             if (arg2) {
10890                 if (copy_from_user_timezone(&tz, arg2)) {
10891                     return -TARGET_EFAULT;
10892                 }
10893                 ptz = &tz;
10894             }
10895 
10896             return get_errno(settimeofday(ptv, ptz));
10897         }
10898 #endif
10899 #if defined(TARGET_NR_select)
10900     case TARGET_NR_select:
10901 #if defined(TARGET_WANT_NI_OLD_SELECT)
10902         /* Some architectures used to implement old_select here
10903          * but now return ENOSYS for it.
10904          */
10905         ret = -TARGET_ENOSYS;
10906 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10907         ret = do_old_select(arg1);
10908 #else
10909         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10910 #endif
10911         return ret;
10912 #endif
10913 #ifdef TARGET_NR_pselect6
10914     case TARGET_NR_pselect6:
10915         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10916 #endif
10917 #ifdef TARGET_NR_pselect6_time64
10918     case TARGET_NR_pselect6_time64:
10919         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10920 #endif
10921 #ifdef TARGET_NR_symlink
10922     case TARGET_NR_symlink:
10923         {
10924             void *p2;
10925             p = lock_user_string(arg1);
10926             p2 = lock_user_string(arg2);
10927             if (!p || !p2)
10928                 ret = -TARGET_EFAULT;
10929             else
10930                 ret = get_errno(symlink(p, p2));
10931             unlock_user(p2, arg2, 0);
10932             unlock_user(p, arg1, 0);
10933         }
10934         return ret;
10935 #endif
10936 #if defined(TARGET_NR_symlinkat)
10937     case TARGET_NR_symlinkat:
10938         {
10939             void *p2;
10940             p  = lock_user_string(arg1);
10941             p2 = lock_user_string(arg3);
10942             if (!p || !p2)
10943                 ret = -TARGET_EFAULT;
10944             else
10945                 ret = get_errno(symlinkat(p, arg2, p2));
10946             unlock_user(p2, arg3, 0);
10947             unlock_user(p, arg1, 0);
10948         }
10949         return ret;
10950 #endif
10951 #ifdef TARGET_NR_readlink
10952     case TARGET_NR_readlink:
10953         {
10954             void *p2;
10955             p = lock_user_string(arg1);
10956             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10957             ret = get_errno(do_guest_readlink(p, p2, arg3));
10958             unlock_user(p2, arg2, ret);
10959             unlock_user(p, arg1, 0);
10960         }
10961         return ret;
10962 #endif
10963 #if defined(TARGET_NR_readlinkat)
10964     case TARGET_NR_readlinkat:
10965         {
10966             void *p2;
10967             p  = lock_user_string(arg2);
10968             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10969             if (!p || !p2) {
10970                 ret = -TARGET_EFAULT;
10971             } else if (!arg4) {
10972                 /* Short circuit this for the magic exe check. */
10973                 ret = -TARGET_EINVAL;
10974             } else if (is_proc_myself((const char *)p, "exe")) {
10975                 /*
10976                  * Don't worry about sign mismatch as earlier mapping
10977                  * logic would have thrown a bad address error.
10978                  */
10979                 ret = MIN(strlen(exec_path), arg4);
10980                 /* We cannot NUL terminate the string. */
10981                 memcpy(p2, exec_path, ret);
10982             } else {
10983                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10984             }
10985             unlock_user(p2, arg3, ret);
10986             unlock_user(p, arg2, 0);
10987         }
10988         return ret;
10989 #endif
10990 #ifdef TARGET_NR_swapon
10991     case TARGET_NR_swapon:
10992         if (!(p = lock_user_string(arg1)))
10993             return -TARGET_EFAULT;
10994         ret = get_errno(swapon(p, arg2));
10995         unlock_user(p, arg1, 0);
10996         return ret;
10997 #endif
10998     case TARGET_NR_reboot:
10999         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
11000            /* arg4 is only used for LINUX_REBOOT_CMD_RESTART2; ignore it otherwise */
11001            p = lock_user_string(arg4);
11002            if (!p) {
11003                return -TARGET_EFAULT;
11004            }
11005            ret = get_errno(reboot(arg1, arg2, arg3, p));
11006            unlock_user(p, arg4, 0);
11007         } else {
11008            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
11009         }
11010         return ret;
11011 #ifdef TARGET_NR_mmap
11012     case TARGET_NR_mmap:
11013 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
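              /*
               * The old-style mmap takes a single guest pointer to a block of
               * six arguments in memory rather than passing them in registers.
               */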
11014         {
11015             abi_ulong *v;
11016             abi_ulong v1, v2, v3, v4, v5, v6;
11017             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
11018                 return -TARGET_EFAULT;
11019             v1 = tswapal(v[0]);
11020             v2 = tswapal(v[1]);
11021             v3 = tswapal(v[2]);
11022             v4 = tswapal(v[3]);
11023             v5 = tswapal(v[4]);
11024             v6 = tswapal(v[5]);
11025             unlock_user(v, arg1, 0);
11026             return do_mmap(v1, v2, v3, v4, v5, v6);
11027         }
11028 #else
11029         /* mmap pointers are always untagged */
11030         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
11031 #endif
11032 #endif
11033 #ifdef TARGET_NR_mmap2
11034     case TARGET_NR_mmap2:
11035 #ifndef MMAP_SHIFT
11036 #define MMAP_SHIFT 12
11037 #endif
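              /*
               * mmap2 passes the file offset in units of 2^MMAP_SHIFT bytes
               * (normally the 4 KiB page size) so that 32-bit guests can
               * address offsets beyond 4 GiB.
               */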
11038         return do_mmap(arg1, arg2, arg3, arg4, arg5,
11039                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
11040 #endif
11041     case TARGET_NR_munmap:
11042         arg1 = cpu_untagged_addr(cpu, arg1);
11043         return get_errno(target_munmap(arg1, arg2));
11044     case TARGET_NR_mprotect:
11045         arg1 = cpu_untagged_addr(cpu, arg1);
11046         {
11047             TaskState *ts = get_task_state(cpu);
11048             /* Special hack to detect libc making the stack executable.  */
11049             if ((arg3 & PROT_GROWSDOWN)
11050                 && arg1 >= ts->info->stack_limit
11051                 && arg1 <= ts->info->start_stack) {
11052                 arg3 &= ~PROT_GROWSDOWN;
11053                 arg2 = arg2 + arg1 - ts->info->stack_limit;
11054                 arg1 = ts->info->stack_limit;
11055             }
11056         }
11057         return get_errno(target_mprotect(arg1, arg2, arg3));
11058 #ifdef TARGET_NR_mremap
11059     case TARGET_NR_mremap:
11060         arg1 = cpu_untagged_addr(cpu, arg1);
11061         /* mremap new_addr (arg5) is always untagged */
11062         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
11063 #endif
11064         /* ??? msync/mlock/munlock are broken for softmmu.  */
11065 #ifdef TARGET_NR_msync
11066     case TARGET_NR_msync:
11067         return get_errno(msync(g2h(cpu, arg1), arg2,
11068                                target_to_host_msync_arg(arg3)));
11069 #endif
11070 #ifdef TARGET_NR_mlock
11071     case TARGET_NR_mlock:
11072         return get_errno(mlock(g2h(cpu, arg1), arg2));
11073 #endif
11074 #ifdef TARGET_NR_munlock
11075     case TARGET_NR_munlock:
11076         return get_errno(munlock(g2h(cpu, arg1), arg2));
11077 #endif
11078 #ifdef TARGET_NR_mlockall
11079     case TARGET_NR_mlockall:
11080         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
11081 #endif
11082 #ifdef TARGET_NR_munlockall
11083     case TARGET_NR_munlockall:
11084         return get_errno(munlockall());
11085 #endif
11086 #ifdef TARGET_NR_truncate
11087     case TARGET_NR_truncate:
11088         if (!(p = lock_user_string(arg1)))
11089             return -TARGET_EFAULT;
11090         ret = get_errno(truncate(p, arg2));
11091         unlock_user(p, arg1, 0);
11092         return ret;
11093 #endif
11094 #ifdef TARGET_NR_ftruncate
11095     case TARGET_NR_ftruncate:
11096         return get_errno(ftruncate(arg1, arg2));
11097 #endif
11098     case TARGET_NR_fchmod:
11099         return get_errno(fchmod(arg1, arg2));
11100 #if defined(TARGET_NR_fchmodat)
11101     case TARGET_NR_fchmodat:
11102         if (!(p = lock_user_string(arg2)))
11103             return -TARGET_EFAULT;
11104         ret = get_errno(fchmodat(arg1, p, arg3, 0));
11105         unlock_user(p, arg2, 0);
11106         return ret;
11107 #endif
11108 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
11109     case TARGET_NR_fchmodat2:
11110         if (!(p = lock_user_string(arg2))) {
11111             return -TARGET_EFAULT;
11112         }
11113         ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4));
11114         unlock_user(p, arg2, 0);
11115         return ret;
11116 #endif
11117     case TARGET_NR_getpriority:
11118         /* Note that negative values are valid for getpriority, so we must
11119            differentiate based on errno settings.  */
11120         errno = 0;
11121         ret = getpriority(arg1, arg2);
11122         if (ret == -1 && errno != 0) {
11123             return -host_to_target_errno(errno);
11124         }
11125 #ifdef TARGET_ALPHA
11126         /* Return value is the unbiased priority.  Signal no error.  */
11127         cpu_env->ir[IR_V0] = 0;
11128 #else
11129         /* Return value is a biased priority to avoid negative numbers.  */
11130         ret = 20 - ret;
11131 #endif
11132         return ret;
11133     case TARGET_NR_setpriority:
11134         return get_errno(setpriority(arg1, arg2, arg3));
11135 #ifdef TARGET_NR_statfs
11136     case TARGET_NR_statfs:
11137         if (!(p = lock_user_string(arg1))) {
11138             return -TARGET_EFAULT;
11139         }
11140         ret = get_errno(statfs(path(p), &stfs));
11141         unlock_user(p, arg1, 0);
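          /*
           * TARGET_NR_fstatfs below jumps here to share the conversion of the
           * host statfs result into the target layout.
           */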
11142     convert_statfs:
11143         if (!is_error(ret)) {
11144             struct target_statfs *target_stfs;
11145 
11146             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
11147                 return -TARGET_EFAULT;
11148             __put_user(stfs.f_type, &target_stfs->f_type);
11149             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
11150             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
11151             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
11152             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
11153             __put_user(stfs.f_files, &target_stfs->f_files);
11154             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
11155             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
11156             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
11157             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
11158             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
11159 #ifdef _STATFS_F_FLAGS
11160             __put_user(stfs.f_flags, &target_stfs->f_flags);
11161 #else
11162             __put_user(0, &target_stfs->f_flags);
11163 #endif
11164             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
11165             unlock_user_struct(target_stfs, arg2, 1);
11166         }
11167         return ret;
11168 #endif
11169 #ifdef TARGET_NR_fstatfs
11170     case TARGET_NR_fstatfs:
11171         ret = get_errno(fstatfs(arg1, &stfs));
11172         goto convert_statfs;
11173 #endif
11174 #ifdef TARGET_NR_statfs64
11175     case TARGET_NR_statfs64:
11176         if (!(p = lock_user_string(arg1))) {
11177             return -TARGET_EFAULT;
11178         }
11179         ret = get_errno(statfs(path(p), &stfs));
11180         unlock_user(p, arg1, 0);
11181     convert_statfs64:
11182         if (!is_error(ret)) {
11183             struct target_statfs64 *target_stfs;
11184 
11185             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
11186                 return -TARGET_EFAULT;
11187             __put_user(stfs.f_type, &target_stfs->f_type);
11188             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
11189             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
11190             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
11191             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
11192             __put_user(stfs.f_files, &target_stfs->f_files);
11193             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
11194             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
11195             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
11196             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
11197             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
11198 #ifdef _STATFS_F_FLAGS
11199             __put_user(stfs.f_flags, &target_stfs->f_flags);
11200 #else
11201             __put_user(0, &target_stfs->f_flags);
11202 #endif
11203             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
11204             unlock_user_struct(target_stfs, arg3, 1);
11205         }
11206         return ret;
11207     case TARGET_NR_fstatfs64:
11208         ret = get_errno(fstatfs(arg1, &stfs));
11209         goto convert_statfs64;
11210 #endif
11211 #ifdef TARGET_NR_socketcall
11212     case TARGET_NR_socketcall:
11213         return do_socketcall(arg1, arg2);
11214 #endif
11215 #ifdef TARGET_NR_accept
11216     case TARGET_NR_accept:
11217         return do_accept4(arg1, arg2, arg3, 0);
11218 #endif
11219 #ifdef TARGET_NR_accept4
11220     case TARGET_NR_accept4:
11221         return do_accept4(arg1, arg2, arg3, arg4);
11222 #endif
11223 #ifdef TARGET_NR_bind
11224     case TARGET_NR_bind:
11225         return do_bind(arg1, arg2, arg3);
11226 #endif
11227 #ifdef TARGET_NR_connect
11228     case TARGET_NR_connect:
11229         return do_connect(arg1, arg2, arg3);
11230 #endif
11231 #ifdef TARGET_NR_getpeername
11232     case TARGET_NR_getpeername:
11233         return do_getpeername(arg1, arg2, arg3);
11234 #endif
11235 #ifdef TARGET_NR_getsockname
11236     case TARGET_NR_getsockname:
11237         return do_getsockname(arg1, arg2, arg3);
11238 #endif
11239 #ifdef TARGET_NR_getsockopt
11240     case TARGET_NR_getsockopt:
11241         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
11242 #endif
11243 #ifdef TARGET_NR_listen
11244     case TARGET_NR_listen:
11245         return get_errno(listen(arg1, arg2));
11246 #endif
11247 #ifdef TARGET_NR_recv
11248     case TARGET_NR_recv:
11249         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
11250 #endif
11251 #ifdef TARGET_NR_recvfrom
11252     case TARGET_NR_recvfrom:
11253         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
11254 #endif
11255 #ifdef TARGET_NR_recvmsg
11256     case TARGET_NR_recvmsg:
11257         return do_sendrecvmsg(arg1, arg2, arg3, 0);
11258 #endif
11259 #ifdef TARGET_NR_send
11260     case TARGET_NR_send:
11261         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
11262 #endif
11263 #ifdef TARGET_NR_sendmsg
11264     case TARGET_NR_sendmsg:
11265         return do_sendrecvmsg(arg1, arg2, arg3, 1);
11266 #endif
11267 #ifdef TARGET_NR_sendmmsg
11268     case TARGET_NR_sendmmsg:
11269         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
11270 #endif
11271 #ifdef TARGET_NR_recvmmsg
11272     case TARGET_NR_recvmmsg:
11273         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
11274 #endif
11275 #ifdef TARGET_NR_sendto
11276     case TARGET_NR_sendto:
11277         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
11278 #endif
11279 #ifdef TARGET_NR_shutdown
11280     case TARGET_NR_shutdown:
11281         return get_errno(shutdown(arg1, arg2));
11282 #endif
11283 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
11284     case TARGET_NR_getrandom:
11285         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11286         if (!p) {
11287             return -TARGET_EFAULT;
11288         }
11289         ret = get_errno(getrandom(p, arg2, arg3));
11290         unlock_user(p, arg1, ret);
11291         return ret;
11292 #endif
11293 #ifdef TARGET_NR_socket
11294     case TARGET_NR_socket:
11295         return do_socket(arg1, arg2, arg3);
11296 #endif
11297 #ifdef TARGET_NR_socketpair
11298     case TARGET_NR_socketpair:
11299         return do_socketpair(arg1, arg2, arg3, arg4);
11300 #endif
11301 #ifdef TARGET_NR_setsockopt
11302     case TARGET_NR_setsockopt:
11303         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
11304 #endif
11305 #if defined(TARGET_NR_syslog)
11306     case TARGET_NR_syslog:
11307         {
11308             int len = arg2;
11309 
11310             switch (arg1) {
11311             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
11312             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
11313             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
11314             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
11315             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
11316             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
11317             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
11318             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
11319                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
11320             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
11321             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
11322             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
11323                 {
11324                     if (len < 0) {
11325                         return -TARGET_EINVAL;
11326                     }
11327                     if (len == 0) {
11328                         return 0;
11329                     }
11330                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11331                     if (!p) {
11332                         return -TARGET_EFAULT;
11333                     }
11334                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
11335                     unlock_user(p, arg2, arg3);
11336                 }
11337                 return ret;
11338             default:
11339                 return -TARGET_EINVAL;
11340             }
11341         }
11342         break;
11343 #endif
11344     case TARGET_NR_setitimer:
11345         {
11346             struct itimerval value, ovalue, *pvalue;
11347 
11348             if (arg2) {
11349                 pvalue = &value;
11350                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
11351                     || copy_from_user_timeval(&pvalue->it_value,
11352                                               arg2 + sizeof(struct target_timeval)))
11353                     return -TARGET_EFAULT;
11354             } else {
11355                 pvalue = NULL;
11356             }
11357             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
11358             if (!is_error(ret) && arg3) {
11359                 if (copy_to_user_timeval(arg3,
11360                                          &ovalue.it_interval)
11361                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
11362                                             &ovalue.it_value))
11363                     return -TARGET_EFAULT;
11364             }
11365         }
11366         return ret;
11367     case TARGET_NR_getitimer:
11368         {
11369             struct itimerval value;
11370 
11371             ret = get_errno(getitimer(arg1, &value));
11372             if (!is_error(ret) && arg2) {
11373                 if (copy_to_user_timeval(arg2,
11374                                          &value.it_interval)
11375                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
11376                                             &value.it_value))
11377                     return -TARGET_EFAULT;
11378             }
11379         }
11380         return ret;
11381 #ifdef TARGET_NR_stat
11382     case TARGET_NR_stat:
11383         if (!(p = lock_user_string(arg1))) {
11384             return -TARGET_EFAULT;
11385         }
11386         ret = get_errno(stat(path(p), &st));
11387         unlock_user(p, arg1, 0);
11388         goto do_stat;
11389 #endif
11390 #ifdef TARGET_NR_lstat
11391     case TARGET_NR_lstat:
11392         if (!(p = lock_user_string(arg1))) {
11393             return -TARGET_EFAULT;
11394         }
11395         ret = get_errno(lstat(path(p), &st));
11396         unlock_user(p, arg1, 0);
11397         goto do_stat;
11398 #endif
11399 #ifdef TARGET_NR_fstat
11400     case TARGET_NR_fstat:
11401         {
11402             ret = get_errno(fstat(arg1, &st));
11403 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11404         do_stat:
11405 #endif
11406             if (!is_error(ret)) {
11407                 struct target_stat *target_st;
11408 
11409                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11410                     return -TARGET_EFAULT;
11411                 memset(target_st, 0, sizeof(*target_st));
11412                 __put_user(st.st_dev, &target_st->st_dev);
11413                 __put_user(st.st_ino, &target_st->st_ino);
11414                 __put_user(st.st_mode, &target_st->st_mode);
11415                 __put_user(st.st_uid, &target_st->st_uid);
11416                 __put_user(st.st_gid, &target_st->st_gid);
11417                 __put_user(st.st_nlink, &target_st->st_nlink);
11418                 __put_user(st.st_rdev, &target_st->st_rdev);
11419                 __put_user(st.st_size, &target_st->st_size);
11420                 __put_user(st.st_blksize, &target_st->st_blksize);
11421                 __put_user(st.st_blocks, &target_st->st_blocks);
11422                 __put_user(st.st_atime, &target_st->target_st_atime);
11423                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11424                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11425 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11426                 __put_user(st.st_atim.tv_nsec,
11427                            &target_st->target_st_atime_nsec);
11428                 __put_user(st.st_mtim.tv_nsec,
11429                            &target_st->target_st_mtime_nsec);
11430                 __put_user(st.st_ctim.tv_nsec,
11431                            &target_st->target_st_ctime_nsec);
11432 #endif
11433                 unlock_user_struct(target_st, arg2, 1);
11434             }
11435         }
11436         return ret;
11437 #endif
11438     case TARGET_NR_vhangup:
11439         return get_errno(vhangup());
11440 #ifdef TARGET_NR_syscall
11441     case TARGET_NR_syscall:
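              /*
               * Indirect syscall: the low 16 bits of arg1 select the real
               * syscall number and the remaining arguments shift down by one.
               */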
11442         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11443                           arg6, arg7, arg8, 0);
11444 #endif
11445 #if defined(TARGET_NR_wait4)
11446     case TARGET_NR_wait4:
11447         {
11448             int status;
11449             abi_long status_ptr = arg2;
11450             struct rusage rusage, *rusage_ptr;
11451             abi_ulong target_rusage = arg4;
11452             abi_long rusage_err;
11453             if (target_rusage)
11454                 rusage_ptr = &rusage;
11455             else
11456                 rusage_ptr = NULL;
11457             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11458             if (!is_error(ret)) {
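                      /*
                       * Only copy back a wait status if a child was actually
                       * reaped; ret == 0 means WNOHANG found nothing to report.
                       */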
11459                 if (status_ptr && ret) {
11460                     status = host_to_target_waitstatus(status);
11461                     if (put_user_s32(status, status_ptr))
11462                         return -TARGET_EFAULT;
11463                 }
11464                 if (target_rusage) {
11465                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11466                     if (rusage_err) {
11467                         ret = rusage_err;
11468                     }
11469                 }
11470             }
11471         }
11472         return ret;
11473 #endif
11474 #ifdef TARGET_NR_swapoff
11475     case TARGET_NR_swapoff:
11476         if (!(p = lock_user_string(arg1)))
11477             return -TARGET_EFAULT;
11478         ret = get_errno(swapoff(p));
11479         unlock_user(p, arg1, 0);
11480         return ret;
11481 #endif
11482     case TARGET_NR_sysinfo:
11483         {
11484             struct target_sysinfo *target_value;
11485             struct sysinfo value;
11486             ret = get_errno(sysinfo(&value));
11487             if (!is_error(ret) && arg1)
11488             {
11489                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11490                     return -TARGET_EFAULT;
11491                 __put_user(value.uptime, &target_value->uptime);
11492                 __put_user(value.loads[0], &target_value->loads[0]);
11493                 __put_user(value.loads[1], &target_value->loads[1]);
11494                 __put_user(value.loads[2], &target_value->loads[2]);
11495                 __put_user(value.totalram, &target_value->totalram);
11496                 __put_user(value.freeram, &target_value->freeram);
11497                 __put_user(value.sharedram, &target_value->sharedram);
11498                 __put_user(value.bufferram, &target_value->bufferram);
11499                 __put_user(value.totalswap, &target_value->totalswap);
11500                 __put_user(value.freeswap, &target_value->freeswap);
11501                 __put_user(value.procs, &target_value->procs);
11502                 __put_user(value.totalhigh, &target_value->totalhigh);
11503                 __put_user(value.freehigh, &target_value->freehigh);
11504                 __put_user(value.mem_unit, &target_value->mem_unit);
11505                 unlock_user_struct(target_value, arg1, 1);
11506             }
11507         }
11508         return ret;
11509 #ifdef TARGET_NR_ipc
11510     case TARGET_NR_ipc:
11511         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11512 #endif
11513 #ifdef TARGET_NR_semget
11514     case TARGET_NR_semget:
11515         return get_errno(semget(arg1, arg2, arg3));
11516 #endif
11517 #ifdef TARGET_NR_semop
11518     case TARGET_NR_semop:
11519         return do_semtimedop(arg1, arg2, arg3, 0, false);
11520 #endif
11521 #ifdef TARGET_NR_semtimedop
11522     case TARGET_NR_semtimedop:
11523         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11524 #endif
11525 #ifdef TARGET_NR_semtimedop_time64
11526     case TARGET_NR_semtimedop_time64:
11527         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11528 #endif
11529 #ifdef TARGET_NR_semctl
11530     case TARGET_NR_semctl:
11531         return do_semctl(arg1, arg2, arg3, arg4);
11532 #endif
11533 #ifdef TARGET_NR_msgctl
11534     case TARGET_NR_msgctl:
11535         return do_msgctl(arg1, arg2, arg3);
11536 #endif
11537 #ifdef TARGET_NR_msgget
11538     case TARGET_NR_msgget:
11539         return get_errno(msgget(arg1, arg2));
11540 #endif
11541 #ifdef TARGET_NR_msgrcv
11542     case TARGET_NR_msgrcv:
11543         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11544 #endif
11545 #ifdef TARGET_NR_msgsnd
11546     case TARGET_NR_msgsnd:
11547         return do_msgsnd(arg1, arg2, arg3, arg4);
11548 #endif
11549 #ifdef TARGET_NR_shmget
11550     case TARGET_NR_shmget:
11551         return get_errno(shmget(arg1, arg2, arg3));
11552 #endif
11553 #ifdef TARGET_NR_shmctl
11554     case TARGET_NR_shmctl:
11555         return do_shmctl(arg1, arg2, arg3);
11556 #endif
11557 #ifdef TARGET_NR_shmat
11558     case TARGET_NR_shmat:
11559         return target_shmat(cpu_env, arg1, arg2, arg3);
11560 #endif
11561 #ifdef TARGET_NR_shmdt
11562     case TARGET_NR_shmdt:
11563         return target_shmdt(arg1);
11564 #endif
11565     case TARGET_NR_fsync:
11566         return get_errno(fsync(arg1));
11567     case TARGET_NR_clone:
11568         /* Linux manages to have three different orderings for its
11569          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11570          * match the kernel's CONFIG_CLONE_* settings.
11571          * Microblaze is further special in that it uses a sixth
11572          * implicit argument to clone for the TLS pointer.
11573          */
11574 #if defined(TARGET_MICROBLAZE)
11575         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11576 #elif defined(TARGET_CLONE_BACKWARDS)
11577         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11578 #elif defined(TARGET_CLONE_BACKWARDS2)
11579         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11580 #else
11581         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11582 #endif
11583         return ret;
11584 #ifdef __NR_exit_group
11585         /* new thread calls */
11586     case TARGET_NR_exit_group:
11587         preexit_cleanup(cpu_env, arg1);
11588         return get_errno(exit_group(arg1));
11589 #endif
11590     case TARGET_NR_setdomainname:
11591         if (!(p = lock_user_string(arg1)))
11592             return -TARGET_EFAULT;
11593         ret = get_errno(setdomainname(p, arg2));
11594         unlock_user(p, arg1, 0);
11595         return ret;
11596     case TARGET_NR_uname:
11597         /* no need to transcode because we use the linux syscall */
11598         {
11599             struct new_utsname * buf;
11600 
11601             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11602                 return -TARGET_EFAULT;
11603             ret = get_errno(sys_uname(buf));
11604             if (!is_error(ret)) {
11605                 /* Overwrite the native machine name with whatever is being
11606                    emulated. */
11607                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11608                           sizeof(buf->machine));
11609                 /* Allow the user to override the reported release.  */
11610                 if (qemu_uname_release && *qemu_uname_release) {
11611                     g_strlcpy(buf->release, qemu_uname_release,
11612                               sizeof(buf->release));
11613                 }
11614             }
11615             unlock_user_struct(buf, arg1, 1);
11616         }
11617         return ret;
11618 #ifdef TARGET_I386
11619     case TARGET_NR_modify_ldt:
11620         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11621 #if !defined(TARGET_X86_64)
11622     case TARGET_NR_vm86:
11623         return do_vm86(cpu_env, arg1, arg2);
11624 #endif
11625 #endif
11626 #if defined(TARGET_NR_adjtimex)
11627     case TARGET_NR_adjtimex:
11628         {
11629             struct timex host_buf;
11630 
11631             if (target_to_host_timex(&host_buf, arg1) != 0) {
11632                 return -TARGET_EFAULT;
11633             }
11634             ret = get_errno(adjtimex(&host_buf));
11635             if (!is_error(ret)) {
11636                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11637                     return -TARGET_EFAULT;
11638                 }
11639             }
11640         }
11641         return ret;
11642 #endif
11643 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11644     case TARGET_NR_clock_adjtime:
11645         {
11646             struct timex htx;
11647 
11648             if (target_to_host_timex(&htx, arg2) != 0) {
11649                 return -TARGET_EFAULT;
11650             }
11651             ret = get_errno(clock_adjtime(arg1, &htx));
11652             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11653                 return -TARGET_EFAULT;
11654             }
11655         }
11656         return ret;
11657 #endif
11658 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11659     case TARGET_NR_clock_adjtime64:
11660         {
11661             struct timex htx;
11662 
11663             if (target_to_host_timex64(&htx, arg2) != 0) {
11664                 return -TARGET_EFAULT;
11665             }
11666             ret = get_errno(clock_adjtime(arg1, &htx));
11667             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11668                     return -TARGET_EFAULT;
11669             }
11670         }
11671         return ret;
11672 #endif
11673     case TARGET_NR_getpgid:
11674         return get_errno(getpgid(arg1));
11675     case TARGET_NR_fchdir:
11676         return get_errno(fchdir(arg1));
11677     case TARGET_NR_personality:
11678         return get_errno(personality(arg1));
11679 #ifdef TARGET_NR__llseek /* Not on alpha */
11680     case TARGET_NR__llseek:
11681         {
11682             int64_t res;
11683 #if !defined(__NR_llseek)
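                  /*
                   * The host has no _llseek (typical for 64-bit hosts), so
                   * reassemble the 64-bit offset from the two 32-bit halves
                   * and use plain lseek directly.
                   */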
11684             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11685             if (res == -1) {
11686                 ret = get_errno(res);
11687             } else {
11688                 ret = 0;
11689             }
11690 #else
11691             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11692 #endif
11693             if ((ret == 0) && put_user_s64(res, arg4)) {
11694                 return -TARGET_EFAULT;
11695             }
11696         }
11697         return ret;
11698 #endif
11699 #ifdef TARGET_NR_getdents
11700     case TARGET_NR_getdents:
11701         return do_getdents(arg1, arg2, arg3);
11702 #endif /* TARGET_NR_getdents */
11703 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11704     case TARGET_NR_getdents64:
11705         return do_getdents64(arg1, arg2, arg3);
11706 #endif /* TARGET_NR_getdents64 */
11707 #if defined(TARGET_NR__newselect)
11708     case TARGET_NR__newselect:
11709         return do_select(arg1, arg2, arg3, arg4, arg5);
11710 #endif
11711 #ifdef TARGET_NR_poll
11712     case TARGET_NR_poll:
11713         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11714 #endif
11715 #ifdef TARGET_NR_ppoll
11716     case TARGET_NR_ppoll:
11717         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11718 #endif
11719 #ifdef TARGET_NR_ppoll_time64
11720     case TARGET_NR_ppoll_time64:
11721         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11722 #endif
11723     case TARGET_NR_flock:
11724         /* NOTE: the flock constant seems to be the same for every
11725            Linux platform */
11726         return get_errno(safe_flock(arg1, arg2));
11727     case TARGET_NR_readv:
11728         {
11729             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11730             if (vec != NULL) {
11731                 ret = get_errno(safe_readv(arg1, vec, arg3));
11732                 unlock_iovec(vec, arg2, arg3, 1);
11733             } else {
11734                 ret = -host_to_target_errno(errno);
11735             }
11736         }
11737         return ret;
11738     case TARGET_NR_writev:
11739         {
11740             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11741             if (vec != NULL) {
11742                 ret = get_errno(safe_writev(arg1, vec, arg3));
11743                 unlock_iovec(vec, arg2, arg3, 0);
11744             } else {
11745                 ret = -host_to_target_errno(errno);
11746             }
11747         }
11748         return ret;
11749 #if defined(TARGET_NR_preadv)
11750     case TARGET_NR_preadv:
11751         {
11752             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11753             if (vec != NULL) {
11754                 unsigned long low, high;
11755 
11756                 target_to_host_low_high(arg4, arg5, &low, &high);
11757                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11758                 unlock_iovec(vec, arg2, arg3, 1);
11759             } else {
11760                 ret = -host_to_target_errno(errno);
11761            }
11762         }
11763         return ret;
11764 #endif
11765 #if defined(TARGET_NR_pwritev)
11766     case TARGET_NR_pwritev:
11767         {
11768             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11769             if (vec != NULL) {
11770                 unsigned long low, high;
11771 
11772                 target_to_host_low_high(arg4, arg5, &low, &high);
11773                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11774                 unlock_iovec(vec, arg2, arg3, 0);
11775             } else {
11776                 ret = -host_to_target_errno(errno);
11777            }
11778         }
11779         return ret;
11780 #endif
11781     case TARGET_NR_getsid:
11782         return get_errno(getsid(arg1));
11783 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11784     case TARGET_NR_fdatasync:
11785         return get_errno(fdatasync(arg1));
11786 #endif
11787     case TARGET_NR_sched_getaffinity:
11788         {
11789             unsigned int mask_size;
11790             unsigned long *mask;
11791 
11792             /*
11793              * sched_getaffinity needs multiples of ulong, so we need to take
11794              * care of mismatches between target ulong and host ulong sizes.
11795              */
11796             if (arg2 & (sizeof(abi_ulong) - 1)) {
11797                 return -TARGET_EINVAL;
11798             }
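                  /*
                   * Round the guest-supplied size up to a whole number of host
                   * 'unsigned long's, the granularity the host kernel works in.
                   */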
11799             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11800 
11801             mask = alloca(mask_size);
11802             memset(mask, 0, mask_size);
11803             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11804 
11805             if (!is_error(ret)) {
11806                 if (ret > arg2) {
11807                     /* More data returned than will fit in the caller's buffer.
11808                      * This only happens if sizeof(abi_long) < sizeof(long)
11809                      * and the caller passed us a buffer holding an odd number
11810                      * of abi_longs. If the host kernel is actually using the
11811                      * extra 4 bytes then fail EINVAL; otherwise we can just
11812                      * ignore them and only copy the interesting part.
11813                      */
11814                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11815                     if (numcpus > arg2 * 8) {
11816                         return -TARGET_EINVAL;
11817                     }
11818                     ret = arg2;
11819                 }
11820 
11821                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11822                     return -TARGET_EFAULT;
11823                 }
11824             }
11825         }
11826         return ret;
11827     case TARGET_NR_sched_setaffinity:
11828         {
11829             unsigned int mask_size;
11830             unsigned long *mask;
11831 
11832             /*
11833              * sched_setaffinity operates on multiples of ulong, so we must
11834              * handle mismatches between target ulong and host ulong sizes.
11835              */
11836             if (arg2 & (sizeof(abi_ulong) - 1)) {
11837                 return -TARGET_EINVAL;
11838             }
11839             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11840             mask = alloca(mask_size);
11841 
11842             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11843             if (ret) {
11844                 return ret;
11845             }
11846 
11847             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11848         }
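    /*
     * getcpu: the third (tcache) argument has been ignored by Linux
     * since 2.6.24, so NULL is simply passed to the host.
     */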
11849     case TARGET_NR_getcpu:
11850         {
11851             unsigned cpuid, node;
11852             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11853                                        arg2 ? &node : NULL,
11854                                        NULL));
11855             if (is_error(ret)) {
11856                 return ret;
11857             }
11858             if (arg1 && put_user_u32(cpuid, arg1)) {
11859                 return -TARGET_EFAULT;
11860             }
11861             if (arg2 && put_user_u32(node, arg2)) {
11862                 return -TARGET_EFAULT;
11863             }
11864         }
11865         return ret;
11866     case TARGET_NR_sched_setparam:
11867         {
11868             struct target_sched_param *target_schp;
11869             struct sched_param schp;
11870 
11871             if (arg2 == 0) {
11872                 return -TARGET_EINVAL;
11873             }
11874             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11875                 return -TARGET_EFAULT;
11876             }
11877             schp.sched_priority = tswap32(target_schp->sched_priority);
11878             unlock_user_struct(target_schp, arg2, 0);
11879             return get_errno(sys_sched_setparam(arg1, &schp));
11880         }
11881     case TARGET_NR_sched_getparam:
11882         {
11883             struct target_sched_param *target_schp;
11884             struct sched_param schp;
11885 
11886             if (arg2 == 0) {
11887                 return -TARGET_EINVAL;
11888             }
11889             ret = get_errno(sys_sched_getparam(arg1, &schp));
11890             if (!is_error(ret)) {
11891                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11892                     return -TARGET_EFAULT;
11893                 }
11894                 target_schp->sched_priority = tswap32(schp.sched_priority);
11895                 unlock_user_struct(target_schp, arg2, 1);
11896             }
11897         }
11898         return ret;
11899     case TARGET_NR_sched_setscheduler:
11900         {
11901             struct target_sched_param *target_schp;
11902             struct sched_param schp;
11903             if (arg3 == 0) {
11904                 return -TARGET_EINVAL;
11905             }
11906             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11907                 return -TARGET_EFAULT;
11908             }
11909             schp.sched_priority = tswap32(target_schp->sched_priority);
11910             unlock_user_struct(target_schp, arg3, 0);
11911             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11912         }
11913     case TARGET_NR_sched_getscheduler:
11914         return get_errno(sys_sched_getscheduler(arg1));
11915     case TARGET_NR_sched_getattr:
11916         {
11917             struct target_sched_attr *target_scha;
11918             struct sched_attr scha;
11919             if (arg2 == 0) {
11920                 return -TARGET_EINVAL;
11921             }
11922             if (arg3 > sizeof(scha)) {
11923                 arg3 = sizeof(scha);
11924             }
11925             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11926             if (!is_error(ret)) {
11927                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11928                 if (!target_scha) {
11929                     return -TARGET_EFAULT;
11930                 }
11931                 target_scha->size = tswap32(scha.size);
11932                 target_scha->sched_policy = tswap32(scha.sched_policy);
11933                 target_scha->sched_flags = tswap64(scha.sched_flags);
11934                 target_scha->sched_nice = tswap32(scha.sched_nice);
11935                 target_scha->sched_priority = tswap32(scha.sched_priority);
11936                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11937                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11938                 target_scha->sched_period = tswap64(scha.sched_period);
11939                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11940                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11941                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11942                 }
11943                 unlock_user(target_scha, arg2, arg3);
11944             }
11945             return ret;
11946         }
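    /*
     * sched_setattr: validate the guest's sched_attr.size broadly the
     * way the kernel does -- sizes below the original layout (before
     * the util-clamp fields) fail with E2BIG and the supported size is
     * written back, any bytes beyond the structure we know about must
     * be zero, and oversized values are clamped before the fields are
     * byte-swapped for the host.
     */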
11947     case TARGET_NR_sched_setattr:
11948         {
11949             struct target_sched_attr *target_scha;
11950             struct sched_attr scha;
11951             uint32_t size;
11952             int zeroed;
11953             if (arg2 == 0) {
11954                 return -TARGET_EINVAL;
11955             }
11956             if (get_user_u32(size, arg2)) {
11957                 return -TARGET_EFAULT;
11958             }
11959             if (!size) {
11960                 size = offsetof(struct target_sched_attr, sched_util_min);
11961             }
11962             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11963                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11964                     return -TARGET_EFAULT;
11965                 }
11966                 return -TARGET_E2BIG;
11967             }
11968 
11969             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11970             if (zeroed < 0) {
11971                 return zeroed;
11972             } else if (zeroed == 0) {
11973                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11974                     return -TARGET_EFAULT;
11975                 }
11976                 return -TARGET_E2BIG;
11977             }
11978             if (size > sizeof(struct target_sched_attr)) {
11979                 size = sizeof(struct target_sched_attr);
11980             }
11981 
11982             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11983             if (!target_scha) {
11984                 return -TARGET_EFAULT;
11985             }
11986             scha.size = size;
11987             scha.sched_policy = tswap32(target_scha->sched_policy);
11988             scha.sched_flags = tswap64(target_scha->sched_flags);
11989             scha.sched_nice = tswap32(target_scha->sched_nice);
11990             scha.sched_priority = tswap32(target_scha->sched_priority);
11991             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11992             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11993             scha.sched_period = tswap64(target_scha->sched_period);
11994             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11995                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11996                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11997             }
11998             unlock_user(target_scha, arg2, 0);
11999             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
12000         }
12001     case TARGET_NR_sched_yield:
12002         return get_errno(sched_yield());
12003     case TARGET_NR_sched_get_priority_max:
12004         return get_errno(sched_get_priority_max(arg1));
12005     case TARGET_NR_sched_get_priority_min:
12006         return get_errno(sched_get_priority_min(arg1));
12007 #ifdef TARGET_NR_sched_rr_get_interval
12008     case TARGET_NR_sched_rr_get_interval:
12009         {
12010             struct timespec ts;
12011             ret = get_errno(sched_rr_get_interval(arg1, &ts));
12012             if (!is_error(ret)) {
12013                 ret = host_to_target_timespec(arg2, &ts);
12014             }
12015         }
12016         return ret;
12017 #endif
12018 #ifdef TARGET_NR_sched_rr_get_interval_time64
12019     case TARGET_NR_sched_rr_get_interval_time64:
12020         {
12021             struct timespec ts;
12022             ret = get_errno(sched_rr_get_interval(arg1, &ts));
12023             if (!is_error(ret)) {
12024                 ret = host_to_target_timespec64(arg2, &ts);
12025             }
12026         }
12027         return ret;
12028 #endif
12029 #if defined(TARGET_NR_nanosleep)
12030     case TARGET_NR_nanosleep:
12031         {
12032             struct timespec req, rem;
12033             if (target_to_host_timespec(&req, arg1)) {
12034                 return -TARGET_EFAULT;
12035             }
12036             ret = get_errno(safe_nanosleep(&req, &rem));
12037             if (is_error(ret) && arg2) {
12038                 if (host_to_target_timespec(arg2, &rem)) {
12039                     return -TARGET_EFAULT;
12040                 }
12041             }
12042         }
12043         return ret;
12044 #endif
12045     case TARGET_NR_prctl:
12046         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
12048 #ifdef TARGET_NR_arch_prctl
12049     case TARGET_NR_arch_prctl:
12050         return do_arch_prctl(cpu_env, arg1, arg2);
12051 #endif
12052 #ifdef TARGET_NR_pread64
12053     case TARGET_NR_pread64:
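        /*
         * Some 32-bit ABIs pass 64-bit values in aligned register
         * pairs, inserting a padding slot; regpairs_aligned() detects
         * this, so shift the offset halves down by one argument.
         */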
12054         if (regpairs_aligned(cpu_env, num)) {
12055             arg4 = arg5;
12056             arg5 = arg6;
12057         }
12058         if (arg2 == 0 && arg3 == 0) {
12059             /* Special-case NULL buffer and zero length, which should succeed */
12060             p = 0;
12061         } else {
12062             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12063             if (!p) {
12064                 return -TARGET_EFAULT;
12065             }
12066         }
12067         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
12068         unlock_user(p, arg2, ret);
12069         return ret;
12070     case TARGET_NR_pwrite64:
12071         if (regpairs_aligned(cpu_env, num)) {
12072             arg4 = arg5;
12073             arg5 = arg6;
12074         }
12075         if (arg2 == 0 && arg3 == 0) {
12076             /* Special-case NULL buffer and zero length, which should succeed */
12077             p = 0;
12078         } else {
12079             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12080             if (!p) {
12081                 return -TARGET_EFAULT;
12082             }
12083         }
12084         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
12085         unlock_user(p, arg2, 0);
12086         return ret;
12087 #endif
12088     case TARGET_NR_getcwd:
12089         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
12090             return -TARGET_EFAULT;
12091         ret = get_errno(sys_getcwd1(p, arg2));
12092         unlock_user(p, arg1, ret);
12093         return ret;
12094     case TARGET_NR_capget:
12095     case TARGET_NR_capset:
12096     {
12097         struct target_user_cap_header *target_header;
12098         struct target_user_cap_data *target_data = NULL;
12099         struct __user_cap_header_struct header;
12100         struct __user_cap_data_struct data[2];
12101         struct __user_cap_data_struct *dataptr = NULL;
12102         int i, target_datalen;
12103         int data_items = 1;
12104 
12105         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
12106             return -TARGET_EFAULT;
12107         }
12108         header.version = tswap32(target_header->version);
12109         header.pid = tswap32(target_header->pid);
12110 
12111         if (header.version != _LINUX_CAPABILITY_VERSION) {
12112             /* Versions 2 and up take a pointer to two user_data structs */
12113             data_items = 2;
12114         }
12115 
12116         target_datalen = sizeof(*target_data) * data_items;
12117 
12118         if (arg2) {
12119             if (num == TARGET_NR_capget) {
12120                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
12121             } else {
12122                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
12123             }
12124             if (!target_data) {
12125                 unlock_user_struct(target_header, arg1, 0);
12126                 return -TARGET_EFAULT;
12127             }
12128 
12129             if (num == TARGET_NR_capset) {
12130                 for (i = 0; i < data_items; i++) {
12131                     data[i].effective = tswap32(target_data[i].effective);
12132                     data[i].permitted = tswap32(target_data[i].permitted);
12133                     data[i].inheritable = tswap32(target_data[i].inheritable);
12134                 }
12135             }
12136 
12137             dataptr = data;
12138         }
12139 
12140         if (num == TARGET_NR_capget) {
12141             ret = get_errno(capget(&header, dataptr));
12142         } else {
12143             ret = get_errno(capset(&header, dataptr));
12144         }
12145 
12146         /* The kernel always updates version for both capget and capset */
12147         target_header->version = tswap32(header.version);
12148         unlock_user_struct(target_header, arg1, 1);
12149 
12150         if (arg2) {
12151             if (num == TARGET_NR_capget) {
12152                 for (i = 0; i < data_items; i++) {
12153                     target_data[i].effective = tswap32(data[i].effective);
12154                     target_data[i].permitted = tswap32(data[i].permitted);
12155                     target_data[i].inheritable = tswap32(data[i].inheritable);
12156                 }
12157                 unlock_user(target_data, arg2, target_datalen);
12158             } else {
12159                 unlock_user(target_data, arg2, 0);
12160             }
12161         }
12162         return ret;
12163     }
12164     case TARGET_NR_sigaltstack:
12165         return do_sigaltstack(arg1, arg2, cpu_env);
12166 
12167 #ifdef CONFIG_SENDFILE
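/*
 * sendfile/sendfile64: if the guest supplied an offset pointer, copy it
 * in, let the host call update it, and write the new value back to the
 * guest on success.
 */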
12168 #ifdef TARGET_NR_sendfile
12169     case TARGET_NR_sendfile:
12170     {
12171         off_t *offp = NULL;
12172         off_t off;
12173         if (arg3) {
12174             ret = get_user_sal(off, arg3);
12175             if (is_error(ret)) {
12176                 return ret;
12177             }
12178             offp = &off;
12179         }
12180         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
12181         if (!is_error(ret) && arg3) {
12182             abi_long ret2 = put_user_sal(off, arg3);
12183             if (is_error(ret2)) {
12184                 ret = ret2;
12185             }
12186         }
12187         return ret;
12188     }
12189 #endif
12190 #ifdef TARGET_NR_sendfile64
12191     case TARGET_NR_sendfile64:
12192     {
12193         off_t *offp = NULL;
12194         off_t off;
12195         if (arg3) {
12196             ret = get_user_s64(off, arg3);
12197             if (is_error(ret)) {
12198                 return ret;
12199             }
12200             offp = &off;
12201         }
12202         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
12203         if (!is_error(ret) && arg3) {
12204             abi_long ret2 = put_user_s64(off, arg3);
12205             if (is_error(ret2)) {
12206                 ret = ret2;
12207             }
12208         }
12209         return ret;
12210     }
12211 #endif
12212 #endif
12213 #ifdef TARGET_NR_vfork
12214     case TARGET_NR_vfork:
12215         return get_errno(do_fork(cpu_env,
12216                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
12217                          0, 0, 0, 0));
12218 #endif
12219 #ifdef TARGET_NR_ugetrlimit
12220     case TARGET_NR_ugetrlimit:
12221     {
12222         struct rlimit rlim;
12223         int resource = target_to_host_resource(arg1);
12224         ret = get_errno(getrlimit(resource, &rlim));
12225         if (!is_error(ret)) {
12226             struct target_rlimit *target_rlim;
12227             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
12228                 return -TARGET_EFAULT;
12229             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
12230             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
12231             unlock_user_struct(target_rlim, arg2, 1);
12232         }
12233         return ret;
12234     }
12235 #endif
12236 #ifdef TARGET_NR_truncate64
12237     case TARGET_NR_truncate64:
12238         if (!(p = lock_user_string(arg1)))
12239             return -TARGET_EFAULT;
12240         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
12241         unlock_user(p, arg1, 0);
12242         return ret;
12243 #endif
12244 #ifdef TARGET_NR_ftruncate64
12245     case TARGET_NR_ftruncate64:
12246         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
12247 #endif
12248 #ifdef TARGET_NR_stat64
12249     case TARGET_NR_stat64:
12250         if (!(p = lock_user_string(arg1))) {
12251             return -TARGET_EFAULT;
12252         }
12253         ret = get_errno(stat(path(p), &st));
12254         unlock_user(p, arg1, 0);
12255         if (!is_error(ret))
12256             ret = host_to_target_stat64(cpu_env, arg2, &st);
12257         return ret;
12258 #endif
12259 #ifdef TARGET_NR_lstat64
12260     case TARGET_NR_lstat64:
12261         if (!(p = lock_user_string(arg1))) {
12262             return -TARGET_EFAULT;
12263         }
12264         ret = get_errno(lstat(path(p), &st));
12265         unlock_user(p, arg1, 0);
12266         if (!is_error(ret))
12267             ret = host_to_target_stat64(cpu_env, arg2, &st);
12268         return ret;
12269 #endif
12270 #ifdef TARGET_NR_fstat64
12271     case TARGET_NR_fstat64:
12272         ret = get_errno(fstat(arg1, &st));
12273         if (!is_error(ret))
12274             ret = host_to_target_stat64(cpu_env, arg2, &st);
12275         return ret;
12276 #endif
12277 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
12278 #ifdef TARGET_NR_fstatat64
12279     case TARGET_NR_fstatat64:
12280 #endif
12281 #ifdef TARGET_NR_newfstatat
12282     case TARGET_NR_newfstatat:
12283 #endif
12284         if (!(p = lock_user_string(arg2))) {
12285             return -TARGET_EFAULT;
12286         }
12287         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
12288         unlock_user(p, arg2, 0);
12289         if (!is_error(ret))
12290             ret = host_to_target_stat64(cpu_env, arg3, &st);
12291         return ret;
12292 #endif
12293 #if defined(TARGET_NR_statx)
12294     case TARGET_NR_statx:
12295         {
12296             struct target_statx *target_stx;
12297             int dirfd = arg1;
12298             int flags = arg3;
12299 
12300             p = NULL;
12301             /* Since Linux 6.11, the path argument may be NULL */
12302             if (arg2 != 0) {
12303                 p = lock_user_string(arg2);
12304                 if (p == NULL) {
12305                     return -TARGET_EFAULT;
12306                 }
12307             }
12308 #if defined(__NR_statx)
12309             {
12310                 /*
12311                  * It is assumed that struct statx is architecture independent.
12312                  */
12313                 struct target_statx host_stx;
12314                 int mask = arg4;
12315 
12316                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
12317                 if (!is_error(ret)) {
12318                     if (host_to_target_statx(&host_stx, arg5) != 0) {
12319                         unlock_user(p, arg2, 0);
12320                         return -TARGET_EFAULT;
12321                     }
12322                 }
12323 
12324                 if (ret != -TARGET_ENOSYS) {
12325                     unlock_user(p, arg2, 0);
12326                     return ret;
12327                 }
12328             }
12329 #endif
12330             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
12331             unlock_user(p, arg2, 0);
12332 
12333             if (!is_error(ret)) {
12334                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
12335                     return -TARGET_EFAULT;
12336                 }
12337                 memset(target_stx, 0, sizeof(*target_stx));
12338                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
12339                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
12340                 __put_user(st.st_ino, &target_stx->stx_ino);
12341                 __put_user(st.st_mode, &target_stx->stx_mode);
12342                 __put_user(st.st_uid, &target_stx->stx_uid);
12343                 __put_user(st.st_gid, &target_stx->stx_gid);
12344                 __put_user(st.st_nlink, &target_stx->stx_nlink);
12345                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
12346                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
12347                 __put_user(st.st_size, &target_stx->stx_size);
12348                 __put_user(st.st_blksize, &target_stx->stx_blksize);
12349                 __put_user(st.st_blocks, &target_stx->stx_blocks);
12350                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
12351                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
12352                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
12353                 unlock_user_struct(target_stx, arg5, 1);
12354             }
12355         }
12356         return ret;
12357 #endif
12358 #ifdef TARGET_NR_lchown
12359     case TARGET_NR_lchown:
12360         if (!(p = lock_user_string(arg1)))
12361             return -TARGET_EFAULT;
12362         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
12363         unlock_user(p, arg1, 0);
12364         return ret;
12365 #endif
12366 #ifdef TARGET_NR_getuid
12367     case TARGET_NR_getuid:
12368         return get_errno(high2lowuid(getuid()));
12369 #endif
12370 #ifdef TARGET_NR_getgid
12371     case TARGET_NR_getgid:
12372         return get_errno(high2lowgid(getgid()));
12373 #endif
12374 #ifdef TARGET_NR_geteuid
12375     case TARGET_NR_geteuid:
12376         return get_errno(high2lowuid(geteuid()));
12377 #endif
12378 #ifdef TARGET_NR_getegid
12379     case TARGET_NR_getegid:
12380         return get_errno(high2lowgid(getegid()));
12381 #endif
12382     case TARGET_NR_setreuid:
12383         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
12384     case TARGET_NR_setregid:
12385         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
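    /*
     * getgroups/setgroups: group IDs are converted between the target's
     * (possibly 16-bit) id type and the host gid_t; a gidsetsize of zero
     * is passed through unchanged (getgroups uses it to query the number
     * of supplementary groups).
     */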
12386     case TARGET_NR_getgroups:
12387         { /* the same code as for TARGET_NR_getgroups32 */
12388             int gidsetsize = arg1;
12389             target_id *target_grouplist;
12390             g_autofree gid_t *grouplist = NULL;
12391             int i;
12392 
12393             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12394                 return -TARGET_EINVAL;
12395             }
12396             if (gidsetsize > 0) {
12397                 grouplist = g_try_new(gid_t, gidsetsize);
12398                 if (!grouplist) {
12399                     return -TARGET_ENOMEM;
12400                 }
12401             }
12402             ret = get_errno(getgroups(gidsetsize, grouplist));
12403             if (!is_error(ret) && gidsetsize > 0) {
12404                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12405                                              gidsetsize * sizeof(target_id), 0);
12406                 if (!target_grouplist) {
12407                     return -TARGET_EFAULT;
12408                 }
12409                 for (i = 0; i < ret; i++) {
12410                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12411                 }
12412                 unlock_user(target_grouplist, arg2,
12413                             gidsetsize * sizeof(target_id));
12414             }
12415             return ret;
12416         }
12417     case TARGET_NR_setgroups:
12418         { /* the same code as for TARGET_NR_setgroups32 */
12419             int gidsetsize = arg1;
12420             target_id *target_grouplist;
12421             g_autofree gid_t *grouplist = NULL;
12422             int i;
12423 
12424             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12425                 return -TARGET_EINVAL;
12426             }
12427             if (gidsetsize > 0) {
12428                 grouplist = g_try_new(gid_t, gidsetsize);
12429                 if (!grouplist) {
12430                     return -TARGET_ENOMEM;
12431                 }
12432                 target_grouplist = lock_user(VERIFY_READ, arg2,
12433                                              gidsetsize * sizeof(target_id), 1);
12434                 if (!target_grouplist) {
12435                     return -TARGET_EFAULT;
12436                 }
12437                 for (i = 0; i < gidsetsize; i++) {
12438                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12439                 }
12440                 unlock_user(target_grouplist, arg2,
12441                             gidsetsize * sizeof(target_id));
12442             }
12443             return get_errno(sys_setgroups(gidsetsize, grouplist));
12444         }
12445     case TARGET_NR_fchown:
12446         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12447 #if defined(TARGET_NR_fchownat)
12448     case TARGET_NR_fchownat:
12449         if (!(p = lock_user_string(arg2)))
12450             return -TARGET_EFAULT;
12451         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12452                                  low2highgid(arg4), arg5));
12453         unlock_user(p, arg2, 0);
12454         return ret;
12455 #endif
12456 #ifdef TARGET_NR_setresuid
12457     case TARGET_NR_setresuid:
12458         return get_errno(sys_setresuid(low2highuid(arg1),
12459                                        low2highuid(arg2),
12460                                        low2highuid(arg3)));
12461 #endif
12462 #ifdef TARGET_NR_getresuid
12463     case TARGET_NR_getresuid:
12464         {
12465             uid_t ruid, euid, suid;
12466             ret = get_errno(getresuid(&ruid, &euid, &suid));
12467             if (!is_error(ret)) {
12468                 if (put_user_id(high2lowuid(ruid), arg1)
12469                     || put_user_id(high2lowuid(euid), arg2)
12470                     || put_user_id(high2lowuid(suid), arg3))
12471                     return -TARGET_EFAULT;
12472             }
12473         }
12474         return ret;
12475 #endif
12476 #ifdef TARGET_NR_setresgid
12477     case TARGET_NR_setresgid:
12478         return get_errno(sys_setresgid(low2highgid(arg1),
12479                                        low2highgid(arg2),
12480                                        low2highgid(arg3)));
12481 #endif
12482 #ifdef TARGET_NR_getresgid
12483     case TARGET_NR_getresgid:
12484         {
12485             gid_t rgid, egid, sgid;
12486             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12487             if (!is_error(ret)) {
12488                 if (put_user_id(high2lowgid(rgid), arg1)
12489                     || put_user_id(high2lowgid(egid), arg2)
12490                     || put_user_id(high2lowgid(sgid), arg3))
12491                     return -TARGET_EFAULT;
12492             }
12493         }
12494         return ret;
12495 #endif
12496 #ifdef TARGET_NR_chown
12497     case TARGET_NR_chown:
12498         if (!(p = lock_user_string(arg1)))
12499             return -TARGET_EFAULT;
12500         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12501         unlock_user(p, arg1, 0);
12502         return ret;
12503 #endif
12504     case TARGET_NR_setuid:
12505         return get_errno(sys_setuid(low2highuid(arg1)));
12506     case TARGET_NR_setgid:
12507         return get_errno(sys_setgid(low2highgid(arg1)));
12508     case TARGET_NR_setfsuid:
12509         return get_errno(setfsuid(arg1));
12510     case TARGET_NR_setfsgid:
12511         return get_errno(setfsgid(arg1));
12512 
12513 #ifdef TARGET_NR_lchown32
12514     case TARGET_NR_lchown32:
12515         if (!(p = lock_user_string(arg1)))
12516             return -TARGET_EFAULT;
12517         ret = get_errno(lchown(p, arg2, arg3));
12518         unlock_user(p, arg1, 0);
12519         return ret;
12520 #endif
12521 #ifdef TARGET_NR_getuid32
12522     case TARGET_NR_getuid32:
12523         return get_errno(getuid());
12524 #endif
12525 
12526 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12527     /* Alpha specific */
12528     case TARGET_NR_getxuid:
12529         {
12530             uid_t euid;
12531             euid = geteuid();
12532             cpu_env->ir[IR_A4] = euid;
12533         }
12534         return get_errno(getuid());
12535 #endif
12536 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12537     /* Alpha specific */
12538     case TARGET_NR_getxgid:
12539         {
12540             gid_t egid;
12541             egid = getegid();
12542             cpu_env->ir[IR_A4] = egid;
12543         }
12544         return get_errno(getgid());
12545 #endif
12546 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12547     /* Alpha specific */
12548     case TARGET_NR_osf_getsysinfo:
12549         ret = -TARGET_EOPNOTSUPP;
12550         switch (arg1) {
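          /*
           * Return the IEEE software floating-point control word,
           * folding the current FPCR status bits into the saved SWCR.
           */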
12551           case TARGET_GSI_IEEE_FP_CONTROL:
12552             {
12553                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12554                 uint64_t swcr = cpu_env->swcr;
12555 
12556                 swcr &= ~SWCR_STATUS_MASK;
12557                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12558 
12559                 if (put_user_u64(swcr, arg2))
12560                     return -TARGET_EFAULT;
12561                 ret = 0;
12562             }
12563             break;
12564 
12565           /* case GSI_IEEE_STATE_AT_SIGNAL:
12566              -- Not implemented in linux kernel.
12567              case GSI_UACPROC:
12568              -- Retrieves current unaligned access state; not much used.
12569              case GSI_PROC_TYPE:
12570              -- Retrieves implver information; surely not used.
12571              case GSI_GET_HWRPB:
12572              -- Grabs a copy of the HWRPB; surely not used.
12573           */
12574         }
12575         return ret;
12576 #endif
12577 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12578     /* Alpha specific */
12579     case TARGET_NR_osf_setsysinfo:
12580         ret = -TARGET_EOPNOTSUPP;
12581         switch (arg1) {
12582           case TARGET_SSI_IEEE_FP_CONTROL:
12583             {
12584                 uint64_t swcr, fpcr;
12585 
12586                 if (get_user_u64(swcr, arg2)) {
12587                     return -TARGET_EFAULT;
12588                 }
12589 
12590                 /*
12591                  * The kernel calls swcr_update_status to update the
12592                  * status bits from the fpcr at every point that it
12593                  * could be queried.  Therefore, we store the status
12594                  * bits only in FPCR.
12595                  */
12596                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12597 
12598                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12599                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12600                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12601                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12602                 ret = 0;
12603             }
12604             break;
12605 
12606           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12607             {
12608                 uint64_t exc, fpcr, fex;
12609 
12610                 if (get_user_u64(exc, arg2)) {
12611                     return -TARGET_EFAULT;
12612                 }
12613                 exc &= SWCR_STATUS_MASK;
12614                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12615 
12616                 /* Old exceptions are not signaled.  */
12617                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12618                 fex = exc & ~fex;
12619                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12620                 fex &= (cpu_env)->swcr;
12621 
12622                 /* Update the hardware fpcr.  */
12623                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12624                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12625 
12626                 if (fex) {
12627                     int si_code = TARGET_FPE_FLTUNK;
12628                     target_siginfo_t info;
12629 
12630                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12631                         si_code = TARGET_FPE_FLTUND;
12632                     }
12633                     if (fex & SWCR_TRAP_ENABLE_INE) {
12634                         si_code = TARGET_FPE_FLTRES;
12635                     }
12636                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12637                         si_code = TARGET_FPE_FLTUND;
12638                     }
12639                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12640                         si_code = TARGET_FPE_FLTOVF;
12641                     }
12642                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12643                         si_code = TARGET_FPE_FLTDIV;
12644                     }
12645                     if (fex & SWCR_TRAP_ENABLE_INV) {
12646                         si_code = TARGET_FPE_FLTINV;
12647                     }
12648 
12649                     info.si_signo = SIGFPE;
12650                     info.si_errno = 0;
12651                     info.si_code = si_code;
12652                     info._sifields._sigfault._addr = (cpu_env)->pc;
12653                     queue_signal(cpu_env, info.si_signo,
12654                                  QEMU_SI_FAULT, &info);
12655                 }
12656                 ret = 0;
12657             }
12658             break;
12659 
12660           /* case SSI_NVPAIRS:
12661              -- Used with SSIN_UACPROC to enable unaligned accesses.
12662              case SSI_IEEE_STATE_AT_SIGNAL:
12663              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12664              -- Not implemented in linux kernel
12665           */
12666         }
12667         return ret;
12668 #endif
12669 #ifdef TARGET_NR_osf_sigprocmask
12670     /* Alpha specific.  */
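    /*
     * Unlike sigprocmask(2), the OSF variant returns the old mask
     * directly in the result register rather than through a pointer.
     */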
12671     case TARGET_NR_osf_sigprocmask:
12672         {
12673             abi_ulong mask;
12674             int how;
12675             sigset_t set, oldset;
12676 
12677             switch (arg1) {
12678             case TARGET_SIG_BLOCK:
12679                 how = SIG_BLOCK;
12680                 break;
12681             case TARGET_SIG_UNBLOCK:
12682                 how = SIG_UNBLOCK;
12683                 break;
12684             case TARGET_SIG_SETMASK:
12685                 how = SIG_SETMASK;
12686                 break;
12687             default:
12688                 return -TARGET_EINVAL;
12689             }
12690             mask = arg2;
12691             target_to_host_old_sigset(&set, &mask);
12692             ret = do_sigprocmask(how, &set, &oldset);
12693             if (!ret) {
12694                 host_to_target_old_sigset(&mask, &oldset);
12695                 ret = mask;
12696             }
12697         }
12698         return ret;
12699 #endif
12700 
12701 #ifdef TARGET_NR_getgid32
12702     case TARGET_NR_getgid32:
12703         return get_errno(getgid());
12704 #endif
12705 #ifdef TARGET_NR_geteuid32
12706     case TARGET_NR_geteuid32:
12707         return get_errno(geteuid());
12708 #endif
12709 #ifdef TARGET_NR_getegid32
12710     case TARGET_NR_getegid32:
12711         return get_errno(getegid());
12712 #endif
12713 #ifdef TARGET_NR_setreuid32
12714     case TARGET_NR_setreuid32:
12715         return get_errno(sys_setreuid(arg1, arg2));
12716 #endif
12717 #ifdef TARGET_NR_setregid32
12718     case TARGET_NR_setregid32:
12719         return get_errno(sys_setregid(arg1, arg2));
12720 #endif
12721 #ifdef TARGET_NR_getgroups32
12722     case TARGET_NR_getgroups32:
12723         { /* the same code as for TARGET_NR_getgroups */
12724             int gidsetsize = arg1;
12725             uint32_t *target_grouplist;
12726             g_autofree gid_t *grouplist = NULL;
12727             int i;
12728 
12729             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12730                 return -TARGET_EINVAL;
12731             }
12732             if (gidsetsize > 0) {
12733                 grouplist = g_try_new(gid_t, gidsetsize);
12734                 if (!grouplist) {
12735                     return -TARGET_ENOMEM;
12736                 }
12737             }
12738             ret = get_errno(getgroups(gidsetsize, grouplist));
12739             if (!is_error(ret) && gidsetsize > 0) {
12740                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12741                                              gidsetsize * 4, 0);
12742                 if (!target_grouplist) {
12743                     return -TARGET_EFAULT;
12744                 }
12745                 for (i = 0; i < ret; i++) {
12746                     target_grouplist[i] = tswap32(grouplist[i]);
12747                 }
12748                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12749             }
12750             return ret;
12751         }
12752 #endif
12753 #ifdef TARGET_NR_setgroups32
12754     case TARGET_NR_setgroups32:
12755         { /* the same code as for TARGET_NR_setgroups */
12756             int gidsetsize = arg1;
12757             uint32_t *target_grouplist;
12758             g_autofree gid_t *grouplist = NULL;
12759             int i;
12760 
12761             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12762                 return -TARGET_EINVAL;
12763             }
12764             if (gidsetsize > 0) {
12765                 grouplist = g_try_new(gid_t, gidsetsize);
12766                 if (!grouplist) {
12767                     return -TARGET_ENOMEM;
12768                 }
12769                 target_grouplist = lock_user(VERIFY_READ, arg2,
12770                                              gidsetsize * 4, 1);
12771                 if (!target_grouplist) {
12772                     return -TARGET_EFAULT;
12773                 }
12774                 for (i = 0; i < gidsetsize; i++) {
12775                     grouplist[i] = tswap32(target_grouplist[i]);
12776                 }
12777                 unlock_user(target_grouplist, arg2, 0);
12778             }
12779             return get_errno(sys_setgroups(gidsetsize, grouplist));
12780         }
12781 #endif
12782 #ifdef TARGET_NR_fchown32
12783     case TARGET_NR_fchown32:
12784         return get_errno(fchown(arg1, arg2, arg3));
12785 #endif
12786 #ifdef TARGET_NR_setresuid32
12787     case TARGET_NR_setresuid32:
12788         return get_errno(sys_setresuid(arg1, arg2, arg3));
12789 #endif
12790 #ifdef TARGET_NR_getresuid32
12791     case TARGET_NR_getresuid32:
12792         {
12793             uid_t ruid, euid, suid;
12794             ret = get_errno(getresuid(&ruid, &euid, &suid));
12795             if (!is_error(ret)) {
12796                 if (put_user_u32(ruid, arg1)
12797                     || put_user_u32(euid, arg2)
12798                     || put_user_u32(suid, arg3))
12799                     return -TARGET_EFAULT;
12800             }
12801         }
12802         return ret;
12803 #endif
12804 #ifdef TARGET_NR_setresgid32
12805     case TARGET_NR_setresgid32:
12806         return get_errno(sys_setresgid(arg1, arg2, arg3));
12807 #endif
12808 #ifdef TARGET_NR_getresgid32
12809     case TARGET_NR_getresgid32:
12810         {
12811             gid_t rgid, egid, sgid;
12812             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12813             if (!is_error(ret)) {
12814                 if (put_user_u32(rgid, arg1)
12815                     || put_user_u32(egid, arg2)
12816                     || put_user_u32(sgid, arg3))
12817                     return -TARGET_EFAULT;
12818             }
12819         }
12820         return ret;
12821 #endif
12822 #ifdef TARGET_NR_chown32
12823     case TARGET_NR_chown32:
12824         if (!(p = lock_user_string(arg1)))
12825             return -TARGET_EFAULT;
12826         ret = get_errno(chown(p, arg2, arg3));
12827         unlock_user(p, arg1, 0);
12828         return ret;
12829 #endif
12830 #ifdef TARGET_NR_setuid32
12831     case TARGET_NR_setuid32:
12832         return get_errno(sys_setuid(arg1));
12833 #endif
12834 #ifdef TARGET_NR_setgid32
12835     case TARGET_NR_setgid32:
12836         return get_errno(sys_setgid(arg1));
12837 #endif
12838 #ifdef TARGET_NR_setfsuid32
12839     case TARGET_NR_setfsuid32:
12840         return get_errno(setfsuid(arg1));
12841 #endif
12842 #ifdef TARGET_NR_setfsgid32
12843     case TARGET_NR_setfsgid32:
12844         return get_errno(setfsgid(arg1));
12845 #endif
12846 #ifdef TARGET_NR_mincore
12847     case TARGET_NR_mincore:
12848         {
12849             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12850             if (!a) {
12851                 return -TARGET_ENOMEM;
12852             }
12853             p = lock_user_string(arg3);
12854             if (!p) {
12855                 ret = -TARGET_EFAULT;
12856             } else {
12857                 ret = get_errno(mincore(a, arg2, p));
12858                 unlock_user(p, arg3, ret);
12859             }
12860             unlock_user(a, arg1, 0);
12861         }
12862         return ret;
12863 #endif
12864 #ifdef TARGET_NR_arm_fadvise64_64
12865     case TARGET_NR_arm_fadvise64_64:
12866         /* arm_fadvise64_64 looks like fadvise64_64 but
12867          * with different argument order: fd, advice, offset, len
12868          * rather than the usual fd, offset, len, advice.
12869          * Note that offset and len are both 64-bit so appear as
12870          * pairs of 32-bit registers.
12871          */
12872         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12873                             target_offset64(arg5, arg6), arg2);
12874         return -host_to_target_errno(ret);
12875 #endif
12876 
12877 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12878 
12879 #ifdef TARGET_NR_fadvise64_64
12880     case TARGET_NR_fadvise64_64:
12881 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12882         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12883         ret = arg2;
12884         arg2 = arg3;
12885         arg3 = arg4;
12886         arg4 = arg5;
12887         arg5 = arg6;
12888         arg6 = ret;
12889 #else
12890         /* 6 args: fd, offset (high, low), len (high, low), advice */
12891         if (regpairs_aligned(cpu_env, num)) {
12892             /* offset is in (3,4), len in (5,6) and advice in 7 */
12893             arg2 = arg3;
12894             arg3 = arg4;
12895             arg4 = arg5;
12896             arg5 = arg6;
12897             arg6 = arg7;
12898         }
12899 #endif
12900         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12901                             target_offset64(arg4, arg5), arg6);
12902         return -host_to_target_errno(ret);
12903 #endif
12904 
12905 #ifdef TARGET_NR_fadvise64
12906     case TARGET_NR_fadvise64:
12907         /* 5 args: fd, offset (high, low), len, advice */
12908         if (regpairs_aligned(cpu_env, num)) {
12909             /* offset is in (3,4), len in 5 and advice in 6 */
12910             arg2 = arg3;
12911             arg3 = arg4;
12912             arg4 = arg5;
12913             arg5 = arg6;
12914         }
12915         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12916         return -host_to_target_errno(ret);
12917 #endif
12918 
12919 #else /* not a 32-bit ABI */
12920 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12921 #ifdef TARGET_NR_fadvise64_64
12922     case TARGET_NR_fadvise64_64:
12923 #endif
12924 #ifdef TARGET_NR_fadvise64
12925     case TARGET_NR_fadvise64:
12926 #endif
12927 #ifdef TARGET_S390X
12928         switch (arg4) {
12929         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12930         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12931         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12932         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12933         default: break;
12934         }
12935 #endif
12936         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12937 #endif
12938 #endif /* end of 64-bit ABI fadvise handling */
12939 
12940 #ifdef TARGET_NR_madvise
12941     case TARGET_NR_madvise:
12942         return target_madvise(arg1, arg2, arg3);
12943 #endif
12944 #ifdef TARGET_NR_fcntl64
12945     case TARGET_NR_fcntl64:
12946     {
12947         int cmd;
12948         struct flock fl;
12949         from_flock64_fn *copyfrom = copy_from_user_flock64;
12950         to_flock64_fn *copyto = copy_to_user_flock64;
12951 
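        /*
         * The ARM OABI lays out flock64 differently, so use the OABI
         * copy helpers when the CPU is not running an EABI binary.
         */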
12952 #ifdef TARGET_ARM
12953         if (!cpu_env->eabi) {
12954             copyfrom = copy_from_user_oabi_flock64;
12955             copyto = copy_to_user_oabi_flock64;
12956         }
12957 #endif
12958 
12959         cmd = target_to_host_fcntl_cmd(arg2);
12960         if (cmd == -TARGET_EINVAL) {
12961             return cmd;
12962         }
12963 
12964         switch (arg2) {
12965         case TARGET_F_GETLK64:
12966             ret = copyfrom(&fl, arg3);
12967             if (ret) {
12968                 break;
12969             }
12970             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12971             if (ret == 0) {
12972                 ret = copyto(arg3, &fl);
12973             }
12974             break;
12975 
12976         case TARGET_F_SETLK64:
12977         case TARGET_F_SETLKW64:
12978             ret = copyfrom(&fl, arg3);
12979             if (ret) {
12980                 break;
12981             }
12982             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12983             break;
12984         default:
12985             ret = do_fcntl(arg1, arg2, arg3);
12986             break;
12987         }
12988         return ret;
12989     }
12990 #endif
12991 #ifdef TARGET_NR_cacheflush
12992     case TARGET_NR_cacheflush:
12993         /* self-modifying code is handled automatically, so nothing needed */
12994         return 0;
12995 #endif
12996 #ifdef TARGET_NR_getpagesize
12997     case TARGET_NR_getpagesize:
12998         return TARGET_PAGE_SIZE;
12999 #endif
13000     case TARGET_NR_gettid:
13001         return get_errno(sys_gettid());
13002 #ifdef TARGET_NR_readahead
13003     case TARGET_NR_readahead:
13004 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13005         if (regpairs_aligned(cpu_env, num)) {
13006             arg2 = arg3;
13007             arg3 = arg4;
13008             arg4 = arg5;
13009         }
13010         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
13011 #else
13012         ret = get_errno(readahead(arg1, arg2, arg3));
13013 #endif
13014         return ret;
13015 #endif
13016 #ifdef CONFIG_ATTR
13017 #ifdef TARGET_NR_setxattr
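    /*
     * The *xattr calls accept a NULL value/list buffer so that callers
     * can query the required size; only lock guest memory when a buffer
     * was actually supplied.
     */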
13018     case TARGET_NR_listxattr:
13019     case TARGET_NR_llistxattr:
13020     {
13021         void *b = 0;
13022         if (arg2) {
13023             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
13024             if (!b) {
13025                 return -TARGET_EFAULT;
13026             }
13027         }
13028         p = lock_user_string(arg1);
13029         if (p) {
13030             if (num == TARGET_NR_listxattr) {
13031                 ret = get_errno(listxattr(p, b, arg3));
13032             } else {
13033                 ret = get_errno(llistxattr(p, b, arg3));
13034             }
13035         } else {
13036             ret = -TARGET_EFAULT;
13037         }
13038         unlock_user(p, arg1, 0);
13039         unlock_user(b, arg2, arg3);
13040         return ret;
13041     }
13042     case TARGET_NR_flistxattr:
13043     {
13044         void *b = 0;
13045         if (arg2) {
13046             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
13047             if (!b) {
13048                 return -TARGET_EFAULT;
13049             }
13050         }
13051         ret = get_errno(flistxattr(arg1, b, arg3));
13052         unlock_user(b, arg2, arg3);
13053         return ret;
13054     }
13055     case TARGET_NR_setxattr:
13056     case TARGET_NR_lsetxattr:
13057         {
13058             void *n, *v = 0;
13059             if (arg3) {
13060                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
13061                 if (!v) {
13062                     return -TARGET_EFAULT;
13063                 }
13064             }
13065             p = lock_user_string(arg1);
13066             n = lock_user_string(arg2);
13067             if (p && n) {
13068                 if (num == TARGET_NR_setxattr) {
13069                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
13070                 } else {
13071                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
13072                 }
13073             } else {
13074                 ret = -TARGET_EFAULT;
13075             }
13076             unlock_user(p, arg1, 0);
13077             unlock_user(n, arg2, 0);
13078             unlock_user(v, arg3, 0);
13079         }
13080         return ret;
13081     case TARGET_NR_fsetxattr:
13082         {
13083             void *n, *v = 0;
13084             if (arg3) {
13085                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
13086                 if (!v) {
13087                     return -TARGET_EFAULT;
13088                 }
13089             }
13090             n = lock_user_string(arg2);
13091             if (n) {
13092                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
13093             } else {
13094                 ret = -TARGET_EFAULT;
13095             }
13096             unlock_user(n, arg2, 0);
13097             unlock_user(v, arg3, 0);
13098         }
13099         return ret;
13100     case TARGET_NR_getxattr:
13101     case TARGET_NR_lgetxattr:
13102         {
13103             void *n, *v = 0;
13104             if (arg3) {
13105                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
13106                 if (!v) {
13107                     return -TARGET_EFAULT;
13108                 }
13109             }
13110             p = lock_user_string(arg1);
13111             n = lock_user_string(arg2);
13112             if (p && n) {
13113                 if (num == TARGET_NR_getxattr) {
13114                     ret = get_errno(getxattr(p, n, v, arg4));
13115                 } else {
13116                     ret = get_errno(lgetxattr(p, n, v, arg4));
13117                 }
13118             } else {
13119                 ret = -TARGET_EFAULT;
13120             }
13121             unlock_user(p, arg1, 0);
13122             unlock_user(n, arg2, 0);
13123             unlock_user(v, arg3, arg4);
13124         }
13125         return ret;
13126     case TARGET_NR_fgetxattr:
13127         {
13128             void *n, *v = 0;
13129             if (arg3) {
13130                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
13131                 if (!v) {
13132                     return -TARGET_EFAULT;
13133                 }
13134             }
13135             n = lock_user_string(arg2);
13136             if (n) {
13137                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
13138             } else {
13139                 ret = -TARGET_EFAULT;
13140             }
13141             unlock_user(n, arg2, 0);
13142             unlock_user(v, arg3, arg4);
13143         }
13144         return ret;
13145     case TARGET_NR_removexattr:
13146     case TARGET_NR_lremovexattr:
13147         {
13148             void *n;
13149             p = lock_user_string(arg1);
13150             n = lock_user_string(arg2);
13151             if (p && n) {
13152                 if (num == TARGET_NR_removexattr) {
13153                     ret = get_errno(removexattr(p, n));
13154                 } else {
13155                     ret = get_errno(lremovexattr(p, n));
13156                 }
13157             } else {
13158                 ret = -TARGET_EFAULT;
13159             }
13160             unlock_user(p, arg1, 0);
13161             unlock_user(n, arg2, 0);
13162         }
13163         return ret;
13164     case TARGET_NR_fremovexattr:
13165         {
13166             void *n;
13167             n = lock_user_string(arg2);
13168             if (n) {
13169                 ret = get_errno(fremovexattr(arg1, n));
13170             } else {
13171                 ret = -TARGET_EFAULT;
13172             }
13173             unlock_user(n, arg2, 0);
13174         }
13175         return ret;
13176 #endif
13177 #endif /* CONFIG_ATTR */
13178 #ifdef TARGET_NR_set_thread_area
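    /*
     * set_thread_area is emulated per target: MIPS stores the TLS
     * pointer in CP0_UserLocal, 32-bit x86 updates a GDT entry via
     * do_set_thread_area(), and m68k keeps it in the TaskState.
     */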
13179     case TARGET_NR_set_thread_area:
13180 #if defined(TARGET_MIPS)
13181       cpu_env->active_tc.CP0_UserLocal = arg1;
13182       return 0;
13183 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
13184       return do_set_thread_area(cpu_env, arg1);
13185 #elif defined(TARGET_M68K)
13186       {
13187           TaskState *ts = get_task_state(cpu);
13188           ts->tp_value = arg1;
13189           return 0;
13190       }
13191 #else
13192       return -TARGET_ENOSYS;
13193 #endif
13194 #endif
13195 #ifdef TARGET_NR_get_thread_area
13196     case TARGET_NR_get_thread_area:
13197 #if defined(TARGET_I386) && defined(TARGET_ABI32)
13198         return do_get_thread_area(cpu_env, arg1);
13199 #elif defined(TARGET_M68K)
13200         {
13201             TaskState *ts = get_task_state(cpu);
13202             return ts->tp_value;
13203         }
13204 #else
13205         return -TARGET_ENOSYS;
13206 #endif
13207 #endif
13208 #ifdef TARGET_NR_getdomainname
13209     case TARGET_NR_getdomainname:
13210         return -TARGET_ENOSYS;
13211 #endif
13212 
13213 #ifdef TARGET_NR_clock_settime
13214     case TARGET_NR_clock_settime:
13215     {
13216         struct timespec ts;
13217 
13218         ret = target_to_host_timespec(&ts, arg2);
13219         if (!is_error(ret)) {
13220             ret = get_errno(clock_settime(arg1, &ts));
13221         }
13222         return ret;
13223     }
13224 #endif
13225 #ifdef TARGET_NR_clock_settime64
13226     case TARGET_NR_clock_settime64:
13227     {
13228         struct timespec ts;
13229 
13230         ret = target_to_host_timespec64(&ts, arg2);
13231         if (!is_error(ret)) {
13232             ret = get_errno(clock_settime(arg1, &ts));
13233         }
13234         return ret;
13235     }
13236 #endif
13237 #ifdef TARGET_NR_clock_gettime
13238     case TARGET_NR_clock_gettime:
13239     {
13240         struct timespec ts;
13241         ret = get_errno(clock_gettime(arg1, &ts));
13242         if (!is_error(ret)) {
13243             ret = host_to_target_timespec(arg2, &ts);
13244         }
13245         return ret;
13246     }
13247 #endif
13248 #ifdef TARGET_NR_clock_gettime64
13249     case TARGET_NR_clock_gettime64:
13250     {
13251         struct timespec ts;
13252         ret = get_errno(clock_gettime(arg1, &ts));
13253         if (!is_error(ret)) {
13254             ret = host_to_target_timespec64(arg2, &ts);
13255         }
13256         return ret;
13257     }
13258 #endif
13259 #ifdef TARGET_NR_clock_getres
13260     case TARGET_NR_clock_getres:
13261     {
13262         struct timespec ts;
13263         ret = get_errno(clock_getres(arg1, &ts));
13264         if (!is_error(ret)) {
13265             host_to_target_timespec(arg2, &ts);
13266         }
13267         return ret;
13268     }
13269 #endif
13270 #ifdef TARGET_NR_clock_getres_time64
13271     case TARGET_NR_clock_getres_time64:
13272     {
13273         struct timespec ts;
13274         ret = get_errno(clock_getres(arg1, &ts));
13275         if (!is_error(ret)) {
13276             host_to_target_timespec64(arg2, &ts);
13277         }
13278         return ret;
13279     }
13280 #endif
13281 #ifdef TARGET_NR_clock_nanosleep
13282     case TARGET_NR_clock_nanosleep:
13283     {
13284         struct timespec ts;
13285         if (target_to_host_timespec(&ts, arg3)) {
13286             return -TARGET_EFAULT;
13287         }
13288         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
13289                                              &ts, arg4 ? &ts : NULL));
13290         /*
13291          * If the call is interrupted by a signal handler it fails with
13292          * -TARGET_EINTR; if arg4 (rem) is non-NULL and arg2 (flags) is not
13293          * TIMER_ABSTIME, the remaining unslept time is copied back to arg4.
13294          */
13295         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
13296             host_to_target_timespec(arg4, &ts)) {
13297               return -TARGET_EFAULT;
13298         }
13299 
13300         return ret;
13301     }
13302 #endif
13303 #ifdef TARGET_NR_clock_nanosleep_time64
13304     case TARGET_NR_clock_nanosleep_time64:
13305     {
13306         struct timespec ts;
13307 
13308         if (target_to_host_timespec64(&ts, arg3)) {
13309             return -TARGET_EFAULT;
13310         }
13311 
13312         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
13313                                              &ts, arg4 ? &ts : NULL));
13314 
13315         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
13316             host_to_target_timespec64(arg4, &ts)) {
13317             return -TARGET_EFAULT;
13318         }
13319         return ret;
13320     }
13321 #endif
13322 
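    /*
     * set_tid_address is not forwarded to the host; the guest pointer is
     * recorded in the TaskState so that the thread-exit path can emulate
     * the kernel's clear-tid/futex-wake behaviour for it.
     */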
13323 #if defined(TARGET_NR_set_tid_address)
13324     case TARGET_NR_set_tid_address:
13325     {
13326         TaskState *ts = get_task_state(cpu);
13327         ts->child_tidptr = arg1;
13328         /* do not call the host set_tid_address() syscall; just return the tid */
13329         return get_errno(sys_gettid());
13330     }
13331 #endif
13332 
13333     case TARGET_NR_tkill:
13334         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
13335 
13336     case TARGET_NR_tgkill:
13337         return get_errno(safe_tgkill((int)arg1, (int)arg2,
13338                          target_to_host_signal(arg3)));
13339 
13340 #ifdef TARGET_NR_set_robust_list
13341     case TARGET_NR_set_robust_list:
13342     case TARGET_NR_get_robust_list:
13343         /* The ABI for supporting robust futexes has userspace pass
13344          * the kernel a pointer to a linked list which is updated by
13345          * userspace after the syscall; the list is walked by the kernel
13346          * when the thread exits. Since the linked list in QEMU guest
13347          * memory isn't a valid linked list for the host and we have
13348          * no way to reliably intercept the thread-death event, we can't
13349          * support these. Silently return ENOSYS so that guest userspace
13350          * falls back to a non-robust futex implementation (which should
13351          * be OK except in the corner case of the guest crashing while
13352          * holding a mutex that is shared with another process via
13353          * shared memory).
13354          */
13355         return -TARGET_ENOSYS;
13356 #endif
13357 
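    /*
     * utimensat: arg3, when non-NULL, points at a two-element timespec
     * array in guest memory ([0] = atime, [1] = mtime); a NULL arg3 is
     * passed through and means "set both timestamps to the current time".
     */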
13358 #if defined(TARGET_NR_utimensat)
13359     case TARGET_NR_utimensat:
13360         {
13361             struct timespec *tsp, ts[2];
13362             if (!arg3) {
13363                 tsp = NULL;
13364             } else {
13365                 if (target_to_host_timespec(ts, arg3)) {
13366                     return -TARGET_EFAULT;
13367                 }
13368                 if (target_to_host_timespec(ts + 1, arg3 +
13369                                             sizeof(struct target_timespec))) {
13370                     return -TARGET_EFAULT;
13371                 }
13372                 tsp = ts;
13373             }
13374             if (!arg2) {
13375                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13376             } else {
13377                 if (!(p = lock_user_string(arg2))) {
13378                     return -TARGET_EFAULT;
13379                 }
13380                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13381                 unlock_user(p, arg2, 0);
13382             }
13383         }
13384         return ret;
13385 #endif
13386 #ifdef TARGET_NR_utimensat_time64
13387     case TARGET_NR_utimensat_time64:
13388         {
13389             struct timespec *tsp, ts[2];
13390             if (!arg3) {
13391                 tsp = NULL;
13392             } else {
13393                 if (target_to_host_timespec64(ts, arg3)) {
13394                     return -TARGET_EFAULT;
13395                 }
13396                 if (target_to_host_timespec64(ts + 1, arg3 +
13397                                      sizeof(struct target__kernel_timespec))) {
13398                     return -TARGET_EFAULT;
13399                 }
13400                 tsp = ts;
13401             }
13402             if (!arg2) {
13403                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13404             } else {
13405                 p = lock_user_string(arg2);
13406                 if (!p) {
13407                     return -TARGET_EFAULT;
13408                 }
13409                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13410                 unlock_user(p, arg2, 0);
13411             }
13412         }
13413         return ret;
13414 #endif
13415 #ifdef TARGET_NR_futex
13416     case TARGET_NR_futex:
13417         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13418 #endif
13419 #ifdef TARGET_NR_futex_time64
13420     case TARGET_NR_futex_time64:
13421         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13422 #endif
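    /*
     * inotify descriptors are registered with target_inotify_trans so
     * that the struct inotify_event data later read from them can be
     * converted to the guest's expected layout and byte order.
     */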
13423 #ifdef CONFIG_INOTIFY
13424 #if defined(TARGET_NR_inotify_init)
13425     case TARGET_NR_inotify_init:
13426         ret = get_errno(inotify_init());
13427         if (ret >= 0) {
13428             fd_trans_register(ret, &target_inotify_trans);
13429         }
13430         return ret;
13431 #endif
13432 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13433     case TARGET_NR_inotify_init1:
13434         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13435                                           fcntl_flags_tbl)));
13436         if (ret >= 0) {
13437             fd_trans_register(ret, &target_inotify_trans);
13438         }
13439         return ret;
13440 #endif
13441 #if defined(TARGET_NR_inotify_add_watch)
13442     case TARGET_NR_inotify_add_watch:
13443         p = lock_user_string(arg2);
13444         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13445         unlock_user(p, arg2, 0);
13446         return ret;
13447 #endif
13448 #if defined(TARGET_NR_inotify_rm_watch)
13449     case TARGET_NR_inotify_rm_watch:
13450         return get_errno(inotify_rm_watch(arg1, arg2));
13451 #endif
13452 #endif
13453 
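    /*
     * POSIX message queues: the guest's mq_attr and O_* flag bitmask are
     * translated (copy_from_user_mq_attr(), target_to_host_bitmask()) and
     * the host mq_* interfaces are called directly; the timed send and
     * receive variants also round-trip the timeout through the timespec
     * conversion helpers.
     */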
13454 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13455     case TARGET_NR_mq_open:
13456         {
13457             struct mq_attr posix_mq_attr;
13458             struct mq_attr *pposix_mq_attr;
13459             int host_flags;
13460 
13461             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13462             pposix_mq_attr = NULL;
13463             if (arg4) {
13464                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13465                     return -TARGET_EFAULT;
13466                 }
13467                 pposix_mq_attr = &posix_mq_attr;
13468             }
13469             p = lock_user_string(arg1 - 1);
13470             if (!p) {
13471                 return -TARGET_EFAULT;
13472             }
13473             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13474             unlock_user(p, arg1, 0);
13475         }
13476         return ret;
13477 
13478     case TARGET_NR_mq_unlink:
13479         p = lock_user_string(arg1 - 1);
13480         if (!p) {
13481             return -TARGET_EFAULT;
13482         }
13483         ret = get_errno(mq_unlink(p));
13484         unlock_user(p, arg1, 0);
13485         return ret;
13486 
13487 #ifdef TARGET_NR_mq_timedsend
13488     case TARGET_NR_mq_timedsend:
13489         {
13490             struct timespec ts;
13491 
13492             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13493             if (arg5 != 0) {
13494                 if (target_to_host_timespec(&ts, arg5)) {
13495                     return -TARGET_EFAULT;
13496                 }
13497                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13498                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13499                     return -TARGET_EFAULT;
13500                 }
13501             } else {
13502                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13503             }
13504             unlock_user(p, arg2, arg3);
13505         }
13506         return ret;
13507 #endif
13508 #ifdef TARGET_NR_mq_timedsend_time64
13509     case TARGET_NR_mq_timedsend_time64:
13510         {
13511             struct timespec ts;
13512 
13513             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13514             if (arg5 != 0) {
13515                 if (target_to_host_timespec64(&ts, arg5)) {
13516                     return -TARGET_EFAULT;
13517                 }
13518                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13519                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13520                     return -TARGET_EFAULT;
13521                 }
13522             } else {
13523                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13524             }
13525             unlock_user(p, arg2, arg3);
13526         }
13527         return ret;
13528 #endif
13529 
13530 #ifdef TARGET_NR_mq_timedreceive
13531     case TARGET_NR_mq_timedreceive:
13532         {
13533             struct timespec ts;
13534             unsigned int prio;
13535 
13536             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13537             if (arg5 != 0) {
13538                 if (target_to_host_timespec(&ts, arg5)) {
13539                     return -TARGET_EFAULT;
13540                 }
13541                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13542                                                      &prio, &ts));
13543                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13544                     return -TARGET_EFAULT;
13545                 }
13546             } else {
13547                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13548                                                      &prio, NULL));
13549             }
13550             unlock_user(p, arg2, arg3);
13551             if (arg4 != 0)
13552                 put_user_u32(prio, arg4);
13553         }
13554         return ret;
13555 #endif
13556 #ifdef TARGET_NR_mq_timedreceive_time64
13557     case TARGET_NR_mq_timedreceive_time64:
13558         {
13559             struct timespec ts;
13560             unsigned int prio;
13561 
13562             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13563             if (arg5 != 0) {
13564                 if (target_to_host_timespec64(&ts, arg5)) {
13565                     return -TARGET_EFAULT;
13566                 }
13567                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13568                                                      &prio, &ts));
13569                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13570                     return -TARGET_EFAULT;
13571                 }
13572             } else {
13573                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13574                                                      &prio, NULL));
13575             }
13576             unlock_user(p, arg2, arg3);
13577             if (arg4 != 0) {
13578                 put_user_u32(prio, arg4);
13579             }
13580         }
13581         return ret;
13582 #endif
13583 
13584     /* Not implemented for now... */
13585 /*     case TARGET_NR_mq_notify: */
13586 /*         break; */
13587 
13588     case TARGET_NR_mq_getsetattr:
13589         {
13590             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13591             ret = 0;
13592             if (arg2 != 0) {
13593                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13594                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13595                                            &posix_mq_attr_out));
13596             } else if (arg3 != 0) {
13597                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13598             }
13599             if (ret == 0 && arg3 != 0) {
13600                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13601             }
13602         }
13603         return ret;
13604 #endif
13605 
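    /*
     * splice family: the optional loff_t offsets live in guest memory, so
     * they are read with get_user_u64(), handed to the host call by
     * pointer, and written back with put_user_u64() so the guest observes
     * the updated positions.
     */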
13606 #ifdef CONFIG_SPLICE
13607 #ifdef TARGET_NR_tee
13608     case TARGET_NR_tee:
13609         {
13610             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13611         }
13612         return ret;
13613 #endif
13614 #ifdef TARGET_NR_splice
13615     case TARGET_NR_splice:
13616         {
13617             loff_t loff_in, loff_out;
13618             loff_t *ploff_in = NULL, *ploff_out = NULL;
13619             if (arg2) {
13620                 if (get_user_u64(loff_in, arg2)) {
13621                     return -TARGET_EFAULT;
13622                 }
13623                 ploff_in = &loff_in;
13624             }
13625             if (arg4) {
13626                 if (get_user_u64(loff_out, arg4)) {
13627                     return -TARGET_EFAULT;
13628                 }
13629                 ploff_out = &loff_out;
13630             }
13631             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13632             if (arg2) {
13633                 if (put_user_u64(loff_in, arg2)) {
13634                     return -TARGET_EFAULT;
13635                 }
13636             }
13637             if (arg4) {
13638                 if (put_user_u64(loff_out, arg4)) {
13639                     return -TARGET_EFAULT;
13640                 }
13641             }
13642         }
13643         return ret;
13644 #endif
13645 #ifdef TARGET_NR_vmsplice
13646     case TARGET_NR_vmsplice:
13647         {
13648             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13649             if (vec != NULL) {
13650                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13651                 unlock_iovec(vec, arg2, arg3, 0);
13652             } else {
13653                 ret = -host_to_target_errno(errno);
13654             }
13655         }
13656         return ret;
13657 #endif
13658 #endif /* CONFIG_SPLICE */
13659 #ifdef CONFIG_EVENTFD
13660 #if defined(TARGET_NR_eventfd)
13661     case TARGET_NR_eventfd:
13662         ret = get_errno(eventfd(arg1, 0));
13663         if (ret >= 0) {
13664             fd_trans_register(ret, &target_eventfd_trans);
13665         }
13666         return ret;
13667 #endif
13668 #if defined(TARGET_NR_eventfd2)
13669     case TARGET_NR_eventfd2:
13670     {
13671         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13672         if (arg2 & TARGET_O_NONBLOCK) {
13673             host_flags |= O_NONBLOCK;
13674         }
13675         if (arg2 & TARGET_O_CLOEXEC) {
13676             host_flags |= O_CLOEXEC;
13677         }
13678         ret = get_errno(eventfd(arg1, host_flags));
13679         if (ret >= 0) {
13680             fd_trans_register(ret, &target_eventfd_trans);
13681         }
13682         return ret;
13683     }
13684 #endif
13685 #endif /* CONFIG_EVENTFD  */
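    /*
     * fallocate and sync_file_range: on 32-bit ABIs (except MIPS N32) the
     * 64-bit offset and length arguments arrive split across register
     * pairs and are reassembled with target_offset64(); 64-bit ABIs pass
     * them through unchanged.
     */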
13686 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13687     case TARGET_NR_fallocate:
13688 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13689         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13690                                   target_offset64(arg5, arg6)));
13691 #else
13692         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13693 #endif
13694         return ret;
13695 #endif
13696 #if defined(CONFIG_SYNC_FILE_RANGE)
13697 #if defined(TARGET_NR_sync_file_range)
13698     case TARGET_NR_sync_file_range:
13699 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13700 #if defined(TARGET_MIPS)
13701         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13702                                         target_offset64(arg5, arg6), arg7));
13703 #else
13704         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13705                                         target_offset64(arg4, arg5), arg6));
13706 #endif /* !TARGET_MIPS */
13707 #else
13708         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13709 #endif
13710         return ret;
13711 #endif
13712 #if defined(TARGET_NR_sync_file_range2) || \
13713     defined(TARGET_NR_arm_sync_file_range)
13714 #if defined(TARGET_NR_sync_file_range2)
13715     case TARGET_NR_sync_file_range2:
13716 #endif
13717 #if defined(TARGET_NR_arm_sync_file_range)
13718     case TARGET_NR_arm_sync_file_range:
13719 #endif
13720         /* This is like sync_file_range but the arguments are reordered */
13721 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13722         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13723                                         target_offset64(arg5, arg6), arg2));
13724 #else
13725         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13726 #endif
13727         return ret;
13728 #endif
13729 #endif
13730 #if defined(TARGET_NR_signalfd4)
13731     case TARGET_NR_signalfd4:
13732         return do_signalfd4(arg1, arg2, arg4);
13733 #endif
13734 #if defined(TARGET_NR_signalfd)
13735     case TARGET_NR_signalfd:
13736         return do_signalfd4(arg1, arg2, 0);
13737 #endif
13738 #if defined(CONFIG_EPOLL)
13739 #if defined(TARGET_NR_epoll_create)
13740     case TARGET_NR_epoll_create:
13741         return get_errno(epoll_create(arg1));
13742 #endif
13743 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13744     case TARGET_NR_epoll_create1:
13745         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13746 #endif
13747 #if defined(TARGET_NR_epoll_ctl)
13748     case TARGET_NR_epoll_ctl:
13749     {
13750         struct epoll_event ep;
13751         struct epoll_event *epp = 0;
13752         if (arg4) {
13753             if (arg2 != EPOLL_CTL_DEL) {
13754                 struct target_epoll_event *target_ep;
13755                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13756                     return -TARGET_EFAULT;
13757                 }
13758                 ep.events = tswap32(target_ep->events);
13759                 /*
13760                  * The epoll_data_t union is just opaque data to the kernel,
13761                  * so we transfer all 64 bits across and need not worry what
13762                  * actual data type it is.
13763                  */
13764                 ep.data.u64 = tswap64(target_ep->data.u64);
13765                 unlock_user_struct(target_ep, arg4, 0);
13766             }
13767             /*
13768              * Before kernel 2.6.9 the EPOLL_CTL_DEL operation required a
13769              * non-NULL event pointer even though its contents are ignored,
13770              * so pass &ep whenever the guest supplied a pointer.
13771              */
13772             epp = &ep;
13773         }
13774         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13775     }
13776 #endif
13777 
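    /*
     * epoll_wait and epoll_pwait share one implementation: the guest
     * events buffer is locked for writing, a bounce buffer of host
     * epoll_event structures is allocated, and on success each returned
     * event is byte-swapped back into the guest array before unlocking.
     */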
13778 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13779 #if defined(TARGET_NR_epoll_wait)
13780     case TARGET_NR_epoll_wait:
13781 #endif
13782 #if defined(TARGET_NR_epoll_pwait)
13783     case TARGET_NR_epoll_pwait:
13784 #endif
13785     {
13786         struct target_epoll_event *target_ep;
13787         struct epoll_event *ep;
13788         int epfd = arg1;
13789         int maxevents = arg3;
13790         int timeout = arg4;
13791 
13792         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13793             return -TARGET_EINVAL;
13794         }
13795 
13796         target_ep = lock_user(VERIFY_WRITE, arg2,
13797                               maxevents * sizeof(struct target_epoll_event), 1);
13798         if (!target_ep) {
13799             return -TARGET_EFAULT;
13800         }
13801 
13802         ep = g_try_new(struct epoll_event, maxevents);
13803         if (!ep) {
13804             unlock_user(target_ep, arg2, 0);
13805             return -TARGET_ENOMEM;
13806         }
13807 
13808         switch (num) {
13809 #if defined(TARGET_NR_epoll_pwait)
13810         case TARGET_NR_epoll_pwait:
13811         {
13812             sigset_t *set = NULL;
13813 
13814             if (arg5) {
13815                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13816                 if (ret != 0) {
13817                     break;
13818                 }
13819             }
13820 
13821             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13822                                              set, SIGSET_T_SIZE));
13823 
13824             if (set) {
13825                 finish_sigsuspend_mask(ret);
13826             }
13827             break;
13828         }
13829 #endif
13830 #if defined(TARGET_NR_epoll_wait)
13831         case TARGET_NR_epoll_wait:
13832             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13833                                              NULL, 0));
13834             break;
13835 #endif
13836         default:
13837             ret = -TARGET_ENOSYS;
13838         }
13839         if (!is_error(ret)) {
13840             int i;
13841             for (i = 0; i < ret; i++) {
13842                 target_ep[i].events = tswap32(ep[i].events);
13843                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13844             }
13845             unlock_user(target_ep, arg2,
13846                         ret * sizeof(struct target_epoll_event));
13847         } else {
13848             unlock_user(target_ep, arg2, 0);
13849         }
13850         g_free(ep);
13851         return ret;
13852     }
13853 #endif
13854 #endif
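    /*
     * prlimit64: new limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK
     * are intentionally not applied to the host (rnewp stays NULL for
     * them), presumably because constraining the QEMU process itself with
     * the guest's limits could break the emulation; old limits are still
     * read back and returned to the guest.
     */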
13855 #ifdef TARGET_NR_prlimit64
13856     case TARGET_NR_prlimit64:
13857     {
13858         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13859         struct target_rlimit64 *target_rnew, *target_rold;
13860         struct host_rlimit64 rnew, rold, *rnewp = 0;
13861         int resource = target_to_host_resource(arg2);
13862 
13863         if (arg3 && (resource != RLIMIT_AS &&
13864                      resource != RLIMIT_DATA &&
13865                      resource != RLIMIT_STACK)) {
13866             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13867                 return -TARGET_EFAULT;
13868             }
13869             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13870             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13871             unlock_user_struct(target_rnew, arg3, 0);
13872             rnewp = &rnew;
13873         }
13874 
13875         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13876         if (!is_error(ret) && arg4) {
13877             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13878                 return -TARGET_EFAULT;
13879             }
13880             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13881             __put_user(rold.rlim_max, &target_rold->rlim_max);
13882             unlock_user_struct(target_rold, arg4, 1);
13883         }
13884         return ret;
13885     }
13886 #endif
13887 #ifdef TARGET_NR_gethostname
13888     case TARGET_NR_gethostname:
13889     {
13890         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13891         if (name) {
13892             ret = get_errno(gethostname(name, arg2));
13893             unlock_user(name, arg1, arg2);
13894         } else {
13895             ret = -TARGET_EFAULT;
13896         }
13897         return ret;
13898     }
13899 #endif
13900 #ifdef TARGET_NR_atomic_cmpxchg_32
13901     case TARGET_NR_atomic_cmpxchg_32:
13902     {
13903         /* should use start_exclusive from main.c */
13904         abi_ulong mem_value;
13905         if (get_user_u32(mem_value, arg6)) {
13906             target_siginfo_t info;
13907             info.si_signo = SIGSEGV;
13908             info.si_errno = 0;
13909             info.si_code = TARGET_SEGV_MAPERR;
13910             info._sifields._sigfault._addr = arg6;
13911             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13912             ret = 0xdeadbeef;
13913 
13914         }
13915         if (mem_value == arg2)
13916             put_user_u32(arg1, arg6);
13917         return mem_value;
13918     }
13919 #endif
13920 #ifdef TARGET_NR_atomic_barrier
13921     case TARGET_NR_atomic_barrier:
13922         /* Like the kernel implementation and the QEMU arm
13923            barrier, this is treated as a no-op. */
13924         return 0;
13925 #endif
13926 
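    /*
     * POSIX timers: host timer_t handles live in the g_posix_timers table
     * and the guest is handed TIMER_MAGIC | slot-index as its timer id;
     * get_timer_id() validates and decodes that value in the other
     * timer_* cases below, and free_host_timer_slot() releases the slot
     * again on failure or timer_delete.
     */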
13927 #ifdef TARGET_NR_timer_create
13928     case TARGET_NR_timer_create:
13929     {
13930         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13931 
13932         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13933 
13934         int clkid = arg1;
13935         int timer_index = next_free_host_timer();
13936 
13937         if (timer_index < 0) {
13938             ret = -TARGET_EAGAIN;
13939         } else {
13940             timer_t *phtimer = g_posix_timers + timer_index;
13941 
13942             if (arg2) {
13943                 phost_sevp = &host_sevp;
13944                 ret = target_to_host_sigevent(phost_sevp, arg2);
13945                 if (ret != 0) {
13946                     free_host_timer_slot(timer_index);
13947                     return ret;
13948                 }
13949             }
13950 
13951             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13952             if (ret) {
13953                 free_host_timer_slot(timer_index);
13954             } else {
13955                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13956                     timer_delete(*phtimer);
13957                     free_host_timer_slot(timer_index);
13958                     return -TARGET_EFAULT;
13959                 }
13960             }
13961         }
13962         return ret;
13963     }
13964 #endif
13965 
13966 #ifdef TARGET_NR_timer_settime
13967     case TARGET_NR_timer_settime:
13968     {
13969         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13970          * struct itimerspec * old_value */
13971         target_timer_t timerid = get_timer_id(arg1);
13972 
13973         if (timerid < 0) {
13974             ret = timerid;
13975         } else if (arg3 == 0) {
13976             ret = -TARGET_EINVAL;
13977         } else {
13978             timer_t htimer = g_posix_timers[timerid];
13979             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13980 
13981             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13982                 return -TARGET_EFAULT;
13983             }
13984             ret = get_errno(
13985                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13986             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13987                 return -TARGET_EFAULT;
13988             }
13989         }
13990         return ret;
13991     }
13992 #endif
13993 
13994 #ifdef TARGET_NR_timer_settime64
13995     case TARGET_NR_timer_settime64:
13996     {
13997         target_timer_t timerid = get_timer_id(arg1);
13998 
13999         if (timerid < 0) {
14000             ret = timerid;
14001         } else if (arg3 == 0) {
14002             ret = -TARGET_EINVAL;
14003         } else {
14004             timer_t htimer = g_posix_timers[timerid];
14005             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
14006 
14007             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
14008                 return -TARGET_EFAULT;
14009             }
14010             ret = get_errno(
14011                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
14012             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
14013                 return -TARGET_EFAULT;
14014             }
14015         }
14016         return ret;
14017     }
14018 #endif
14019 
14020 #ifdef TARGET_NR_timer_gettime
14021     case TARGET_NR_timer_gettime:
14022     {
14023         /* args: timer_t timerid, struct itimerspec *curr_value */
14024         target_timer_t timerid = get_timer_id(arg1);
14025 
14026         if (timerid < 0) {
14027             ret = timerid;
14028         } else if (!arg2) {
14029             ret = -TARGET_EFAULT;
14030         } else {
14031             timer_t htimer = g_posix_timers[timerid];
14032             struct itimerspec hspec;
14033             ret = get_errno(timer_gettime(htimer, &hspec));
14034 
14035             if (host_to_target_itimerspec(arg2, &hspec)) {
14036                 ret = -TARGET_EFAULT;
14037             }
14038         }
14039         return ret;
14040     }
14041 #endif
14042 
14043 #ifdef TARGET_NR_timer_gettime64
14044     case TARGET_NR_timer_gettime64:
14045     {
14046         /* args: timer_t timerid, struct itimerspec64 *curr_value */
14047         target_timer_t timerid = get_timer_id(arg1);
14048 
14049         if (timerid < 0) {
14050             ret = timerid;
14051         } else if (!arg2) {
14052             ret = -TARGET_EFAULT;
14053         } else {
14054             timer_t htimer = g_posix_timers[timerid];
14055             struct itimerspec hspec;
14056             ret = get_errno(timer_gettime(htimer, &hspec));
14057 
14058             if (host_to_target_itimerspec64(arg2, &hspec)) {
14059                 ret = -TARGET_EFAULT;
14060             }
14061         }
14062         return ret;
14063     }
14064 #endif
14065 
14066 #ifdef TARGET_NR_timer_getoverrun
14067     case TARGET_NR_timer_getoverrun:
14068     {
14069         /* args: timer_t timerid */
14070         target_timer_t timerid = get_timer_id(arg1);
14071 
14072         if (timerid < 0) {
14073             ret = timerid;
14074         } else {
14075             timer_t htimer = g_posix_timers[timerid];
14076             ret = get_errno(timer_getoverrun(htimer));
14077         }
14078         return ret;
14079     }
14080 #endif
14081 
14082 #ifdef TARGET_NR_timer_delete
14083     case TARGET_NR_timer_delete:
14084     {
14085         /* args: timer_t timerid */
14086         target_timer_t timerid = get_timer_id(arg1);
14087 
14088         if (timerid < 0) {
14089             ret = timerid;
14090         } else {
14091             timer_t htimer = g_posix_timers[timerid];
14092             ret = get_errno(timer_delete(htimer));
14093             free_host_timer_slot(timerid);
14094         }
14095         return ret;
14096     }
14097 #endif
14098 
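    /*
     * timerfd_create: the new descriptor is registered with
     * target_timerfd_trans so that the expiration counter read from it
     * can be presented to the guest in its own byte order.
     */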
14099 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
14100     case TARGET_NR_timerfd_create:
14101         ret = get_errno(timerfd_create(arg1,
14102                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
14103         if (ret >= 0) {
14104             fd_trans_register(ret, &target_timerfd_trans);
14105         }
14106         return ret;
14107 #endif
14108 
14109 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
14110     case TARGET_NR_timerfd_gettime:
14111         {
14112             struct itimerspec its_curr;
14113 
14114             ret = get_errno(timerfd_gettime(arg1, &its_curr));
14115 
14116             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
14117                 return -TARGET_EFAULT;
14118             }
14119         }
14120         return ret;
14121 #endif
14122 
14123 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
14124     case TARGET_NR_timerfd_gettime64:
14125         {
14126             struct itimerspec its_curr;
14127 
14128             ret = get_errno(timerfd_gettime(arg1, &its_curr));
14129 
14130             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
14131                 return -TARGET_EFAULT;
14132             }
14133         }
14134         return ret;
14135 #endif
14136 
14137 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
14138     case TARGET_NR_timerfd_settime:
14139         {
14140             struct itimerspec its_new, its_old, *p_new;
14141 
14142             if (arg3) {
14143                 if (target_to_host_itimerspec(&its_new, arg3)) {
14144                     return -TARGET_EFAULT;
14145                 }
14146                 p_new = &its_new;
14147             } else {
14148                 p_new = NULL;
14149             }
14150 
14151             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
14152 
14153             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
14154                 return -TARGET_EFAULT;
14155             }
14156         }
14157         return ret;
14158 #endif
14159 
14160 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
14161     case TARGET_NR_timerfd_settime64:
14162         {
14163             struct itimerspec its_new, its_old, *p_new;
14164 
14165             if (arg3) {
14166                 if (target_to_host_itimerspec64(&its_new, arg3)) {
14167                     return -TARGET_EFAULT;
14168                 }
14169                 p_new = &its_new;
14170             } else {
14171                 p_new = NULL;
14172             }
14173 
14174             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
14175 
14176             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
14177                 return -TARGET_EFAULT;
14178             }
14179         }
14180         return ret;
14181 #endif
14182 
14183 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
14184     case TARGET_NR_ioprio_get:
14185         return get_errno(ioprio_get(arg1, arg2));
14186 #endif
14187 
14188 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
14189     case TARGET_NR_ioprio_set:
14190         return get_errno(ioprio_set(arg1, arg2, arg3));
14191 #endif
14192 
14193 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
14194     case TARGET_NR_setns:
14195         return get_errno(setns(arg1, arg2));
14196 #endif
14197 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
14198     case TARGET_NR_unshare:
14199         return get_errno(unshare(arg1));
14200 #endif
14201 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
14202     case TARGET_NR_kcmp:
14203         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
14204 #endif
14205 #ifdef TARGET_NR_swapcontext
14206     case TARGET_NR_swapcontext:
14207         /* PowerPC specific.  */
14208         return do_swapcontext(cpu_env, arg1, arg2, arg3);
14209 #endif
14210 #ifdef TARGET_NR_memfd_create
14211     case TARGET_NR_memfd_create:
14212         p = lock_user_string(arg1);
14213         if (!p) {
14214             return -TARGET_EFAULT;
14215         }
14216         ret = get_errno(memfd_create(p, arg2));
14217         fd_trans_unregister(ret);
14218         unlock_user(p, arg1, 0);
14219         return ret;
14220 #endif
14221 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
14222     case TARGET_NR_membarrier:
14223         return get_errno(membarrier(arg1, arg2));
14224 #endif
14225 
14226 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
14227     case TARGET_NR_copy_file_range:
14228         {
14229             loff_t inoff, outoff;
14230             loff_t *pinoff = NULL, *poutoff = NULL;
14231 
14232             if (arg2) {
14233                 if (get_user_u64(inoff, arg2)) {
14234                     return -TARGET_EFAULT;
14235                 }
14236                 pinoff = &inoff;
14237             }
14238             if (arg4) {
14239                 if (get_user_u64(outoff, arg4)) {
14240                     return -TARGET_EFAULT;
14241                 }
14242                 poutoff = &outoff;
14243             }
14244             /* Do not sign-extend the count parameter. */
14245             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
14246                                                  (abi_ulong)arg5, arg6));
14247             if (!is_error(ret) && ret > 0) {
14248                 if (arg2) {
14249                     if (put_user_u64(inoff, arg2)) {
14250                         return -TARGET_EFAULT;
14251                     }
14252                 }
14253                 if (arg4) {
14254                     if (put_user_u64(outoff, arg4)) {
14255                         return -TARGET_EFAULT;
14256                     }
14257                 }
14258             }
14259         }
14260         return ret;
14261 #endif
14262 
14263 #if defined(TARGET_NR_pivot_root)
14264     case TARGET_NR_pivot_root:
14265         {
14266             void *p2;
14267             p = lock_user_string(arg1); /* new_root */
14268             p2 = lock_user_string(arg2); /* put_old */
14269             if (!p || !p2) {
14270                 ret = -TARGET_EFAULT;
14271             } else {
14272                 ret = get_errno(pivot_root(p, p2));
14273             }
14274             unlock_user(p2, arg2, 0);
14275             unlock_user(p, arg1, 0);
14276         }
14277         return ret;
14278 #endif
14279 
14280 #if defined(TARGET_NR_riscv_hwprobe)
14281     case TARGET_NR_riscv_hwprobe:
14282         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
14283 #endif
14284 
14285 #ifdef TARGET_AARCH64
14286     case TARGET_NR_map_shadow_stack:
14287         return do_map_shadow_stack(cpu_env, arg1, arg2, arg3);
14288 #endif
14289 
14290     default:
14291         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
14292         return -TARGET_ENOSYS;
14293     }
14294     return ret;
14295 }
14296 
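/*
 * Syscall user dispatch (prctl PR_SET_SYSCALL_USER_DISPATCH): returns true
 * if the syscall must not be executed, in which case the appropriate signal
 * (SIGSYS, or SIGSEGV if the selector byte cannot be read) has already been
 * raised for the guest; returns false when the syscall should proceed.
 */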
14297 static bool sys_dispatch(CPUState *cpu, TaskState *ts)
14298 {
14299     abi_ptr pc;
14300 
14301     if (likely(ts->sys_dispatch_len == -1)) {
14302         return false;
14303     }
14304 
14305     pc = cpu->cc->get_pc(cpu);
14306     if (likely(pc - ts->sys_dispatch < ts->sys_dispatch_len)) {
14307         return false;
14308     }
14309     if (unlikely(is_vdso_sigreturn(pc))) {
14310         return false;
14311     }
14312     if (likely(ts->sys_dispatch_selector)) {
14313         uint8_t sb;
14314         if (get_user_u8(sb, ts->sys_dispatch_selector)) {
14315             force_sig(TARGET_SIGSEGV);
14316             return true;
14317         }
14318         if (likely(sb == SYSCALL_DISPATCH_FILTER_ALLOW)) {
14319             return false;
14320         }
14321         if (unlikely(sb != SYSCALL_DISPATCH_FILTER_BLOCK)) {
14322             force_sig(TARGET_SIGSYS);
14323             return true;
14324         }
14325     }
14326     force_sig_fault(TARGET_SIGSYS, TARGET_SYS_USER_DISPATCH, pc);
14327     return true;
14328 }
14329 
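/*
 * Top-level syscall entry point: applies the syscall-user-dispatch check,
 * emits optional strace and plugin records around do_syscall1(), and
 * returns either the host result or one of the special -QEMU_ERESTARTSYS /
 * -QEMU_ESIGRETURN values handled by the per-architecture cpu loops.
 */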
14330 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
14331                     abi_long arg2, abi_long arg3, abi_long arg4,
14332                     abi_long arg5, abi_long arg6, abi_long arg7,
14333                     abi_long arg8)
14334 {
14335     CPUState *cpu = env_cpu(cpu_env);
14336     TaskState *ts = get_task_state(cpu);
14337     abi_long ret;
14338 
14339 #ifdef DEBUG_ERESTARTSYS
14340     /* Debug-only code for exercising the syscall-restart code paths
14341      * in the per-architecture cpu main loops: restart every syscall
14342      * the guest makes once before letting it through.
14343      */
14344     {
14345         static bool flag;
14346         flag = !flag;
14347         if (flag) {
14348             return -QEMU_ERESTARTSYS;
14349         }
14350     }
14351 #endif
14352 
14353     if (sys_dispatch(cpu, ts)) {
14354         return -QEMU_ESIGRETURN;
14355     }
14356 
14357     record_syscall_start(cpu, num, arg1,
14358                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
14359 
14360     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
14361         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
14362     }
14363 
14364     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
14365                       arg5, arg6, arg7, arg8);
14366 
14367     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
14368         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
14369                           arg3, arg4, arg5, arg6);
14370     }
14371 
14372     record_syscall_return(cpu, num, ret);
14373     return ret;
14374 }
14375