1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include "qemu/memfd.h" 24 #include "qemu/queue.h" 25 #include <elf.h> 26 #include <endian.h> 27 #include <grp.h> 28 #include <sys/ipc.h> 29 #include <sys/msg.h> 30 #include <sys/wait.h> 31 #include <sys/mount.h> 32 #include <sys/file.h> 33 #include <sys/fsuid.h> 34 #include <sys/personality.h> 35 #include <sys/prctl.h> 36 #include <sys/resource.h> 37 #include <sys/swap.h> 38 #include <linux/capability.h> 39 #include <sched.h> 40 #include <sys/timex.h> 41 #include <sys/socket.h> 42 #include <linux/sockios.h> 43 #include <sys/un.h> 44 #include <sys/uio.h> 45 #include <poll.h> 46 #include <sys/times.h> 47 #include <sys/shm.h> 48 #include <sys/sem.h> 49 #include <sys/statfs.h> 50 #include <utime.h> 51 #include <sys/sysinfo.h> 52 #include <sys/signalfd.h> 53 //#include <sys/user.h> 54 #include <netinet/in.h> 55 #include <netinet/ip.h> 56 #include <netinet/tcp.h> 57 #include <netinet/udp.h> 58 #include <linux/wireless.h> 59 #include <linux/icmp.h> 60 #include <linux/icmpv6.h> 61 #include <linux/if_tun.h> 62 #include <linux/in6.h> 63 #include <linux/errqueue.h> 64 #include <linux/random.h> 65 #ifdef CONFIG_TIMERFD 66 #include 
<sys/timerfd.h> 67 #endif 68 #ifdef CONFIG_EVENTFD 69 #include <sys/eventfd.h> 70 #endif 71 #ifdef CONFIG_EPOLL 72 #include <sys/epoll.h> 73 #endif 74 #ifdef CONFIG_ATTR 75 #include "qemu/xattr.h" 76 #endif 77 #ifdef CONFIG_SENDFILE 78 #include <sys/sendfile.h> 79 #endif 80 #ifdef HAVE_SYS_KCOV_H 81 #include <sys/kcov.h> 82 #endif 83 84 #define termios host_termios 85 #define winsize host_winsize 86 #define termio host_termio 87 #define sgttyb host_sgttyb /* same as target */ 88 #define tchars host_tchars /* same as target */ 89 #define ltchars host_ltchars /* same as target */ 90 91 #include <linux/termios.h> 92 #include <linux/unistd.h> 93 #include <linux/cdrom.h> 94 #include <linux/hdreg.h> 95 #include <linux/soundcard.h> 96 #include <linux/kd.h> 97 #include <linux/mtio.h> 98 99 #ifdef HAVE_SYS_MOUNT_FSCONFIG 100 /* 101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h, 102 * which in turn prevents use of linux/fs.h. So we have to 103 * define the constants ourselves for now. 104 */ 105 #define FS_IOC_GETFLAGS _IOR('f', 1, long) 106 #define FS_IOC_SETFLAGS _IOW('f', 2, long) 107 #define FS_IOC_GETVERSION _IOR('v', 1, long) 108 #define FS_IOC_SETVERSION _IOW('v', 2, long) 109 #define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) 110 #define FS_IOC32_GETFLAGS _IOR('f', 1, int) 111 #define FS_IOC32_SETFLAGS _IOW('f', 2, int) 112 #define FS_IOC32_GETVERSION _IOR('v', 1, int) 113 #define FS_IOC32_SETVERSION _IOW('v', 2, int) 114 #else 115 #include <linux/fs.h> 116 #endif 117 #include <linux/fd.h> 118 #if defined(CONFIG_FIEMAP) 119 #include <linux/fiemap.h> 120 #endif 121 #include <linux/fb.h> 122 #if defined(CONFIG_USBFS) 123 #include <linux/usbdevice_fs.h> 124 #include <linux/usb/ch9.h> 125 #endif 126 #include <linux/vt.h> 127 #include <linux/dm-ioctl.h> 128 #include <linux/reboot.h> 129 #include <linux/route.h> 130 #include <linux/filter.h> 131 #include <linux/blkpg.h> 132 #include <netpacket/packet.h> 133 #include <linux/netlink.h> 134 #include 
<linux/if_alg.h> 135 #include <linux/rtc.h> 136 #include <sound/asound.h> 137 #ifdef HAVE_BTRFS_H 138 #include <linux/btrfs.h> 139 #endif 140 #ifdef HAVE_DRM_H 141 #include <libdrm/drm.h> 142 #include <libdrm/i915_drm.h> 143 #endif 144 #include "linux_loop.h" 145 #include "uname.h" 146 147 #include "qemu.h" 148 #include "user-internals.h" 149 #include "strace.h" 150 #include "signal-common.h" 151 #include "loader.h" 152 #include "user-mmap.h" 153 #include "user/safe-syscall.h" 154 #include "qemu/guest-random.h" 155 #include "qemu/selfmap.h" 156 #include "user/syscall-trace.h" 157 #include "special-errno.h" 158 #include "qapi/error.h" 159 #include "fd-trans.h" 160 #include "tcg/tcg.h" 161 162 #ifndef CLONE_IO 163 #define CLONE_IO 0x80000000 /* Clone io context */ 164 #endif 165 166 /* We can't directly call the host clone syscall, because this will 167 * badly confuse libc (breaking mutexes, for example). So we must 168 * divide clone flags into: 169 * * flag combinations that look like pthread_create() 170 * * flag combinations that look like fork() 171 * * flags we can implement within QEMU itself 172 * * flags we can't support and will return an error for 173 */ 174 /* For thread creation, all these flags must be present; for 175 * fork, none must be present. 
176 */ 177 #define CLONE_THREAD_FLAGS \ 178 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 179 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 180 181 /* These flags are ignored: 182 * CLONE_DETACHED is now ignored by the kernel; 183 * CLONE_IO is just an optimisation hint to the I/O scheduler 184 */ 185 #define CLONE_IGNORED_FLAGS \ 186 (CLONE_DETACHED | CLONE_IO) 187 188 /* Flags for fork which we can implement within QEMU itself */ 189 #define CLONE_OPTIONAL_FORK_FLAGS \ 190 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 191 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 192 193 /* Flags for thread creation which we can implement within QEMU itself */ 194 #define CLONE_OPTIONAL_THREAD_FLAGS \ 195 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 196 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 197 198 #define CLONE_INVALID_FORK_FLAGS \ 199 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 200 201 #define CLONE_INVALID_THREAD_FLAGS \ 202 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 203 CLONE_IGNORED_FLAGS)) 204 205 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 206 * have almost all been allocated. We cannot support any of 207 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 208 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 209 * The checks against the invalid thread masks above will catch these. 210 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 211 */ 212 213 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 214 * once. This exercises the codepaths for restart. 
215 */ 216 //#define DEBUG_ERESTARTSYS 217 218 //#include <linux/msdos_fs.h> 219 #define VFAT_IOCTL_READDIR_BOTH \ 220 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2) 221 #define VFAT_IOCTL_READDIR_SHORT \ 222 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2) 223 224 #undef _syscall0 225 #undef _syscall1 226 #undef _syscall2 227 #undef _syscall3 228 #undef _syscall4 229 #undef _syscall5 230 #undef _syscall6 231 232 #define _syscall0(type,name) \ 233 static type name (void) \ 234 { \ 235 return syscall(__NR_##name); \ 236 } 237 238 #define _syscall1(type,name,type1,arg1) \ 239 static type name (type1 arg1) \ 240 { \ 241 return syscall(__NR_##name, arg1); \ 242 } 243 244 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 245 static type name (type1 arg1,type2 arg2) \ 246 { \ 247 return syscall(__NR_##name, arg1, arg2); \ 248 } 249 250 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 251 static type name (type1 arg1,type2 arg2,type3 arg3) \ 252 { \ 253 return syscall(__NR_##name, arg1, arg2, arg3); \ 254 } 255 256 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 257 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 258 { \ 259 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 260 } 261 262 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 263 type5,arg5) \ 264 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 265 { \ 266 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 267 } 268 269 270 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 271 type5,arg5,type6,arg6) \ 272 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 273 type6 arg6) \ 274 { \ 275 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 276 } 277 278 279 #define __NR_sys_uname __NR_uname 280 #define __NR_sys_getcwd1 __NR_getcwd 281 #define __NR_sys_getdents __NR_getdents 282 #define 
__NR_sys_getdents64 __NR_getdents64 283 #define __NR_sys_getpriority __NR_getpriority 284 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 285 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 286 #define __NR_sys_syslog __NR_syslog 287 #if defined(__NR_futex) 288 # define __NR_sys_futex __NR_futex 289 #endif 290 #if defined(__NR_futex_time64) 291 # define __NR_sys_futex_time64 __NR_futex_time64 292 #endif 293 #define __NR_sys_statx __NR_statx 294 295 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 296 #define __NR__llseek __NR_lseek 297 #endif 298 299 /* Newer kernel ports have llseek() instead of _llseek() */ 300 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 301 #define TARGET_NR__llseek TARGET_NR_llseek 302 #endif 303 304 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */ 305 #ifndef TARGET_O_NONBLOCK_MASK 306 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK 307 #endif 308 309 #define __NR_sys_gettid __NR_gettid 310 _syscall0(int, sys_gettid) 311 312 /* For the 64-bit guest on 32-bit host case we must emulate 313 * getdents using getdents64, because otherwise the host 314 * might hand us back more dirent records than we can fit 315 * into the guest buffer after structure format conversion. 316 * Otherwise we emulate getdents with getdents if the host has it. 
317 */ 318 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS 319 #define EMULATE_GETDENTS_WITH_GETDENTS 320 #endif 321 322 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS) 323 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 324 #endif 325 #if (defined(TARGET_NR_getdents) && \ 326 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \ 327 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 328 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 329 #endif 330 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 331 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 332 loff_t *, res, uint, wh); 333 #endif 334 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 335 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 336 siginfo_t *, uinfo) 337 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 338 #ifdef __NR_exit_group 339 _syscall1(int,exit_group,int,error_code) 340 #endif 341 #if defined(__NR_futex) 342 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 343 const struct timespec *,timeout,int *,uaddr2,int,val3) 344 #endif 345 #if defined(__NR_futex_time64) 346 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val, 347 const struct timespec *,timeout,int *,uaddr2,int,val3) 348 #endif 349 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) 350 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags); 351 #endif 352 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) 353 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info, 354 unsigned int, flags); 355 #endif 356 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) 357 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags); 358 #endif 359 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 360 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, 
len, 361 unsigned long *, user_mask_ptr); 362 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 363 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 364 unsigned long *, user_mask_ptr); 365 /* sched_attr is not defined in glibc */ 366 struct sched_attr { 367 uint32_t size; 368 uint32_t sched_policy; 369 uint64_t sched_flags; 370 int32_t sched_nice; 371 uint32_t sched_priority; 372 uint64_t sched_runtime; 373 uint64_t sched_deadline; 374 uint64_t sched_period; 375 uint32_t sched_util_min; 376 uint32_t sched_util_max; 377 }; 378 #define __NR_sys_sched_getattr __NR_sched_getattr 379 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr, 380 unsigned int, size, unsigned int, flags); 381 #define __NR_sys_sched_setattr __NR_sched_setattr 382 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr, 383 unsigned int, flags); 384 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler 385 _syscall1(int, sys_sched_getscheduler, pid_t, pid); 386 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler 387 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy, 388 const struct sched_param *, param); 389 #define __NR_sys_sched_getparam __NR_sched_getparam 390 _syscall2(int, sys_sched_getparam, pid_t, pid, 391 struct sched_param *, param); 392 #define __NR_sys_sched_setparam __NR_sched_setparam 393 _syscall2(int, sys_sched_setparam, pid_t, pid, 394 const struct sched_param *, param); 395 #define __NR_sys_getcpu __NR_getcpu 396 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 397 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 398 void *, arg); 399 _syscall2(int, capget, struct __user_cap_header_struct *, header, 400 struct __user_cap_data_struct *, data); 401 _syscall2(int, capset, struct __user_cap_header_struct *, header, 402 struct __user_cap_data_struct *, data); 403 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 404 _syscall2(int, 
ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

/*
 * Translation table between target and host open(2)/fcntl(2) flag bits.
 * Each entry is { target mask, target bits, host mask, host bits }; on
 * hosts/targets where the numeric values coincide the entries are
 * identity mappings.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Fallback when the host lacks utimensat: always fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/*
 * Fallback when the host lacks renameat2: plain renameat() covers the
 * flags == 0 case; any non-zero flags request fails with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define
__NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
/* 1 while the matching slot in g_posix_timers is in use, 0 when free */
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

/*
 * Claim the first free guest timer slot and return its index, or -1 if
 * all GUEST_TIMER_MAX slots are taken.  The atomic exchange tests and
 * claims the slot in one step, so concurrent callers cannot both win
 * the same slot.
 */
static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

/* Release a slot previously claimed by next_free_host_timer(). */
static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif

/*
 * Map a host errno value to the target's numbering.  The mapping table
 * is generated by expanding errnos.c.inc inside the switch; any errno
 * with no explicit entry is passed through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X) case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

/* Inverse of host_to_target_errno(): map a target errno to the host's. */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X) case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

/*
 * Convert a host syscall result to a target one: -1 becomes the
 * negated target errno taken from the current errno, any other value
 * is returned unchanged.
 */
abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

/*
 * strerror() for target errno values, including QEMU's two internal
 * pseudo-errnos which have no host equivalent.
 */
const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

/*
 * Check that guest memory in [addr + ksize, addr + usize) is all zero
 * bytes.  Returns 1 if so (trivially when usize <= ksize), 0 when a
 * non-zero byte is found, or -TARGET_EFAULT if the memory cannot be
 * read.
 */
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
uint8_t b; 595 if (usize <= ksize) { 596 return 1; 597 } 598 for (i = ksize; i < usize; i++) { 599 if (get_user_u8(b, addr + i)) { 600 return -TARGET_EFAULT; 601 } 602 if (b != 0) { 603 return 0; 604 } 605 } 606 return 1; 607 } 608 609 #define safe_syscall0(type, name) \ 610 static type safe_##name(void) \ 611 { \ 612 return safe_syscall(__NR_##name); \ 613 } 614 615 #define safe_syscall1(type, name, type1, arg1) \ 616 static type safe_##name(type1 arg1) \ 617 { \ 618 return safe_syscall(__NR_##name, arg1); \ 619 } 620 621 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \ 622 static type safe_##name(type1 arg1, type2 arg2) \ 623 { \ 624 return safe_syscall(__NR_##name, arg1, arg2); \ 625 } 626 627 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 628 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ 629 { \ 630 return safe_syscall(__NR_##name, arg1, arg2, arg3); \ 631 } 632 633 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 634 type4, arg4) \ 635 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 636 { \ 637 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 638 } 639 640 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 641 type4, arg4, type5, arg5) \ 642 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 643 type5 arg5) \ 644 { \ 645 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 646 } 647 648 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 649 type4, arg4, type5, arg5, type6, arg6) \ 650 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 651 type5 arg5, type6 arg6) \ 652 { \ 653 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 654 } 655 656 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) 657 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) 658 safe_syscall4(int, openat, int, 
dirfd, const char *, pathname, \ 659 int, flags, mode_t, mode) 660 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid) 661 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ 662 struct rusage *, rusage) 663 #endif 664 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \ 665 int, options, struct rusage *, rusage) 666 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp) 667 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \ 668 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 669 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \ 670 fd_set *, exceptfds, struct timespec *, timeout, void *, sig) 671 #endif 672 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64) 673 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds, 674 struct timespec *, tsp, const sigset_t *, sigmask, 675 size_t, sigsetsize) 676 #endif 677 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events, 678 int, maxevents, int, timeout, const sigset_t *, sigmask, 679 size_t, sigsetsize) 680 #if defined(__NR_futex) 681 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \ 682 const struct timespec *,timeout,int *,uaddr2,int,val3) 683 #endif 684 #if defined(__NR_futex_time64) 685 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \ 686 const struct timespec *,timeout,int *,uaddr2,int,val3) 687 #endif 688 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize) 689 safe_syscall2(int, kill, pid_t, pid, int, sig) 690 safe_syscall2(int, tkill, int, tid, int, sig) 691 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig) 692 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt) 693 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt) 694 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt, 695 unsigned long, pos_l, unsigned 
long, pos_h) 696 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, 697 unsigned long, pos_l, unsigned long, pos_h) 698 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr, 699 socklen_t, addrlen) 700 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len, 701 int, flags, const struct sockaddr *, addr, socklen_t, addrlen) 702 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len, 703 int, flags, struct sockaddr *, addr, socklen_t *, addrlen) 704 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags) 705 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags) 706 safe_syscall2(int, flock, int, fd, int, operation) 707 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64) 708 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo, 709 const struct timespec *, uts, size_t, sigsetsize) 710 #endif 711 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len, 712 int, flags) 713 #if defined(TARGET_NR_nanosleep) 714 safe_syscall2(int, nanosleep, const struct timespec *, req, 715 struct timespec *, rem) 716 #endif 717 #if defined(TARGET_NR_clock_nanosleep) || \ 718 defined(TARGET_NR_clock_nanosleep_time64) 719 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags, 720 const struct timespec *, req, struct timespec *, rem) 721 #endif 722 #ifdef __NR_ipc 723 #ifdef __s390x__ 724 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third, 725 void *, ptr) 726 #else 727 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third, 728 void *, ptr, long, fifth) 729 #endif 730 #endif 731 #ifdef __NR_msgsnd 732 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz, 733 int, flags) 734 #endif 735 #ifdef __NR_msgrcv 736 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz, 737 long, msgtype, int, flags) 738 #endif 739 
#ifdef __NR_semtimedop 740 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops, 741 unsigned, nsops, const struct timespec *, timeout) 742 #endif 743 #if defined(TARGET_NR_mq_timedsend) || \ 744 defined(TARGET_NR_mq_timedsend_time64) 745 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr, 746 size_t, len, unsigned, prio, const struct timespec *, timeout) 747 #endif 748 #if defined(TARGET_NR_mq_timedreceive) || \ 749 defined(TARGET_NR_mq_timedreceive_time64) 750 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr, 751 size_t, len, unsigned *, prio, const struct timespec *, timeout) 752 #endif 753 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range) 754 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff, 755 int, outfd, loff_t *, poutoff, size_t, length, 756 unsigned int, flags) 757 #endif 758 759 /* We do ioctl like this rather than via safe_syscall3 to preserve the 760 * "third argument might be integer or pointer or not present" behaviour of 761 * the libc function. 762 */ 763 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__) 764 /* Similarly for fcntl. Note that callers must always: 765 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK 766 * use the flock64 struct rather than unsuffixed flock 767 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts. 768 */ 769 #ifdef __NR_fcntl64 770 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__) 771 #else 772 #define safe_fcntl(...) 
safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/*
 * Translate a host socket type value into the target's encoding.  Only
 * SOCK_DGRAM and SOCK_STREAM are remapped; other base types in the low
 * nibble are passed through unchanged.  The SOCK_CLOEXEC/SOCK_NONBLOCK
 * flag bits are translated where the host defines them.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Current guest program break. */
static abi_ulong target_brk;
/* Break set by target_set_brk(); do_brk() never shrinks below this. */
static abi_ulong target_original_brk;
/* Host-page-aligned top of the pages currently reserved for the heap. */
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* A zero argument queries the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to move below the initial break; report the current one. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break.  */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Read an fd_set covering n descriptors from guest memory at
 * target_fds_addr (packed as abi_ulong words) into the host fd_set.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory cannot
 * be read.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/*
 * As copy_from_user_fdset(), but treat a NULL guest pointer as "no
 * set": *fds_ptr is pointed at fds when target_fds_addr is non-zero,
 * or set to NULL otherwise (matching select()'s optional arguments).
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/*
 * Write a host fd_set back to guest memory in the guest's packed
 * abi_ulong layout.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif

#if
defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count (HOST_HZ) to target ticks
 * (TARGET_HZ); identity when both HZ values match. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/* Byte-swap a host struct rusage into the guest's struct at
 * target_addr, field by field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
/* Convert a guest rlimit value to the host's rlim_t.  Both the guest
 * infinity marker and any value that does not round-trip through
 * rlim_t map to RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Convert a host rlim_t to the guest representation; host infinity and
 * values not representable in abi_long become TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

/* Map a guest RLIMIT_* resource code to the host's; unknown codes are
 * passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/* Read a guest struct timeval into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

/* Write a host struct timeval to the guest.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* Read a guest 64-bit (__kernel_sock_timeval) timeval into a host
 * struct timeval.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

/* Write a host struct timeval to a guest 64-bit
 * (__kernel_sock_timeval) timeval.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

/*
 * NOTE(review): defined(TARGET_NR_pselect6) appears twice in the guard
 * below; the second occurrence was presumably meant to be a different
 * syscall (e.g. TARGET_NR_ppoll) -- confirm against the syscall list.
 */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || \
defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a guest struct timespec (ABI-native width) into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/* Read a guest 64-bit (__kernel_timespec) timespec into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

/* Write a host struct timespec to a guest ABI-native timespec.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

/* Write a host struct timespec to a guest 64-bit (__kernel_timespec)
 * timespec.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
/* Write a host struct timezone to the guest.
 * Returns 0 or -TARGET_EFAULT.
 * NOTE(review): the final lock_user_struct() argument is 1 (copy-in)
 * even though this is a VERIFY_WRITE-only user; every other writer in
 * this file passes 0 -- presumably harmless, but confirm. */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
/* Read a guest struct timezone into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr into a host one.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr to the guest.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
*/
/* Emulate select(2): copy in the three guest fd sets and optional
 * timeout, run the host safe_pselect6(), then copy the (modified)
 * sets and remaining timeout back to the guest. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 wants a timespec, so convert from the timeval. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the remaining timeout, Linux-style. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Emulate old select(2) where all five arguments are packed in a
 * single guest struct pointed to by arg1. */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/* Emulate pselect6(2)/pselect6_time64(2); time64 selects the 64-bit
 * timespec layout for copy-in/copy-out. */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/* Emulate poll(2)/ppoll(2)/ppoll_time64(2).  ppoll selects the
 * sigmask+timespec variant; time64 selects the 64-bit timespec
 * layout for guest copy-in/copy-out. */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        /* NOTE(review): the -TARGET_EFAULT returns below leave
         * target_pfd locked, unlike the earlier error paths which
         * call unlock_user() first -- confirm whether intentional. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents =
tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

/* Emulate pipe(2)/pipe2(2).  Some targets return the second fd in a
 * register for the original pipe syscall instead of writing it to
 * memory; pipe2 always uses the memory convention. */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/* Read a guest ip_mreq/ip_mreqn into a host struct ip_mreqn; the
 * ifindex field is only present (and converted) when the guest passed
 * the full mreqn layout.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* Addresses stay network byte order; no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

/* Convert a guest sockaddr at target_addr (len bytes) into a host
 * struct sockaddr, fixing up family-specific fields and AF_UNIX
 * sun_path termination.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Per-fd override (e.g. netlink fds install their own converter). */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

/* Write a host struct sockaddr back to the guest buffer at
 * target_addr, byte-swapping family-specific fields.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the buffer is long enough to hold it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): this uses the *host* sockaddr_ll layout on the
         * target buffer, unlike the AF_NETLINK/AF_INET6 branches which
         * use target_* structs -- confirm the layouts match for all
         * supported targets. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        /* NOTE(review): sin6_scope_id is a 32-bit field in Linux;
         * tswap16 here looks suspicious -- confirm. */
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

/* Convert guest control-message (cmsg) ancillary data in target_msgh
 * into host form in msgh (SCM_RIGHTS fds, SCM_CREDENTIALS, or a raw
 * byte copy with a LOG_UNIMP warning for anything else).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors pass through unswapped apart from the
             * per-int byte order fixup. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* Convert host control-message (cmsg) ancillary data in msgh into
 * guest form in target_msgh, resizing payloads that differ between
 * host and target (e.g. SO_TIMESTAMP) and setting MSG_CTRUNC on
 * truncation.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra
             * target space. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.
*/ 2102 if (optlen < sizeof(uint32_t)) 2103 return -TARGET_EINVAL; 2104 2105 if (get_user_u32(val, optval_addr)) 2106 return -TARGET_EFAULT; 2107 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2108 break; 2109 case SOL_IP: 2110 switch(optname) { 2111 case IP_TOS: 2112 case IP_TTL: 2113 case IP_HDRINCL: 2114 case IP_ROUTER_ALERT: 2115 case IP_RECVOPTS: 2116 case IP_RETOPTS: 2117 case IP_PKTINFO: 2118 case IP_MTU_DISCOVER: 2119 case IP_RECVERR: 2120 case IP_RECVTTL: 2121 case IP_RECVTOS: 2122 #ifdef IP_FREEBIND 2123 case IP_FREEBIND: 2124 #endif 2125 case IP_MULTICAST_TTL: 2126 case IP_MULTICAST_LOOP: 2127 val = 0; 2128 if (optlen >= sizeof(uint32_t)) { 2129 if (get_user_u32(val, optval_addr)) 2130 return -TARGET_EFAULT; 2131 } else if (optlen >= 1) { 2132 if (get_user_u8(val, optval_addr)) 2133 return -TARGET_EFAULT; 2134 } 2135 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2136 break; 2137 case IP_ADD_MEMBERSHIP: 2138 case IP_DROP_MEMBERSHIP: 2139 if (optlen < sizeof (struct target_ip_mreq) || 2140 optlen > sizeof (struct target_ip_mreqn)) 2141 return -TARGET_EINVAL; 2142 2143 ip_mreq = (struct ip_mreqn *) alloca(optlen); 2144 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 2145 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 2146 break; 2147 2148 case IP_BLOCK_SOURCE: 2149 case IP_UNBLOCK_SOURCE: 2150 case IP_ADD_SOURCE_MEMBERSHIP: 2151 case IP_DROP_SOURCE_MEMBERSHIP: 2152 if (optlen != sizeof (struct target_ip_mreq_source)) 2153 return -TARGET_EINVAL; 2154 2155 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2156 if (!ip_mreq_source) { 2157 return -TARGET_EFAULT; 2158 } 2159 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 2160 unlock_user (ip_mreq_source, optval_addr, 0); 2161 break; 2162 2163 default: 2164 goto unimplemented; 2165 } 2166 break; 2167 case SOL_IPV6: 2168 switch (optname) { 2169 case IPV6_MTU_DISCOVER: 2170 case IPV6_MTU: 
2171 case IPV6_V6ONLY: 2172 case IPV6_RECVPKTINFO: 2173 case IPV6_UNICAST_HOPS: 2174 case IPV6_MULTICAST_HOPS: 2175 case IPV6_MULTICAST_LOOP: 2176 case IPV6_RECVERR: 2177 case IPV6_RECVHOPLIMIT: 2178 case IPV6_2292HOPLIMIT: 2179 case IPV6_CHECKSUM: 2180 case IPV6_ADDRFORM: 2181 case IPV6_2292PKTINFO: 2182 case IPV6_RECVTCLASS: 2183 case IPV6_RECVRTHDR: 2184 case IPV6_2292RTHDR: 2185 case IPV6_RECVHOPOPTS: 2186 case IPV6_2292HOPOPTS: 2187 case IPV6_RECVDSTOPTS: 2188 case IPV6_2292DSTOPTS: 2189 case IPV6_TCLASS: 2190 case IPV6_ADDR_PREFERENCES: 2191 #ifdef IPV6_RECVPATHMTU 2192 case IPV6_RECVPATHMTU: 2193 #endif 2194 #ifdef IPV6_TRANSPARENT 2195 case IPV6_TRANSPARENT: 2196 #endif 2197 #ifdef IPV6_FREEBIND 2198 case IPV6_FREEBIND: 2199 #endif 2200 #ifdef IPV6_RECVORIGDSTADDR 2201 case IPV6_RECVORIGDSTADDR: 2202 #endif 2203 val = 0; 2204 if (optlen < sizeof(uint32_t)) { 2205 return -TARGET_EINVAL; 2206 } 2207 if (get_user_u32(val, optval_addr)) { 2208 return -TARGET_EFAULT; 2209 } 2210 ret = get_errno(setsockopt(sockfd, level, optname, 2211 &val, sizeof(val))); 2212 break; 2213 case IPV6_PKTINFO: 2214 { 2215 struct in6_pktinfo pki; 2216 2217 if (optlen < sizeof(pki)) { 2218 return -TARGET_EINVAL; 2219 } 2220 2221 if (copy_from_user(&pki, optval_addr, sizeof(pki))) { 2222 return -TARGET_EFAULT; 2223 } 2224 2225 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); 2226 2227 ret = get_errno(setsockopt(sockfd, level, optname, 2228 &pki, sizeof(pki))); 2229 break; 2230 } 2231 case IPV6_ADD_MEMBERSHIP: 2232 case IPV6_DROP_MEMBERSHIP: 2233 { 2234 struct ipv6_mreq ipv6mreq; 2235 2236 if (optlen < sizeof(ipv6mreq)) { 2237 return -TARGET_EINVAL; 2238 } 2239 2240 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) { 2241 return -TARGET_EFAULT; 2242 } 2243 2244 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface); 2245 2246 ret = get_errno(setsockopt(sockfd, level, optname, 2247 &ipv6mreq, sizeof(ipv6mreq))); 2248 break; 2249 } 2250 default: 2251 goto unimplemented; 
2252 } 2253 break; 2254 case SOL_ICMPV6: 2255 switch (optname) { 2256 case ICMPV6_FILTER: 2257 { 2258 struct icmp6_filter icmp6f; 2259 2260 if (optlen > sizeof(icmp6f)) { 2261 optlen = sizeof(icmp6f); 2262 } 2263 2264 if (copy_from_user(&icmp6f, optval_addr, optlen)) { 2265 return -TARGET_EFAULT; 2266 } 2267 2268 for (val = 0; val < 8; val++) { 2269 icmp6f.data[val] = tswap32(icmp6f.data[val]); 2270 } 2271 2272 ret = get_errno(setsockopt(sockfd, level, optname, 2273 &icmp6f, optlen)); 2274 break; 2275 } 2276 default: 2277 goto unimplemented; 2278 } 2279 break; 2280 case SOL_RAW: 2281 switch (optname) { 2282 case ICMP_FILTER: 2283 case IPV6_CHECKSUM: 2284 /* those take an u32 value */ 2285 if (optlen < sizeof(uint32_t)) { 2286 return -TARGET_EINVAL; 2287 } 2288 2289 if (get_user_u32(val, optval_addr)) { 2290 return -TARGET_EFAULT; 2291 } 2292 ret = get_errno(setsockopt(sockfd, level, optname, 2293 &val, sizeof(val))); 2294 break; 2295 2296 default: 2297 goto unimplemented; 2298 } 2299 break; 2300 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE) 2301 case SOL_ALG: 2302 switch (optname) { 2303 case ALG_SET_KEY: 2304 { 2305 char *alg_key = g_malloc(optlen); 2306 2307 if (!alg_key) { 2308 return -TARGET_ENOMEM; 2309 } 2310 if (copy_from_user(alg_key, optval_addr, optlen)) { 2311 g_free(alg_key); 2312 return -TARGET_EFAULT; 2313 } 2314 ret = get_errno(setsockopt(sockfd, level, optname, 2315 alg_key, optlen)); 2316 g_free(alg_key); 2317 break; 2318 } 2319 case ALG_SET_AEAD_AUTHSIZE: 2320 { 2321 ret = get_errno(setsockopt(sockfd, level, optname, 2322 NULL, optlen)); 2323 break; 2324 } 2325 default: 2326 goto unimplemented; 2327 } 2328 break; 2329 #endif 2330 case TARGET_SOL_SOCKET: 2331 switch (optname) { 2332 case TARGET_SO_RCVTIMEO: 2333 { 2334 struct timeval tv; 2335 2336 optname = SO_RCVTIMEO; 2337 2338 set_timeout: 2339 if (optlen != sizeof(struct target_timeval)) { 2340 return -TARGET_EINVAL; 2341 } 2342 2343 if 
(copy_from_user_timeval(&tv, optval_addr)) { 2344 return -TARGET_EFAULT; 2345 } 2346 2347 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2348 &tv, sizeof(tv))); 2349 return ret; 2350 } 2351 case TARGET_SO_SNDTIMEO: 2352 optname = SO_SNDTIMEO; 2353 goto set_timeout; 2354 case TARGET_SO_ATTACH_FILTER: 2355 { 2356 struct target_sock_fprog *tfprog; 2357 struct target_sock_filter *tfilter; 2358 struct sock_fprog fprog; 2359 struct sock_filter *filter; 2360 int i; 2361 2362 if (optlen != sizeof(*tfprog)) { 2363 return -TARGET_EINVAL; 2364 } 2365 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 2366 return -TARGET_EFAULT; 2367 } 2368 if (!lock_user_struct(VERIFY_READ, tfilter, 2369 tswapal(tfprog->filter), 0)) { 2370 unlock_user_struct(tfprog, optval_addr, 1); 2371 return -TARGET_EFAULT; 2372 } 2373 2374 fprog.len = tswap16(tfprog->len); 2375 filter = g_try_new(struct sock_filter, fprog.len); 2376 if (filter == NULL) { 2377 unlock_user_struct(tfilter, tfprog->filter, 1); 2378 unlock_user_struct(tfprog, optval_addr, 1); 2379 return -TARGET_ENOMEM; 2380 } 2381 for (i = 0; i < fprog.len; i++) { 2382 filter[i].code = tswap16(tfilter[i].code); 2383 filter[i].jt = tfilter[i].jt; 2384 filter[i].jf = tfilter[i].jf; 2385 filter[i].k = tswap32(tfilter[i].k); 2386 } 2387 fprog.filter = filter; 2388 2389 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 2390 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 2391 g_free(filter); 2392 2393 unlock_user_struct(tfilter, tfprog->filter, 1); 2394 unlock_user_struct(tfprog, optval_addr, 1); 2395 return ret; 2396 } 2397 case TARGET_SO_BINDTODEVICE: 2398 { 2399 char *dev_ifname, *addr_ifname; 2400 2401 if (optlen > IFNAMSIZ - 1) { 2402 optlen = IFNAMSIZ - 1; 2403 } 2404 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2405 if (!dev_ifname) { 2406 return -TARGET_EFAULT; 2407 } 2408 optname = SO_BINDTODEVICE; 2409 addr_ifname = alloca(IFNAMSIZ); 2410 memcpy(addr_ifname, dev_ifname, optlen); 2411 addr_ifname[optlen] = 0; 
2412 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2413 addr_ifname, optlen)); 2414 unlock_user (dev_ifname, optval_addr, 0); 2415 return ret; 2416 } 2417 case TARGET_SO_LINGER: 2418 { 2419 struct linger lg; 2420 struct target_linger *tlg; 2421 2422 if (optlen != sizeof(struct target_linger)) { 2423 return -TARGET_EINVAL; 2424 } 2425 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) { 2426 return -TARGET_EFAULT; 2427 } 2428 __get_user(lg.l_onoff, &tlg->l_onoff); 2429 __get_user(lg.l_linger, &tlg->l_linger); 2430 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER, 2431 &lg, sizeof(lg))); 2432 unlock_user_struct(tlg, optval_addr, 0); 2433 return ret; 2434 } 2435 /* Options with 'int' argument. */ 2436 case TARGET_SO_DEBUG: 2437 optname = SO_DEBUG; 2438 break; 2439 case TARGET_SO_REUSEADDR: 2440 optname = SO_REUSEADDR; 2441 break; 2442 #ifdef SO_REUSEPORT 2443 case TARGET_SO_REUSEPORT: 2444 optname = SO_REUSEPORT; 2445 break; 2446 #endif 2447 case TARGET_SO_TYPE: 2448 optname = SO_TYPE; 2449 break; 2450 case TARGET_SO_ERROR: 2451 optname = SO_ERROR; 2452 break; 2453 case TARGET_SO_DONTROUTE: 2454 optname = SO_DONTROUTE; 2455 break; 2456 case TARGET_SO_BROADCAST: 2457 optname = SO_BROADCAST; 2458 break; 2459 case TARGET_SO_SNDBUF: 2460 optname = SO_SNDBUF; 2461 break; 2462 case TARGET_SO_SNDBUFFORCE: 2463 optname = SO_SNDBUFFORCE; 2464 break; 2465 case TARGET_SO_RCVBUF: 2466 optname = SO_RCVBUF; 2467 break; 2468 case TARGET_SO_RCVBUFFORCE: 2469 optname = SO_RCVBUFFORCE; 2470 break; 2471 case TARGET_SO_KEEPALIVE: 2472 optname = SO_KEEPALIVE; 2473 break; 2474 case TARGET_SO_OOBINLINE: 2475 optname = SO_OOBINLINE; 2476 break; 2477 case TARGET_SO_NO_CHECK: 2478 optname = SO_NO_CHECK; 2479 break; 2480 case TARGET_SO_PRIORITY: 2481 optname = SO_PRIORITY; 2482 break; 2483 #ifdef SO_BSDCOMPAT 2484 case TARGET_SO_BSDCOMPAT: 2485 optname = SO_BSDCOMPAT; 2486 break; 2487 #endif 2488 case TARGET_SO_PASSCRED: 2489 optname = SO_PASSCRED; 2490 break; 2491 case 
TARGET_SO_PASSSEC: 2492 optname = SO_PASSSEC; 2493 break; 2494 case TARGET_SO_TIMESTAMP: 2495 optname = SO_TIMESTAMP; 2496 break; 2497 case TARGET_SO_RCVLOWAT: 2498 optname = SO_RCVLOWAT; 2499 break; 2500 default: 2501 goto unimplemented; 2502 } 2503 if (optlen < sizeof(uint32_t)) 2504 return -TARGET_EINVAL; 2505 2506 if (get_user_u32(val, optval_addr)) 2507 return -TARGET_EFAULT; 2508 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 2509 break; 2510 #ifdef SOL_NETLINK 2511 case SOL_NETLINK: 2512 switch (optname) { 2513 case NETLINK_PKTINFO: 2514 case NETLINK_ADD_MEMBERSHIP: 2515 case NETLINK_DROP_MEMBERSHIP: 2516 case NETLINK_BROADCAST_ERROR: 2517 case NETLINK_NO_ENOBUFS: 2518 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2519 case NETLINK_LISTEN_ALL_NSID: 2520 case NETLINK_CAP_ACK: 2521 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2523 case NETLINK_EXT_ACK: 2524 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2525 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2526 case NETLINK_GET_STRICT_CHK: 2527 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2528 break; 2529 default: 2530 goto unimplemented; 2531 } 2532 val = 0; 2533 if (optlen < sizeof(uint32_t)) { 2534 return -TARGET_EINVAL; 2535 } 2536 if (get_user_u32(val, optval_addr)) { 2537 return -TARGET_EFAULT; 2538 } 2539 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val, 2540 sizeof(val))); 2541 break; 2542 #endif /* SOL_NETLINK */ 2543 default: 2544 unimplemented: 2545 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n", 2546 level, optname); 2547 ret = -TARGET_ENOPROTOOPT; 2548 } 2549 return ret; 2550 } 2551 2552 /* do_getsockopt() Must return target values and target errnos. 
*/ 2553 static abi_long do_getsockopt(int sockfd, int level, int optname, 2554 abi_ulong optval_addr, abi_ulong optlen) 2555 { 2556 abi_long ret; 2557 int len, val; 2558 socklen_t lv; 2559 2560 switch(level) { 2561 case TARGET_SOL_SOCKET: 2562 level = SOL_SOCKET; 2563 switch (optname) { 2564 /* These don't just return a single integer */ 2565 case TARGET_SO_PEERNAME: 2566 goto unimplemented; 2567 case TARGET_SO_RCVTIMEO: { 2568 struct timeval tv; 2569 socklen_t tvlen; 2570 2571 optname = SO_RCVTIMEO; 2572 2573 get_timeout: 2574 if (get_user_u32(len, optlen)) { 2575 return -TARGET_EFAULT; 2576 } 2577 if (len < 0) { 2578 return -TARGET_EINVAL; 2579 } 2580 2581 tvlen = sizeof(tv); 2582 ret = get_errno(getsockopt(sockfd, level, optname, 2583 &tv, &tvlen)); 2584 if (ret < 0) { 2585 return ret; 2586 } 2587 if (len > sizeof(struct target_timeval)) { 2588 len = sizeof(struct target_timeval); 2589 } 2590 if (copy_to_user_timeval(optval_addr, &tv)) { 2591 return -TARGET_EFAULT; 2592 } 2593 if (put_user_u32(len, optlen)) { 2594 return -TARGET_EFAULT; 2595 } 2596 break; 2597 } 2598 case TARGET_SO_SNDTIMEO: 2599 optname = SO_SNDTIMEO; 2600 goto get_timeout; 2601 case TARGET_SO_PEERCRED: { 2602 struct ucred cr; 2603 socklen_t crlen; 2604 struct target_ucred *tcr; 2605 2606 if (get_user_u32(len, optlen)) { 2607 return -TARGET_EFAULT; 2608 } 2609 if (len < 0) { 2610 return -TARGET_EINVAL; 2611 } 2612 2613 crlen = sizeof(cr); 2614 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 2615 &cr, &crlen)); 2616 if (ret < 0) { 2617 return ret; 2618 } 2619 if (len > crlen) { 2620 len = crlen; 2621 } 2622 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 2623 return -TARGET_EFAULT; 2624 } 2625 __put_user(cr.pid, &tcr->pid); 2626 __put_user(cr.uid, &tcr->uid); 2627 __put_user(cr.gid, &tcr->gid); 2628 unlock_user_struct(tcr, optval_addr, 1); 2629 if (put_user_u32(len, optlen)) { 2630 return -TARGET_EFAULT; 2631 } 2632 break; 2633 } 2634 case TARGET_SO_PEERSEC: { 2635 char *name; 
2636 2637 if (get_user_u32(len, optlen)) { 2638 return -TARGET_EFAULT; 2639 } 2640 if (len < 0) { 2641 return -TARGET_EINVAL; 2642 } 2643 name = lock_user(VERIFY_WRITE, optval_addr, len, 0); 2644 if (!name) { 2645 return -TARGET_EFAULT; 2646 } 2647 lv = len; 2648 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC, 2649 name, &lv)); 2650 if (put_user_u32(lv, optlen)) { 2651 ret = -TARGET_EFAULT; 2652 } 2653 unlock_user(name, optval_addr, lv); 2654 break; 2655 } 2656 case TARGET_SO_LINGER: 2657 { 2658 struct linger lg; 2659 socklen_t lglen; 2660 struct target_linger *tlg; 2661 2662 if (get_user_u32(len, optlen)) { 2663 return -TARGET_EFAULT; 2664 } 2665 if (len < 0) { 2666 return -TARGET_EINVAL; 2667 } 2668 2669 lglen = sizeof(lg); 2670 ret = get_errno(getsockopt(sockfd, level, SO_LINGER, 2671 &lg, &lglen)); 2672 if (ret < 0) { 2673 return ret; 2674 } 2675 if (len > lglen) { 2676 len = lglen; 2677 } 2678 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) { 2679 return -TARGET_EFAULT; 2680 } 2681 __put_user(lg.l_onoff, &tlg->l_onoff); 2682 __put_user(lg.l_linger, &tlg->l_linger); 2683 unlock_user_struct(tlg, optval_addr, 1); 2684 if (put_user_u32(len, optlen)) { 2685 return -TARGET_EFAULT; 2686 } 2687 break; 2688 } 2689 /* Options with 'int' argument. 
*/ 2690 case TARGET_SO_DEBUG: 2691 optname = SO_DEBUG; 2692 goto int_case; 2693 case TARGET_SO_REUSEADDR: 2694 optname = SO_REUSEADDR; 2695 goto int_case; 2696 #ifdef SO_REUSEPORT 2697 case TARGET_SO_REUSEPORT: 2698 optname = SO_REUSEPORT; 2699 goto int_case; 2700 #endif 2701 case TARGET_SO_TYPE: 2702 optname = SO_TYPE; 2703 goto int_case; 2704 case TARGET_SO_ERROR: 2705 optname = SO_ERROR; 2706 goto int_case; 2707 case TARGET_SO_DONTROUTE: 2708 optname = SO_DONTROUTE; 2709 goto int_case; 2710 case TARGET_SO_BROADCAST: 2711 optname = SO_BROADCAST; 2712 goto int_case; 2713 case TARGET_SO_SNDBUF: 2714 optname = SO_SNDBUF; 2715 goto int_case; 2716 case TARGET_SO_RCVBUF: 2717 optname = SO_RCVBUF; 2718 goto int_case; 2719 case TARGET_SO_KEEPALIVE: 2720 optname = SO_KEEPALIVE; 2721 goto int_case; 2722 case TARGET_SO_OOBINLINE: 2723 optname = SO_OOBINLINE; 2724 goto int_case; 2725 case TARGET_SO_NO_CHECK: 2726 optname = SO_NO_CHECK; 2727 goto int_case; 2728 case TARGET_SO_PRIORITY: 2729 optname = SO_PRIORITY; 2730 goto int_case; 2731 #ifdef SO_BSDCOMPAT 2732 case TARGET_SO_BSDCOMPAT: 2733 optname = SO_BSDCOMPAT; 2734 goto int_case; 2735 #endif 2736 case TARGET_SO_PASSCRED: 2737 optname = SO_PASSCRED; 2738 goto int_case; 2739 case TARGET_SO_TIMESTAMP: 2740 optname = SO_TIMESTAMP; 2741 goto int_case; 2742 case TARGET_SO_RCVLOWAT: 2743 optname = SO_RCVLOWAT; 2744 goto int_case; 2745 case TARGET_SO_ACCEPTCONN: 2746 optname = SO_ACCEPTCONN; 2747 goto int_case; 2748 case TARGET_SO_PROTOCOL: 2749 optname = SO_PROTOCOL; 2750 goto int_case; 2751 case TARGET_SO_DOMAIN: 2752 optname = SO_DOMAIN; 2753 goto int_case; 2754 default: 2755 goto int_case; 2756 } 2757 break; 2758 case SOL_TCP: 2759 case SOL_UDP: 2760 /* TCP and UDP options all take an 'int' value. 
*/ 2761 int_case: 2762 if (get_user_u32(len, optlen)) 2763 return -TARGET_EFAULT; 2764 if (len < 0) 2765 return -TARGET_EINVAL; 2766 lv = sizeof(lv); 2767 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2768 if (ret < 0) 2769 return ret; 2770 if (optname == SO_TYPE) { 2771 val = host_to_target_sock_type(val); 2772 } 2773 if (len > lv) 2774 len = lv; 2775 if (len == 4) { 2776 if (put_user_u32(val, optval_addr)) 2777 return -TARGET_EFAULT; 2778 } else { 2779 if (put_user_u8(val, optval_addr)) 2780 return -TARGET_EFAULT; 2781 } 2782 if (put_user_u32(len, optlen)) 2783 return -TARGET_EFAULT; 2784 break; 2785 case SOL_IP: 2786 switch(optname) { 2787 case IP_TOS: 2788 case IP_TTL: 2789 case IP_HDRINCL: 2790 case IP_ROUTER_ALERT: 2791 case IP_RECVOPTS: 2792 case IP_RETOPTS: 2793 case IP_PKTINFO: 2794 case IP_MTU_DISCOVER: 2795 case IP_RECVERR: 2796 case IP_RECVTOS: 2797 #ifdef IP_FREEBIND 2798 case IP_FREEBIND: 2799 #endif 2800 case IP_MULTICAST_TTL: 2801 case IP_MULTICAST_LOOP: 2802 if (get_user_u32(len, optlen)) 2803 return -TARGET_EFAULT; 2804 if (len < 0) 2805 return -TARGET_EINVAL; 2806 lv = sizeof(lv); 2807 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2808 if (ret < 0) 2809 return ret; 2810 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2811 len = 1; 2812 if (put_user_u32(len, optlen) 2813 || put_user_u8(val, optval_addr)) 2814 return -TARGET_EFAULT; 2815 } else { 2816 if (len > sizeof(int)) 2817 len = sizeof(int); 2818 if (put_user_u32(len, optlen) 2819 || put_user_u32(val, optval_addr)) 2820 return -TARGET_EFAULT; 2821 } 2822 break; 2823 default: 2824 ret = -TARGET_ENOPROTOOPT; 2825 break; 2826 } 2827 break; 2828 case SOL_IPV6: 2829 switch (optname) { 2830 case IPV6_MTU_DISCOVER: 2831 case IPV6_MTU: 2832 case IPV6_V6ONLY: 2833 case IPV6_RECVPKTINFO: 2834 case IPV6_UNICAST_HOPS: 2835 case IPV6_MULTICAST_HOPS: 2836 case IPV6_MULTICAST_LOOP: 2837 case IPV6_RECVERR: 2838 case IPV6_RECVHOPLIMIT: 2839 case 
IPV6_2292HOPLIMIT: 2840 case IPV6_CHECKSUM: 2841 case IPV6_ADDRFORM: 2842 case IPV6_2292PKTINFO: 2843 case IPV6_RECVTCLASS: 2844 case IPV6_RECVRTHDR: 2845 case IPV6_2292RTHDR: 2846 case IPV6_RECVHOPOPTS: 2847 case IPV6_2292HOPOPTS: 2848 case IPV6_RECVDSTOPTS: 2849 case IPV6_2292DSTOPTS: 2850 case IPV6_TCLASS: 2851 case IPV6_ADDR_PREFERENCES: 2852 #ifdef IPV6_RECVPATHMTU 2853 case IPV6_RECVPATHMTU: 2854 #endif 2855 #ifdef IPV6_TRANSPARENT 2856 case IPV6_TRANSPARENT: 2857 #endif 2858 #ifdef IPV6_FREEBIND 2859 case IPV6_FREEBIND: 2860 #endif 2861 #ifdef IPV6_RECVORIGDSTADDR 2862 case IPV6_RECVORIGDSTADDR: 2863 #endif 2864 if (get_user_u32(len, optlen)) 2865 return -TARGET_EFAULT; 2866 if (len < 0) 2867 return -TARGET_EINVAL; 2868 lv = sizeof(lv); 2869 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2870 if (ret < 0) 2871 return ret; 2872 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2873 len = 1; 2874 if (put_user_u32(len, optlen) 2875 || put_user_u8(val, optval_addr)) 2876 return -TARGET_EFAULT; 2877 } else { 2878 if (len > sizeof(int)) 2879 len = sizeof(int); 2880 if (put_user_u32(len, optlen) 2881 || put_user_u32(val, optval_addr)) 2882 return -TARGET_EFAULT; 2883 } 2884 break; 2885 default: 2886 ret = -TARGET_ENOPROTOOPT; 2887 break; 2888 } 2889 break; 2890 #ifdef SOL_NETLINK 2891 case SOL_NETLINK: 2892 switch (optname) { 2893 case NETLINK_PKTINFO: 2894 case NETLINK_BROADCAST_ERROR: 2895 case NETLINK_NO_ENOBUFS: 2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2897 case NETLINK_LISTEN_ALL_NSID: 2898 case NETLINK_CAP_ACK: 2899 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2901 case NETLINK_EXT_ACK: 2902 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2904 case NETLINK_GET_STRICT_CHK: 2905 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2906 if (get_user_u32(len, optlen)) { 2907 
return -TARGET_EFAULT; 2908 } 2909 if (len != sizeof(val)) { 2910 return -TARGET_EINVAL; 2911 } 2912 lv = len; 2913 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2914 if (ret < 0) { 2915 return ret; 2916 } 2917 if (put_user_u32(lv, optlen) 2918 || put_user_u32(val, optval_addr)) { 2919 return -TARGET_EFAULT; 2920 } 2921 break; 2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2923 case NETLINK_LIST_MEMBERSHIPS: 2924 { 2925 uint32_t *results; 2926 int i; 2927 if (get_user_u32(len, optlen)) { 2928 return -TARGET_EFAULT; 2929 } 2930 if (len < 0) { 2931 return -TARGET_EINVAL; 2932 } 2933 results = lock_user(VERIFY_WRITE, optval_addr, len, 1); 2934 if (!results && len > 0) { 2935 return -TARGET_EFAULT; 2936 } 2937 lv = len; 2938 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv)); 2939 if (ret < 0) { 2940 unlock_user(results, optval_addr, 0); 2941 return ret; 2942 } 2943 /* swap host endianess to target endianess. */ 2944 for (i = 0; i < (len / sizeof(uint32_t)); i++) { 2945 results[i] = tswap32(results[i]); 2946 } 2947 if (put_user_u32(lv, optlen)) { 2948 return -TARGET_EFAULT; 2949 } 2950 unlock_user(results, optval_addr, 0); 2951 break; 2952 } 2953 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2954 default: 2955 goto unimplemented; 2956 } 2957 break; 2958 #endif /* SOL_NETLINK */ 2959 default: 2960 unimplemented: 2961 qemu_log_mask(LOG_UNIMP, 2962 "getsockopt level=%d optname=%d not yet supported\n", 2963 level, optname); 2964 ret = -TARGET_EOPNOTSUPP; 2965 break; 2966 } 2967 return ret; 2968 } 2969 2970 /* Convert target low/high pair representing file offset into the host 2971 * low/high pair. This function doesn't handle offsets bigger than 64 bits 2972 * as the kernel doesn't handle them either. 
2973 */ 2974 static void target_to_host_low_high(abi_ulong tlow, 2975 abi_ulong thigh, 2976 unsigned long *hlow, 2977 unsigned long *hhigh) 2978 { 2979 uint64_t off = tlow | 2980 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) << 2981 TARGET_LONG_BITS / 2; 2982 2983 *hlow = off; 2984 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2; 2985 } 2986 2987 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 2988 abi_ulong count, int copy) 2989 { 2990 struct target_iovec *target_vec; 2991 struct iovec *vec; 2992 abi_ulong total_len, max_len; 2993 int i; 2994 int err = 0; 2995 bool bad_address = false; 2996 2997 if (count == 0) { 2998 errno = 0; 2999 return NULL; 3000 } 3001 if (count > IOV_MAX) { 3002 errno = EINVAL; 3003 return NULL; 3004 } 3005 3006 vec = g_try_new0(struct iovec, count); 3007 if (vec == NULL) { 3008 errno = ENOMEM; 3009 return NULL; 3010 } 3011 3012 target_vec = lock_user(VERIFY_READ, target_addr, 3013 count * sizeof(struct target_iovec), 1); 3014 if (target_vec == NULL) { 3015 err = EFAULT; 3016 goto fail2; 3017 } 3018 3019 /* ??? If host page size > target page size, this will result in a 3020 value larger than what we can actually support. */ 3021 max_len = 0x7fffffff & TARGET_PAGE_MASK; 3022 total_len = 0; 3023 3024 for (i = 0; i < count; i++) { 3025 abi_ulong base = tswapal(target_vec[i].iov_base); 3026 abi_long len = tswapal(target_vec[i].iov_len); 3027 3028 if (len < 0) { 3029 err = EINVAL; 3030 goto fail; 3031 } else if (len == 0) { 3032 /* Zero length pointer is ignored. */ 3033 vec[i].iov_base = 0; 3034 } else { 3035 vec[i].iov_base = lock_user(type, base, len, copy); 3036 /* If the first buffer pointer is bad, this is a fault. But 3037 * subsequent bad buffers will result in a partial write; this 3038 * is realized by filling the vector with null pointers and 3039 * zero lengths. 
*/ 3040 if (!vec[i].iov_base) { 3041 if (i == 0) { 3042 err = EFAULT; 3043 goto fail; 3044 } else { 3045 bad_address = true; 3046 } 3047 } 3048 if (bad_address) { 3049 len = 0; 3050 } 3051 if (len > max_len - total_len) { 3052 len = max_len - total_len; 3053 } 3054 } 3055 vec[i].iov_len = len; 3056 total_len += len; 3057 } 3058 3059 unlock_user(target_vec, target_addr, 0); 3060 return vec; 3061 3062 fail: 3063 while (--i >= 0) { 3064 if (tswapal(target_vec[i].iov_len) > 0) { 3065 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 3066 } 3067 } 3068 unlock_user(target_vec, target_addr, 0); 3069 fail2: 3070 g_free(vec); 3071 errno = err; 3072 return NULL; 3073 } 3074 3075 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 3076 abi_ulong count, int copy) 3077 { 3078 struct target_iovec *target_vec; 3079 int i; 3080 3081 target_vec = lock_user(VERIFY_READ, target_addr, 3082 count * sizeof(struct target_iovec), 1); 3083 if (target_vec) { 3084 for (i = 0; i < count; i++) { 3085 abi_ulong base = tswapal(target_vec[i].iov_base); 3086 abi_long len = tswapal(target_vec[i].iov_len); 3087 if (len < 0) { 3088 break; 3089 } 3090 unlock_user(vec[i].iov_base, base, copy ? 
vec[i].iov_len : 0); 3091 } 3092 unlock_user(target_vec, target_addr, 0); 3093 } 3094 3095 g_free(vec); 3096 } 3097 3098 static inline int target_to_host_sock_type(int *type) 3099 { 3100 int host_type = 0; 3101 int target_type = *type; 3102 3103 switch (target_type & TARGET_SOCK_TYPE_MASK) { 3104 case TARGET_SOCK_DGRAM: 3105 host_type = SOCK_DGRAM; 3106 break; 3107 case TARGET_SOCK_STREAM: 3108 host_type = SOCK_STREAM; 3109 break; 3110 default: 3111 host_type = target_type & TARGET_SOCK_TYPE_MASK; 3112 break; 3113 } 3114 if (target_type & TARGET_SOCK_CLOEXEC) { 3115 #if defined(SOCK_CLOEXEC) 3116 host_type |= SOCK_CLOEXEC; 3117 #else 3118 return -TARGET_EINVAL; 3119 #endif 3120 } 3121 if (target_type & TARGET_SOCK_NONBLOCK) { 3122 #if defined(SOCK_NONBLOCK) 3123 host_type |= SOCK_NONBLOCK; 3124 #elif !defined(O_NONBLOCK) 3125 return -TARGET_EINVAL; 3126 #endif 3127 } 3128 *type = host_type; 3129 return 0; 3130 } 3131 3132 /* Try to emulate socket type flags after socket creation. */ 3133 static int sock_flags_fixup(int fd, int target_type) 3134 { 3135 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 3136 if (target_type & TARGET_SOCK_NONBLOCK) { 3137 int flags = fcntl(fd, F_GETFL); 3138 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 3139 close(fd); 3140 return -TARGET_EINVAL; 3141 } 3142 } 3143 #endif 3144 return fd; 3145 } 3146 3147 /* do_socket() Must return target values and target errnos. 
*/ 3148 static abi_long do_socket(int domain, int type, int protocol) 3149 { 3150 int target_type = type; 3151 int ret; 3152 3153 ret = target_to_host_sock_type(&type); 3154 if (ret) { 3155 return ret; 3156 } 3157 3158 if (domain == PF_NETLINK && !( 3159 #ifdef CONFIG_RTNETLINK 3160 protocol == NETLINK_ROUTE || 3161 #endif 3162 protocol == NETLINK_KOBJECT_UEVENT || 3163 protocol == NETLINK_AUDIT)) { 3164 return -TARGET_EPROTONOSUPPORT; 3165 } 3166 3167 if (domain == AF_PACKET || 3168 (domain == AF_INET && type == SOCK_PACKET)) { 3169 protocol = tswap16(protocol); 3170 } 3171 3172 ret = get_errno(socket(domain, type, protocol)); 3173 if (ret >= 0) { 3174 ret = sock_flags_fixup(ret, target_type); 3175 if (type == SOCK_PACKET) { 3176 /* Manage an obsolete case : 3177 * if socket type is SOCK_PACKET, bind by name 3178 */ 3179 fd_trans_register(ret, &target_packet_trans); 3180 } else if (domain == PF_NETLINK) { 3181 switch (protocol) { 3182 #ifdef CONFIG_RTNETLINK 3183 case NETLINK_ROUTE: 3184 fd_trans_register(ret, &target_netlink_route_trans); 3185 break; 3186 #endif 3187 case NETLINK_KOBJECT_UEVENT: 3188 /* nothing to do: messages are strings */ 3189 break; 3190 case NETLINK_AUDIT: 3191 fd_trans_register(ret, &target_netlink_audit_trans); 3192 break; 3193 default: 3194 g_assert_not_reached(); 3195 } 3196 } 3197 } 3198 return ret; 3199 } 3200 3201 /* do_bind() Must return target values and target errnos. */ 3202 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3203 socklen_t addrlen) 3204 { 3205 void *addr; 3206 abi_long ret; 3207 3208 if ((int)addrlen < 0) { 3209 return -TARGET_EINVAL; 3210 } 3211 3212 addr = alloca(addrlen+1); 3213 3214 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3215 if (ret) 3216 return ret; 3217 3218 return get_errno(bind(sockfd, addr, addrlen)); 3219 } 3220 3221 /* do_connect() Must return target values and target errnos. 
*/ 3222 static abi_long do_connect(int sockfd, abi_ulong target_addr, 3223 socklen_t addrlen) 3224 { 3225 void *addr; 3226 abi_long ret; 3227 3228 if ((int)addrlen < 0) { 3229 return -TARGET_EINVAL; 3230 } 3231 3232 addr = alloca(addrlen+1); 3233 3234 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3235 if (ret) 3236 return ret; 3237 3238 return get_errno(safe_connect(sockfd, addr, addrlen)); 3239 } 3240 3241 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 3242 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 3243 int flags, int send) 3244 { 3245 abi_long ret, len; 3246 struct msghdr msg; 3247 abi_ulong count; 3248 struct iovec *vec; 3249 abi_ulong target_vec; 3250 3251 if (msgp->msg_name) { 3252 msg.msg_namelen = tswap32(msgp->msg_namelen); 3253 msg.msg_name = alloca(msg.msg_namelen+1); 3254 ret = target_to_host_sockaddr(fd, msg.msg_name, 3255 tswapal(msgp->msg_name), 3256 msg.msg_namelen); 3257 if (ret == -TARGET_EFAULT) { 3258 /* For connected sockets msg_name and msg_namelen must 3259 * be ignored, so returning EFAULT immediately is wrong. 3260 * Instead, pass a bad msg_name to the host kernel, and 3261 * let it decide whether to return EFAULT or not. 3262 */ 3263 msg.msg_name = (void *)-1; 3264 } else if (ret) { 3265 goto out2; 3266 } 3267 } else { 3268 msg.msg_name = NULL; 3269 msg.msg_namelen = 0; 3270 } 3271 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 3272 msg.msg_control = alloca(msg.msg_controllen); 3273 memset(msg.msg_control, 0, msg.msg_controllen); 3274 3275 msg.msg_flags = tswap32(msgp->msg_flags); 3276 3277 count = tswapal(msgp->msg_iovlen); 3278 target_vec = tswapal(msgp->msg_iov); 3279 3280 if (count > IOV_MAX) { 3281 /* sendrcvmsg returns a different errno for this condition than 3282 * readv/writev, so we must catch it here before lock_iovec() does. 3283 */ 3284 ret = -TARGET_EMSGSIZE; 3285 goto out2; 3286 } 3287 3288 vec = lock_iovec(send ? 
                     VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* NOTE(review): only the first iovec element is run through
             * the fd's data translator here.
             */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            /* Convert target control messages to host layout, then send. */
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Copy the updated name/flags back into the target msghdr. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success recvmsg returns the byte count received. */
                ret = len;
            }
        }
    }

out:
    /* Copy iovec data back to the guest only on the receive path. */
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the target msghdr and dispatch to the locked worker above. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ?
                       0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over the single-message worker.
 * send != 0 means sendmmsg, 0 means recvmmsg.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Kernel behavior: silently clamp the vector length. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Per-message byte count goes back into the target mmsghdr. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the first i entries were touched; write just those back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Translate target SOCK_CLOEXEC/SOCK_NONBLOCK-style flag bits. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* Caller did not ask for the peer address. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy at most the guest-provided buffer size, but report the
         * full length the kernel returned, as Linux does.
         */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos.
 */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        /* Truncate the copy to the guest buffer; report the real length. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        /* Truncate the copy to the guest buffer; report the real length. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos.
 */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Store both fds into the guest's two-element array. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos.
 * target_addr == 0 means plain send() semantics (no destination).
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload in a private copy so the guest's
         * buffer is left untouched; copy_msg remembers the original.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for the NUL padding some sockaddr conversions add. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we substituted a translated copy, free it and unlock the original. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is passed through to the kernel as-is. */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate received payload in place for the guest. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Truncate the copy to the guest buffer; report the real length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: unlock without copying anything back to the guest. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif

#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() mappings (fixed-size table). */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

/* Copy a target ipc_perm (embedded in a semid64_ds at target_addr)
 * into the host struct. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq widths differ per target ABI */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd,
                       target_addr, 0);
    return 0;
}

/* Copy a host ipc_perm into the target semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq widths differ per target ABI */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert a target semid64_ds into a host semid_ds. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert a host semid_ds into a target semid64_ds. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Target-layout mirror of the host's struct seminfo. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo into the guest structure at target_addr. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

/* Host-side semctl() argument union (not provided by glibc). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: pointers become abi_ulong. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

/* Read the guest's GETALL/SETALL value array into a freshly allocated
 * host array (*host_array); caller frees via host_to_target_semarray().
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Query the semaphore set size to know how many values to copy. */
    ret = semctl(semid, 0,
                 IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

/* Copy the host GETALL/SETALL value array back to the guest and free it.
 * Counterpart of target_to_host_semarray().
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Re-query the set size; the array length is not passed in. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}

/* Emulate semctl(); target_arg is the guest's semun union passed by value. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and similar modifier bits from the command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field.
         * In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the whole value array through a host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Convert semid_ds in, run the command, convert back out. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

/* Target-layout sembuf, as used by semop()/semtimedop(). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy nsops guest sembuf entries into the caller-provided host array. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop()/semtimedop(); timeout == 0 means no timeout, and
 * time64 selects the 64-bit target timespec layout.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that.
     */
    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif

/* Target-layout msqid_ds; 32-bit ABIs carry padding words after
 * each time field.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong
              __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a target msqid_ds at target_addr into the host struct. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Convert a host msqid_ds into the target struct at target_addr. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes =
                            tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Target-layout mirror of the host's struct msginfo. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host msginfo into the guest structure at target_addr. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(); ptr is the guest buffer for the command's argument. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and similar modifier bits from the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Convert msqid_ds in, run the command, convert back out. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel ABI: these commands return a msginfo via the same slot. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr,
                                   &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Target-layout msgbuf: mtype followed by the message text. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(); msgp points at a target_msgbuf in guest memory. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Room for the host's long mtype header plus the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    /* Prefer the direct syscall; fall back to multiplexed sys_ipc. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.
 */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

/* Emulate msgrcv(); on success, the received text and mtype are copied
 * back into the guest's target_msgbuf at msgp.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Room for the host's long mtype header plus the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    /* Prefer the direct syscall; fall back to multiplexed sys_ipc. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* mtext starts right after the abi_long mtype in the guest layout. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

/* Convert a target shmid_ds at target_addr into the host struct. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
__get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4372 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4373 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4374 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4375 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4376 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4377 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4378 unlock_user_struct(target_sd, target_addr, 0); 4379 return 0; 4380 } 4381 4382 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4383 struct shmid_ds *host_sd) 4384 { 4385 struct target_shmid_ds *target_sd; 4386 4387 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4388 return -TARGET_EFAULT; 4389 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4390 return -TARGET_EFAULT; 4391 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4392 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4393 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4394 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4395 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4396 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4397 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4398 unlock_user_struct(target_sd, target_addr, 1); 4399 return 0; 4400 } 4401 4402 struct target_shminfo { 4403 abi_ulong shmmax; 4404 abi_ulong shmmin; 4405 abi_ulong shmmni; 4406 abi_ulong shmseg; 4407 abi_ulong shmall; 4408 }; 4409 4410 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4411 struct shminfo *host_shminfo) 4412 { 4413 struct target_shminfo *target_shminfo; 4414 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4415 return -TARGET_EFAULT; 4416 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4417 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4418 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4419 __put_user(host_shminfo->shmseg, 
&target_shminfo->shmseg); 4420 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4421 unlock_user_struct(target_shminfo, target_addr, 1); 4422 return 0; 4423 } 4424 4425 struct target_shm_info { 4426 int used_ids; 4427 abi_ulong shm_tot; 4428 abi_ulong shm_rss; 4429 abi_ulong shm_swp; 4430 abi_ulong swap_attempts; 4431 abi_ulong swap_successes; 4432 }; 4433 4434 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4435 struct shm_info *host_shm_info) 4436 { 4437 struct target_shm_info *target_shm_info; 4438 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4439 return -TARGET_EFAULT; 4440 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4441 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4442 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4443 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4444 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4445 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4446 unlock_user_struct(target_shm_info, target_addr, 1); 4447 return 0; 4448 } 4449 4450 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4451 { 4452 struct shmid_ds dsarg; 4453 struct shminfo shminfo; 4454 struct shm_info shm_info; 4455 abi_long ret = -TARGET_EINVAL; 4456 4457 cmd &= 0xff; 4458 4459 switch(cmd) { 4460 case IPC_STAT: 4461 case IPC_SET: 4462 case SHM_STAT: 4463 if (target_to_host_shmid_ds(&dsarg, buf)) 4464 return -TARGET_EFAULT; 4465 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4466 if (host_to_target_shmid_ds(buf, &dsarg)) 4467 return -TARGET_EFAULT; 4468 break; 4469 case IPC_INFO: 4470 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4471 if (host_to_target_shminfo(buf, &shminfo)) 4472 return -TARGET_EFAULT; 4473 break; 4474 case SHM_INFO: 4475 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4476 if (host_to_target_shm_info(buf, &shm_info)) 
4477 return -TARGET_EFAULT; 4478 break; 4479 case IPC_RMID: 4480 case SHM_LOCK: 4481 case SHM_UNLOCK: 4482 ret = get_errno(shmctl(shmid, cmd, NULL)); 4483 break; 4484 } 4485 4486 return ret; 4487 } 4488 4489 #ifndef TARGET_FORCE_SHMLBA 4490 /* For most architectures, SHMLBA is the same as the page size; 4491 * some architectures have larger values, in which case they should 4492 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4493 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4494 * and defining its own value for SHMLBA. 4495 * 4496 * The kernel also permits SHMLBA to be set by the architecture to a 4497 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4498 * this means that addresses are rounded to the large size if 4499 * SHM_RND is set but addresses not aligned to that size are not rejected 4500 * as long as they are at least page-aligned. Since the only architecture 4501 * which uses this is ia64 this code doesn't provide for that oddity. 
 */
/* Default SHMLBA for the guest: one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

/*
 * Emulate shmat(2): validate/round the requested guest address against the
 * target SHMLBA, attach the host segment (finding a free guest VMA when no
 * address was requested), update guest page flags, and record the mapping
 * in shm_regions[] so do_shmdt() can find its size later.
 * Returns the guest attach address or a -TARGET_* errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* misaligned address: round down with SHM_RND, otherwise reject */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations. This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved the range for us */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* remember start/size so do_shmdt() can clear the page flags */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}

/*
 * Emulate shmdt(2): drop the shm_regions[] bookkeeping entry and its guest
 * page flags (if we attached it), then detach on the host.
 * Returns 0 or a -TARGET_* errno.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}

#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 */
/*
 * Demultiplex the legacy sys_ipc(2) syscall: the low 16 bits of 'call'
 * select the IPC operation, the high 16 bits carry an ABI "version"
 * kludge used by msgrcv and shmat.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* version 0: ptr points to a {msgp, msgtyp} pair */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

#define STRUCT(name, ...) \
    STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
/* First pass over syscall_types.h: build the STRUCT_* enum of type IDs. */
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk argtype descriptor array per structure. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed scratch buffer used for ioctl argument conversion. */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/*
 * Convert and forward the FS_IOC_FIEMAP ioctl, copying back the
 * variable-length extent array the kernel fills in.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* bounded by FIEMAP_MAX_EXTENTS so outbufsz below cannot overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif

/*
 * Convert and forward SIOCGIFCONF: size the host ifreq array from the
 * guest's ifc_len (a NULL ifc_buf means "just return the needed length"),
 * run the ioctl, then convert each returned struct ifreq back.
 */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    /* ifmap variant is the larger target layout; use it for sizing */
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        /* ifreq array lives immediately after the struct ifconf header */
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}

#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Per-URB bookkeeping: the host kernel identifies a URB by the pointer we
 * submitted (&host_urb), so we keep the guest URB address and locked guest
 * buffer alongside it and recover the whole record via offsetof() on reap.
 */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

/* Lazily created table of in-flight URBs, keyed by guest URB address. */
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

/* Track a newly submitted URB (key is the leading target_urb_adr field). */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

/* Find the live URB for a guest URB address, or NULL. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

/* Forget a URB once it has been reaped. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

/*
 * USBDEVFS_REAPURB(NDELAY): the kernel returns the host URB pointer; map it
 * back to our live_urb record, release the guest data buffer, copy the
 * completed URB out, and write the guest URB handle through 'arg'.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* recover the containing live_urb from the embedded host_urb pointer */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

/* USBDEVFS_DISCARDURB: cancel a previously submitted URB. */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata.
*/ 5054 lurb = urb_hashtable_lookup(arg); 5055 if (!lurb) { 5056 return -TARGET_EFAULT; 5057 } 5058 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5059 } 5060 5061 static abi_long 5062 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp, 5063 int fd, int cmd, abi_long arg) 5064 { 5065 const argtype *arg_type = ie->arg_type; 5066 int target_size; 5067 abi_long ret; 5068 void *argptr; 5069 int rw_dir; 5070 struct live_urb *lurb; 5071 5072 /* 5073 * each submitted URB needs to map to a unique ID for the 5074 * kernel, and that unique ID needs to be a pointer to 5075 * host memory. hence, we need to malloc for each URB. 5076 * isochronous transfers have a variable length struct. 5077 */ 5078 arg_type++; 5079 target_size = thunk_type_size(arg_type, THUNK_TARGET); 5080 5081 /* construct host copy of urb and metadata */ 5082 lurb = g_try_new0(struct live_urb, 1); 5083 if (!lurb) { 5084 return -TARGET_ENOMEM; 5085 } 5086 5087 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5088 if (!argptr) { 5089 g_free(lurb); 5090 return -TARGET_EFAULT; 5091 } 5092 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST); 5093 unlock_user(argptr, arg, 0); 5094 5095 lurb->target_urb_adr = arg; 5096 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer; 5097 5098 /* buffer space used depends on endpoint type so lock the entire buffer */ 5099 /* control type urbs should check the buffer contents for true direction */ 5100 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? 
VERIFY_WRITE : VERIFY_READ; 5101 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr, 5102 lurb->host_urb.buffer_length, 1); 5103 if (lurb->target_buf_ptr == NULL) { 5104 g_free(lurb); 5105 return -TARGET_EFAULT; 5106 } 5107 5108 /* update buffer pointer in host copy */ 5109 lurb->host_urb.buffer = lurb->target_buf_ptr; 5110 5111 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5112 if (is_error(ret)) { 5113 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0); 5114 g_free(lurb); 5115 } else { 5116 urb_hashtable_insert(lurb); 5117 } 5118 5119 return ret; 5120 } 5121 #endif /* CONFIG_USBFS */ 5122 5123 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5124 int cmd, abi_long arg) 5125 { 5126 void *argptr; 5127 struct dm_ioctl *host_dm; 5128 abi_long guest_data; 5129 uint32_t guest_data_size; 5130 int target_size; 5131 const argtype *arg_type = ie->arg_type; 5132 abi_long ret; 5133 void *big_buf = NULL; 5134 char *host_data; 5135 5136 arg_type++; 5137 target_size = thunk_type_size(arg_type, 0); 5138 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5139 if (!argptr) { 5140 ret = -TARGET_EFAULT; 5141 goto out; 5142 } 5143 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5144 unlock_user(argptr, arg, 0); 5145 5146 /* buf_temp is too small, so fetch things into a bigger buffer */ 5147 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5148 memcpy(big_buf, buf_temp, target_size); 5149 buf_temp = big_buf; 5150 host_dm = big_buf; 5151 5152 guest_data = arg + host_dm->data_start; 5153 if ((guest_data - arg) < 0) { 5154 ret = -TARGET_EINVAL; 5155 goto out; 5156 } 5157 guest_data_size = host_dm->data_size - host_dm->data_start; 5158 host_data = (char*)host_dm + host_dm->data_start; 5159 5160 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5161 if (!argptr) { 5162 ret = -TARGET_EFAULT; 5163 goto out; 5164 } 5165 5166 switch (ie->host_cmd) { 5167 case DM_REMOVE_ALL: 5168 case 
DM_LIST_DEVICES: 5169 case DM_DEV_CREATE: 5170 case DM_DEV_REMOVE: 5171 case DM_DEV_SUSPEND: 5172 case DM_DEV_STATUS: 5173 case DM_DEV_WAIT: 5174 case DM_TABLE_STATUS: 5175 case DM_TABLE_CLEAR: 5176 case DM_TABLE_DEPS: 5177 case DM_LIST_VERSIONS: 5178 /* no input data */ 5179 break; 5180 case DM_DEV_RENAME: 5181 case DM_DEV_SET_GEOMETRY: 5182 /* data contains only strings */ 5183 memcpy(host_data, argptr, guest_data_size); 5184 break; 5185 case DM_TARGET_MSG: 5186 memcpy(host_data, argptr, guest_data_size); 5187 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5188 break; 5189 case DM_TABLE_LOAD: 5190 { 5191 void *gspec = argptr; 5192 void *cur_data = host_data; 5193 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5194 int spec_size = thunk_type_size(arg_type, 0); 5195 int i; 5196 5197 for (i = 0; i < host_dm->target_count; i++) { 5198 struct dm_target_spec *spec = cur_data; 5199 uint32_t next; 5200 int slen; 5201 5202 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5203 slen = strlen((char*)gspec + spec_size) + 1; 5204 next = spec->next; 5205 spec->next = sizeof(*spec) + slen; 5206 strcpy((char*)&spec[1], gspec + spec_size); 5207 gspec += next; 5208 cur_data += spec->next; 5209 } 5210 break; 5211 } 5212 default: 5213 ret = -TARGET_EINVAL; 5214 unlock_user(argptr, guest_data, 0); 5215 goto out; 5216 } 5217 unlock_user(argptr, guest_data, 0); 5218 5219 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5220 if (!is_error(ret)) { 5221 guest_data = arg + host_dm->data_start; 5222 guest_data_size = host_dm->data_size - host_dm->data_start; 5223 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5224 switch (ie->host_cmd) { 5225 case DM_REMOVE_ALL: 5226 case DM_DEV_CREATE: 5227 case DM_DEV_REMOVE: 5228 case DM_DEV_RENAME: 5229 case DM_DEV_SUSPEND: 5230 case DM_DEV_STATUS: 5231 case DM_TABLE_LOAD: 5232 case DM_TABLE_CLEAR: 5233 case DM_TARGET_MSG: 5234 case DM_DEV_SET_GEOMETRY: 5235 /* no return data */ 5236 break; 5237 case 
DM_LIST_DEVICES: 5238 { 5239 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5240 uint32_t remaining_data = guest_data_size; 5241 void *cur_data = argptr; 5242 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5243 int nl_size = 12; /* can't use thunk_size due to alignment */ 5244 5245 while (1) { 5246 uint32_t next = nl->next; 5247 if (next) { 5248 nl->next = nl_size + (strlen(nl->name) + 1); 5249 } 5250 if (remaining_data < nl->next) { 5251 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5252 break; 5253 } 5254 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5255 strcpy(cur_data + nl_size, nl->name); 5256 cur_data += nl->next; 5257 remaining_data -= nl->next; 5258 if (!next) { 5259 break; 5260 } 5261 nl = (void*)nl + next; 5262 } 5263 break; 5264 } 5265 case DM_DEV_WAIT: 5266 case DM_TABLE_STATUS: 5267 { 5268 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5269 void *cur_data = argptr; 5270 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5271 int spec_size = thunk_type_size(arg_type, 0); 5272 int i; 5273 5274 for (i = 0; i < host_dm->target_count; i++) { 5275 uint32_t next = spec->next; 5276 int slen = strlen((char*)&spec[1]) + 1; 5277 spec->next = (cur_data - argptr) + spec_size + slen; 5278 if (guest_data_size < spec->next) { 5279 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5280 break; 5281 } 5282 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5283 strcpy(cur_data + spec_size, (char*)&spec[1]); 5284 cur_data = argptr + spec->next; 5285 spec = (void*)host_dm + host_dm->data_start + next; 5286 } 5287 break; 5288 } 5289 case DM_TABLE_DEPS: 5290 { 5291 void *hdata = (void*)host_dm + host_dm->data_start; 5292 int count = *(uint32_t*)hdata; 5293 uint64_t *hdev = hdata + 8; 5294 uint64_t *gdev = argptr + 8; 5295 int i; 5296 5297 *(uint32_t*)argptr = tswap32(count); 5298 for (i = 0; i < count; i++) { 5299 *gdev = tswap64(*hdev); 5300 gdev++; 5301 hdev++; 5302 } 5303 break; 5304 } 5305 case 
DM_LIST_VERSIONS: 5306 { 5307 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5308 uint32_t remaining_data = guest_data_size; 5309 void *cur_data = argptr; 5310 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5311 int vers_size = thunk_type_size(arg_type, 0); 5312 5313 while (1) { 5314 uint32_t next = vers->next; 5315 if (next) { 5316 vers->next = vers_size + (strlen(vers->name) + 1); 5317 } 5318 if (remaining_data < vers->next) { 5319 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5320 break; 5321 } 5322 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5323 strcpy(cur_data + vers_size, vers->name); 5324 cur_data += vers->next; 5325 remaining_data -= vers->next; 5326 if (!next) { 5327 break; 5328 } 5329 vers = (void*)vers + next; 5330 } 5331 break; 5332 } 5333 default: 5334 unlock_user(argptr, guest_data, 0); 5335 ret = -TARGET_EINVAL; 5336 goto out; 5337 } 5338 unlock_user(argptr, guest_data, guest_data_size); 5339 5340 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5341 if (!argptr) { 5342 ret = -TARGET_EFAULT; 5343 goto out; 5344 } 5345 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5346 unlock_user(argptr, arg, target_size); 5347 } 5348 out: 5349 g_free(big_buf); 5350 return ret; 5351 } 5352 5353 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5354 int cmd, abi_long arg) 5355 { 5356 void *argptr; 5357 int target_size; 5358 const argtype *arg_type = ie->arg_type; 5359 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5360 abi_long ret; 5361 5362 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5363 struct blkpg_partition host_part; 5364 5365 /* Read and convert blkpg */ 5366 arg_type++; 5367 target_size = thunk_type_size(arg_type, 0); 5368 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5369 if (!argptr) { 5370 ret = -TARGET_EFAULT; 5371 goto out; 5372 } 5373 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5374 unlock_user(argptr, 
arg, 0); 5375 5376 switch (host_blkpg->op) { 5377 case BLKPG_ADD_PARTITION: 5378 case BLKPG_DEL_PARTITION: 5379 /* payload is struct blkpg_partition */ 5380 break; 5381 default: 5382 /* Unknown opcode */ 5383 ret = -TARGET_EINVAL; 5384 goto out; 5385 } 5386 5387 /* Read and convert blkpg->data */ 5388 arg = (abi_long)(uintptr_t)host_blkpg->data; 5389 target_size = thunk_type_size(part_arg_type, 0); 5390 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5391 if (!argptr) { 5392 ret = -TARGET_EFAULT; 5393 goto out; 5394 } 5395 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5396 unlock_user(argptr, arg, 0); 5397 5398 /* Swizzle the data pointer to our local copy and call! */ 5399 host_blkpg->data = &host_part; 5400 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5401 5402 out: 5403 return ret; 5404 } 5405 5406 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5407 int fd, int cmd, abi_long arg) 5408 { 5409 const argtype *arg_type = ie->arg_type; 5410 const StructEntry *se; 5411 const argtype *field_types; 5412 const int *dst_offsets, *src_offsets; 5413 int target_size; 5414 void *argptr; 5415 abi_ulong *target_rt_dev_ptr = NULL; 5416 unsigned long *host_rt_dev_ptr = NULL; 5417 abi_long ret; 5418 int i; 5419 5420 assert(ie->access == IOC_W); 5421 assert(*arg_type == TYPE_PTR); 5422 arg_type++; 5423 assert(*arg_type == TYPE_STRUCT); 5424 target_size = thunk_type_size(arg_type, 0); 5425 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5426 if (!argptr) { 5427 return -TARGET_EFAULT; 5428 } 5429 arg_type++; 5430 assert(*arg_type == (int)STRUCT_rtentry); 5431 se = struct_entries + *arg_type++; 5432 assert(se->convert[0] == NULL); 5433 /* convert struct here to be able to catch rt_dev string */ 5434 field_types = se->field_types; 5435 dst_offsets = se->field_offsets[THUNK_HOST]; 5436 src_offsets = se->field_offsets[THUNK_TARGET]; 5437 for (i = 0; i < se->nb_fields; i++) { 5438 if (dst_offsets[i] == offsetof(struct 
rtentry, rt_dev)) {
            /* rt_dev: replace the guest string pointer with a host one */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* the loop above always visits the rt_dev field, so both are set */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

/* KDSIGACCEPT: the argument is a signal number; map it to the host's. */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

/*
 * SIOCGSTAMP: fetch the last-packet timestamp and write it back in
 * either the old (timeval) or new (timeval64) target layout.
 */
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

/* SIOCGSTAMPNS: nanosecond-resolution variant of the above. */
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else{
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

#ifdef TIOCGPTPEER
/* TIOCGPTPEER: the argument is an open(2)-style flag word, not a pointer. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H

/*
 * Release the three locked guest string buffers of a drm_version;
 * copy the data back only when 'copy' is true (i.e. on success).
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ?
host_ver->desc_len : 0);
}

/*
 * Populate a host drm_version from the target's: copy the three length
 * fields and lock the corresponding guest buffers for writing.  On
 * failure any already-locked buffers are released without copy-back.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}

/* Copy result fields back to the target and release buffers with copy. */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

/* Generic DRM ioctl handler; only DRM_IOCTL_VERSION is emulated. */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

/*
 * DRM_IOCTL_I915_GETPARAM: 'value' is an embedded pointer, so route it
 * through a host-local int and store the result back to the guest.
 */
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

/* i915 DRM ioctl dispatcher; only GETPARAM is emulated. */
static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif

/*
 * TUNSETTXFILTER: struct tun_filter carries a variable-length array of
 * MAC addresses ('count' entries of ETH_ALEN bytes) after the header.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags =
tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* reject filters whose address array would overflow buf_temp */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}

/* Table of supported ioctls, generated from ioctls.h; zero-terminated. */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};

/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* linear search of the ioctl table for the target command */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* command has a dedicated handler (IOCTL_SPECIAL) */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.
         */
        return -TARGET_ENOSYS;
    }

    /* generic path: convert the argument per its thunk description */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only result: convert back to target on success */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only argument: convert guest data to host first */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* read-write: convert in, call, convert out on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

/* termios input-flag translation: target bit, target value, host bit, host value */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT,
BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};

/* termios output-flag translation; delay selectors use mask + value pairs */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};

/* termios control-flag translation: baud rates share the CBAUD mask */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

/* termios local-flag translation */
static const bitmask_transtbl lflag_tbl[] = {
  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
  { TARGET_ECHOK, TARGET_ECHOK, ECHOK,
ECHOK },
  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
  { 0, 0, 0, 0 }
};

/* Convert a guest struct termios into host layout (flags and c_cc). */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* remap control characters: index values differ between ABIs */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

/* Convert a host struct termios into guest layout (inverse of the above). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

/* Thunk descriptor registering the two termios converters above. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print =
print_termios,
};

/* mmap flag translation; entries map target MAP_* bits to host MAP_* bits */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};

/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* modify_ldt(func=0): copy the emulated LDT out to the guest buffer. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
/*
 * modify_ldt(func=1/0x11): install an LDT descriptor from the guest's
 * user_desc.  'oldmode' selects the legacy flag semantics.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the packed flags word (same layout as the kernel's) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same
code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the fields into the two 32-bit halves of a segment descriptor */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);   /* legacy semantics */
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);   /* modern semantics */
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

#if defined(TARGET_ABI32)
/*
 * set_thread_area: install a TLS descriptor into the emulated GDT.
 * entry_number == -1 asks us to pick a free TLS slot and report it back.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* find a free TLS slot and tell the guest which one we used */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* get_thread_area: decode the selected GDT TLS descriptor back into
   a user_desc for the guest. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* unpack the descriptor bits (inverse of do_set_thread_area) */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) &
1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}

/* arch_prctl is x86_64-only; reject it on 32-bit targets. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
/* x86_64 arch_prctl: get/set the FS and GS segment bases. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32 */
#endif /* defined(TARGET_I386) */

/*
 * These constants are generic.  Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME    15
# define PR_GET_NAME    16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR   (1 << 0)
# define PR_FP_MODE_FRE  (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL  50
# define PR_SVE_GET_VL  51
# define PR_SVE_VL_LEN_MASK  0xffff
# define PR_SVE_VL_INHERIT   (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS  54
# define PR_PAC_APIAKEY   (1 << 0)
# define PR_PAC_APIBKEY   (1 << 1)
# define PR_PAC_APDAKEY   (1 << 2)
# define PR_PAC_APDBKEY   (1 << 3)
# define PR_PAC_APGAKEY   (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT       1
# define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT       3
# define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL  63
# define PR_SME_GET_VL  64
# define PR_SME_VL_LEN_MASK  0xffff
# define PR_SME_VL_INHERIT   (1 << 17)
#endif

#include "target_prctl.h"

/* Default handler for per-target prctls with no extra argument. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}

/* Default handler for per-target prctls taking one argument. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}

#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif

/*
 * Emulate prctl(2).  Options with pointer or signal arguments are
 * converted explicitly; per-target options dispatch to the handlers
 * above; anything that could interfere with the emulation is refused.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* thread name buffer is 16 bytes including the NUL */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on.  */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}

#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from the parent to a newly cloned thread. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

/* Entry point of a thread created by emulated clone(). */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* report the new tid where CLONE_CHILD_SETTID/PARENT_SETTID asked */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.
*/ 6591 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 6592 /* Signal to the parent that we're ready. */ 6593 pthread_mutex_lock(&info->mutex); 6594 pthread_cond_broadcast(&info->cond); 6595 pthread_mutex_unlock(&info->mutex); 6596 /* Wait until the parent has finished initializing the tls state. */ 6597 pthread_mutex_lock(&clone_lock); 6598 pthread_mutex_unlock(&clone_lock); 6599 cpu_loop(env); 6600 /* never exits */ 6601 return NULL; 6602 } 6603 6604 /* do_fork() Must return host values and target errnos (unlike most 6605 do_*() functions). */ 6606 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 6607 abi_ulong parent_tidptr, target_ulong newtls, 6608 abi_ulong child_tidptr) 6609 { 6610 CPUState *cpu = env_cpu(env); 6611 int ret; 6612 TaskState *ts; 6613 CPUState *new_cpu; 6614 CPUArchState *new_env; 6615 sigset_t sigmask; 6616 6617 flags &= ~CLONE_IGNORED_FLAGS; 6618 6619 /* Emulate vfork() with fork() */ 6620 if (flags & CLONE_VFORK) 6621 flags &= ~(CLONE_VFORK | CLONE_VM); 6622 6623 if (flags & CLONE_VM) { 6624 TaskState *parent_ts = (TaskState *)cpu->opaque; 6625 new_thread_info info; 6626 pthread_attr_t attr; 6627 6628 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) || 6629 (flags & CLONE_INVALID_THREAD_FLAGS)) { 6630 return -TARGET_EINVAL; 6631 } 6632 6633 ts = g_new0(TaskState, 1); 6634 init_task_state(ts); 6635 6636 /* Grab a mutex so that thread setup appears atomic. */ 6637 pthread_mutex_lock(&clone_lock); 6638 6639 /* 6640 * If this is our first additional thread, we need to ensure we 6641 * generate code for parallel execution and flush old translations. 6642 * Do this now so that the copy gets CF_PARALLEL too. 6643 */ 6644 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 6645 cpu->tcg_cflags |= CF_PARALLEL; 6646 tb_flush(cpu); 6647 } 6648 6649 /* we create a new CPU instance. */ 6650 new_env = cpu_copy(env); 6651 /* Init regs that differ from the parent. 
*/ 6652 cpu_clone_regs_child(new_env, newsp, flags); 6653 cpu_clone_regs_parent(env, flags); 6654 new_cpu = env_cpu(new_env); 6655 new_cpu->opaque = ts; 6656 ts->bprm = parent_ts->bprm; 6657 ts->info = parent_ts->info; 6658 ts->signal_mask = parent_ts->signal_mask; 6659 6660 if (flags & CLONE_CHILD_CLEARTID) { 6661 ts->child_tidptr = child_tidptr; 6662 } 6663 6664 if (flags & CLONE_SETTLS) { 6665 cpu_set_tls (new_env, newtls); 6666 } 6667 6668 memset(&info, 0, sizeof(info)); 6669 pthread_mutex_init(&info.mutex, NULL); 6670 pthread_mutex_lock(&info.mutex); 6671 pthread_cond_init(&info.cond, NULL); 6672 info.env = new_env; 6673 if (flags & CLONE_CHILD_SETTID) { 6674 info.child_tidptr = child_tidptr; 6675 } 6676 if (flags & CLONE_PARENT_SETTID) { 6677 info.parent_tidptr = parent_tidptr; 6678 } 6679 6680 ret = pthread_attr_init(&attr); 6681 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 6682 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 6683 /* It is not safe to deliver signals until the child has finished 6684 initializing, so temporarily block all signals. */ 6685 sigfillset(&sigmask); 6686 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 6687 cpu->random_seed = qemu_guest_random_seed_thread_part1(); 6688 6689 ret = pthread_create(&info.thread, &attr, clone_func, &info); 6690 /* TODO: Free new CPU state if thread creation failed. */ 6691 6692 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 6693 pthread_attr_destroy(&attr); 6694 if (ret == 0) { 6695 /* Wait for the child to initialize. 
*/ 6696 pthread_cond_wait(&info.cond, &info.mutex); 6697 ret = info.tid; 6698 } else { 6699 ret = -1; 6700 } 6701 pthread_mutex_unlock(&info.mutex); 6702 pthread_cond_destroy(&info.cond); 6703 pthread_mutex_destroy(&info.mutex); 6704 pthread_mutex_unlock(&clone_lock); 6705 } else { 6706 /* if no CLONE_VM, we consider it is a fork */ 6707 if (flags & CLONE_INVALID_FORK_FLAGS) { 6708 return -TARGET_EINVAL; 6709 } 6710 6711 /* We can't support custom termination signals */ 6712 if ((flags & CSIGNAL) != TARGET_SIGCHLD) { 6713 return -TARGET_EINVAL; 6714 } 6715 6716 if (block_signals()) { 6717 return -QEMU_ERESTARTSYS; 6718 } 6719 6720 fork_start(); 6721 ret = fork(); 6722 if (ret == 0) { 6723 /* Child Process. */ 6724 cpu_clone_regs_child(env, newsp, flags); 6725 fork_end(1); 6726 /* There is a race condition here. The parent process could 6727 theoretically read the TID in the child process before the child 6728 tid is set. This would require using either ptrace 6729 (not implemented) or having *_tidptr to point at a shared memory 6730 mapping. We can't repeat the spinlock hack used above because 6731 the child process gets its own copy of the lock. */ 6732 if (flags & CLONE_CHILD_SETTID) 6733 put_user_u32(sys_gettid(), child_tidptr); 6734 if (flags & CLONE_PARENT_SETTID) 6735 put_user_u32(sys_gettid(), parent_tidptr); 6736 ts = (TaskState *)cpu->opaque; 6737 if (flags & CLONE_SETTLS) 6738 cpu_set_tls (env, newtls); 6739 if (flags & CLONE_CHILD_CLEARTID) 6740 ts->child_tidptr = child_tidptr; 6741 } else { 6742 cpu_clone_regs_parent(env, flags); 6743 fork_end(0); 6744 } 6745 } 6746 return ret; 6747 } 6748 6749 /* warning : doesn't handle linux specific flags... 
*/ 6750 static int target_to_host_fcntl_cmd(int cmd) 6751 { 6752 int ret; 6753 6754 switch(cmd) { 6755 case TARGET_F_DUPFD: 6756 case TARGET_F_GETFD: 6757 case TARGET_F_SETFD: 6758 case TARGET_F_GETFL: 6759 case TARGET_F_SETFL: 6760 case TARGET_F_OFD_GETLK: 6761 case TARGET_F_OFD_SETLK: 6762 case TARGET_F_OFD_SETLKW: 6763 ret = cmd; 6764 break; 6765 case TARGET_F_GETLK: 6766 ret = F_GETLK64; 6767 break; 6768 case TARGET_F_SETLK: 6769 ret = F_SETLK64; 6770 break; 6771 case TARGET_F_SETLKW: 6772 ret = F_SETLKW64; 6773 break; 6774 case TARGET_F_GETOWN: 6775 ret = F_GETOWN; 6776 break; 6777 case TARGET_F_SETOWN: 6778 ret = F_SETOWN; 6779 break; 6780 case TARGET_F_GETSIG: 6781 ret = F_GETSIG; 6782 break; 6783 case TARGET_F_SETSIG: 6784 ret = F_SETSIG; 6785 break; 6786 #if TARGET_ABI_BITS == 32 6787 case TARGET_F_GETLK64: 6788 ret = F_GETLK64; 6789 break; 6790 case TARGET_F_SETLK64: 6791 ret = F_SETLK64; 6792 break; 6793 case TARGET_F_SETLKW64: 6794 ret = F_SETLKW64; 6795 break; 6796 #endif 6797 case TARGET_F_SETLEASE: 6798 ret = F_SETLEASE; 6799 break; 6800 case TARGET_F_GETLEASE: 6801 ret = F_GETLEASE; 6802 break; 6803 #ifdef F_DUPFD_CLOEXEC 6804 case TARGET_F_DUPFD_CLOEXEC: 6805 ret = F_DUPFD_CLOEXEC; 6806 break; 6807 #endif 6808 case TARGET_F_NOTIFY: 6809 ret = F_NOTIFY; 6810 break; 6811 #ifdef F_GETOWN_EX 6812 case TARGET_F_GETOWN_EX: 6813 ret = F_GETOWN_EX; 6814 break; 6815 #endif 6816 #ifdef F_SETOWN_EX 6817 case TARGET_F_SETOWN_EX: 6818 ret = F_SETOWN_EX; 6819 break; 6820 #endif 6821 #ifdef F_SETPIPE_SZ 6822 case TARGET_F_SETPIPE_SZ: 6823 ret = F_SETPIPE_SZ; 6824 break; 6825 case TARGET_F_GETPIPE_SZ: 6826 ret = F_GETPIPE_SZ; 6827 break; 6828 #endif 6829 #ifdef F_ADD_SEALS 6830 case TARGET_F_ADD_SEALS: 6831 ret = F_ADD_SEALS; 6832 break; 6833 case TARGET_F_GET_SEALS: 6834 ret = F_GET_SEALS; 6835 break; 6836 #endif 6837 default: 6838 ret = -TARGET_EINVAL; 6839 break; 6840 } 6841 6842 #if defined(__powerpc64__) 6843 /* On PPC64, glibc headers has the F_*LK* defined 
to 12, 13 and 14 and 6844 * is not supported by kernel. The glibc fcntl call actually adjusts 6845 * them to 5, 6 and 7 before making the syscall(). Since we make the 6846 * syscall directly, adjust to what is supported by the kernel. 6847 */ 6848 if (ret >= F_GETLK64 && ret <= F_SETLKW64) { 6849 ret -= F_GETLK64 - 5; 6850 } 6851 #endif 6852 6853 return ret; 6854 } 6855 6856 #define FLOCK_TRANSTBL \ 6857 switch (type) { \ 6858 TRANSTBL_CONVERT(F_RDLCK); \ 6859 TRANSTBL_CONVERT(F_WRLCK); \ 6860 TRANSTBL_CONVERT(F_UNLCK); \ 6861 } 6862 6863 static int target_to_host_flock(int type) 6864 { 6865 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a 6866 FLOCK_TRANSTBL 6867 #undef TRANSTBL_CONVERT 6868 return -TARGET_EINVAL; 6869 } 6870 6871 static int host_to_target_flock(int type) 6872 { 6873 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a 6874 FLOCK_TRANSTBL 6875 #undef TRANSTBL_CONVERT 6876 /* if we don't know how to convert the value coming 6877 * from the host we copy to the target field as-is 6878 */ 6879 return type; 6880 } 6881 6882 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6883 abi_ulong target_flock_addr) 6884 { 6885 struct target_flock *target_fl; 6886 int l_type; 6887 6888 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6889 return -TARGET_EFAULT; 6890 } 6891 6892 __get_user(l_type, &target_fl->l_type); 6893 l_type = target_to_host_flock(l_type); 6894 if (l_type < 0) { 6895 return l_type; 6896 } 6897 fl->l_type = l_type; 6898 __get_user(fl->l_whence, &target_fl->l_whence); 6899 __get_user(fl->l_start, &target_fl->l_start); 6900 __get_user(fl->l_len, &target_fl->l_len); 6901 __get_user(fl->l_pid, &target_fl->l_pid); 6902 unlock_user_struct(target_fl, target_flock_addr, 0); 6903 return 0; 6904 } 6905 6906 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6907 const struct flock64 *fl) 6908 { 6909 struct target_flock *target_fl; 6910 short l_type; 6911 6912 if 
(!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6913 return -TARGET_EFAULT; 6914 } 6915 6916 l_type = host_to_target_flock(fl->l_type); 6917 __put_user(l_type, &target_fl->l_type); 6918 __put_user(fl->l_whence, &target_fl->l_whence); 6919 __put_user(fl->l_start, &target_fl->l_start); 6920 __put_user(fl->l_len, &target_fl->l_len); 6921 __put_user(fl->l_pid, &target_fl->l_pid); 6922 unlock_user_struct(target_fl, target_flock_addr, 1); 6923 return 0; 6924 } 6925 6926 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6927 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6928 6929 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6930 struct target_oabi_flock64 { 6931 abi_short l_type; 6932 abi_short l_whence; 6933 abi_llong l_start; 6934 abi_llong l_len; 6935 abi_int l_pid; 6936 } QEMU_PACKED; 6937 6938 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6939 abi_ulong target_flock_addr) 6940 { 6941 struct target_oabi_flock64 *target_fl; 6942 int l_type; 6943 6944 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6945 return -TARGET_EFAULT; 6946 } 6947 6948 __get_user(l_type, &target_fl->l_type); 6949 l_type = target_to_host_flock(l_type); 6950 if (l_type < 0) { 6951 return l_type; 6952 } 6953 fl->l_type = l_type; 6954 __get_user(fl->l_whence, &target_fl->l_whence); 6955 __get_user(fl->l_start, &target_fl->l_start); 6956 __get_user(fl->l_len, &target_fl->l_len); 6957 __get_user(fl->l_pid, &target_fl->l_pid); 6958 unlock_user_struct(target_fl, target_flock_addr, 0); 6959 return 0; 6960 } 6961 6962 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 6963 const struct flock64 *fl) 6964 { 6965 struct target_oabi_flock64 *target_fl; 6966 short l_type; 6967 6968 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6969 return -TARGET_EFAULT; 6970 } 6971 6972 l_type = host_to_target_flock(fl->l_type); 6973 
__put_user(l_type, &target_fl->l_type); 6974 __put_user(fl->l_whence, &target_fl->l_whence); 6975 __put_user(fl->l_start, &target_fl->l_start); 6976 __put_user(fl->l_len, &target_fl->l_len); 6977 __put_user(fl->l_pid, &target_fl->l_pid); 6978 unlock_user_struct(target_fl, target_flock_addr, 1); 6979 return 0; 6980 } 6981 #endif 6982 6983 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6984 abi_ulong target_flock_addr) 6985 { 6986 struct target_flock64 *target_fl; 6987 int l_type; 6988 6989 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6990 return -TARGET_EFAULT; 6991 } 6992 6993 __get_user(l_type, &target_fl->l_type); 6994 l_type = target_to_host_flock(l_type); 6995 if (l_type < 0) { 6996 return l_type; 6997 } 6998 fl->l_type = l_type; 6999 __get_user(fl->l_whence, &target_fl->l_whence); 7000 __get_user(fl->l_start, &target_fl->l_start); 7001 __get_user(fl->l_len, &target_fl->l_len); 7002 __get_user(fl->l_pid, &target_fl->l_pid); 7003 unlock_user_struct(target_fl, target_flock_addr, 0); 7004 return 0; 7005 } 7006 7007 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 7008 const struct flock64 *fl) 7009 { 7010 struct target_flock64 *target_fl; 7011 short l_type; 7012 7013 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 7014 return -TARGET_EFAULT; 7015 } 7016 7017 l_type = host_to_target_flock(fl->l_type); 7018 __put_user(l_type, &target_fl->l_type); 7019 __put_user(fl->l_whence, &target_fl->l_whence); 7020 __put_user(fl->l_start, &target_fl->l_start); 7021 __put_user(fl->l_len, &target_fl->l_len); 7022 __put_user(fl->l_pid, &target_fl->l_pid); 7023 unlock_user_struct(target_fl, target_flock_addr, 1); 7024 return 0; 7025 } 7026 7027 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 7028 { 7029 struct flock64 fl64; 7030 #ifdef F_GETOWN_EX 7031 struct f_owner_ex fox; 7032 struct target_f_owner_ex *target_fox; 7033 #endif 7034 abi_long ret; 7035 int host_cmd = 
target_to_host_fcntl_cmd(cmd); 7036 7037 if (host_cmd == -TARGET_EINVAL) 7038 return host_cmd; 7039 7040 switch(cmd) { 7041 case TARGET_F_GETLK: 7042 ret = copy_from_user_flock(&fl64, arg); 7043 if (ret) { 7044 return ret; 7045 } 7046 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7047 if (ret == 0) { 7048 ret = copy_to_user_flock(arg, &fl64); 7049 } 7050 break; 7051 7052 case TARGET_F_SETLK: 7053 case TARGET_F_SETLKW: 7054 ret = copy_from_user_flock(&fl64, arg); 7055 if (ret) { 7056 return ret; 7057 } 7058 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7059 break; 7060 7061 case TARGET_F_GETLK64: 7062 case TARGET_F_OFD_GETLK: 7063 ret = copy_from_user_flock64(&fl64, arg); 7064 if (ret) { 7065 return ret; 7066 } 7067 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7068 if (ret == 0) { 7069 ret = copy_to_user_flock64(arg, &fl64); 7070 } 7071 break; 7072 case TARGET_F_SETLK64: 7073 case TARGET_F_SETLKW64: 7074 case TARGET_F_OFD_SETLK: 7075 case TARGET_F_OFD_SETLKW: 7076 ret = copy_from_user_flock64(&fl64, arg); 7077 if (ret) { 7078 return ret; 7079 } 7080 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7081 break; 7082 7083 case TARGET_F_GETFL: 7084 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7085 if (ret >= 0) { 7086 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 7087 } 7088 break; 7089 7090 case TARGET_F_SETFL: 7091 ret = get_errno(safe_fcntl(fd, host_cmd, 7092 target_to_host_bitmask(arg, 7093 fcntl_flags_tbl))); 7094 break; 7095 7096 #ifdef F_GETOWN_EX 7097 case TARGET_F_GETOWN_EX: 7098 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7099 if (ret >= 0) { 7100 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 7101 return -TARGET_EFAULT; 7102 target_fox->type = tswap32(fox.type); 7103 target_fox->pid = tswap32(fox.pid); 7104 unlock_user_struct(target_fox, arg, 1); 7105 } 7106 break; 7107 #endif 7108 7109 #ifdef F_SETOWN_EX 7110 case TARGET_F_SETOWN_EX: 7111 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 7112 return -TARGET_EFAULT; 
7113 fox.type = tswap32(target_fox->type); 7114 fox.pid = tswap32(target_fox->pid); 7115 unlock_user_struct(target_fox, arg, 0); 7116 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7117 break; 7118 #endif 7119 7120 case TARGET_F_SETSIG: 7121 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg))); 7122 break; 7123 7124 case TARGET_F_GETSIG: 7125 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg))); 7126 break; 7127 7128 case TARGET_F_SETOWN: 7129 case TARGET_F_GETOWN: 7130 case TARGET_F_SETLEASE: 7131 case TARGET_F_GETLEASE: 7132 case TARGET_F_SETPIPE_SZ: 7133 case TARGET_F_GETPIPE_SZ: 7134 case TARGET_F_ADD_SEALS: 7135 case TARGET_F_GET_SEALS: 7136 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7137 break; 7138 7139 default: 7140 ret = get_errno(safe_fcntl(fd, cmd, arg)); 7141 break; 7142 } 7143 return ret; 7144 } 7145 7146 #ifdef USE_UID16 7147 7148 static inline int high2lowuid(int uid) 7149 { 7150 if (uid > 65535) 7151 return 65534; 7152 else 7153 return uid; 7154 } 7155 7156 static inline int high2lowgid(int gid) 7157 { 7158 if (gid > 65535) 7159 return 65534; 7160 else 7161 return gid; 7162 } 7163 7164 static inline int low2highuid(int uid) 7165 { 7166 if ((int16_t)uid == -1) 7167 return -1; 7168 else 7169 return uid; 7170 } 7171 7172 static inline int low2highgid(int gid) 7173 { 7174 if ((int16_t)gid == -1) 7175 return -1; 7176 else 7177 return gid; 7178 } 7179 static inline int tswapid(int id) 7180 { 7181 return tswap16(id); 7182 } 7183 7184 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 7185 7186 #else /* !USE_UID16 */ 7187 static inline int high2lowuid(int uid) 7188 { 7189 return uid; 7190 } 7191 static inline int high2lowgid(int gid) 7192 { 7193 return gid; 7194 } 7195 static inline int low2highuid(int uid) 7196 { 7197 return uid; 7198 } 7199 static inline int low2highgid(int gid) 7200 { 7201 return gid; 7202 } 7203 static inline int tswapid(int id) 7204 { 7205 return tswap32(id); 7206 } 7207 7208 #define 
put_user_id(x, gaddr) put_user_u32(x, gaddr) 7209 7210 #endif /* USE_UID16 */ 7211 7212 /* We must do direct syscalls for setting UID/GID, because we want to 7213 * implement the Linux system call semantics of "change only for this thread", 7214 * not the libc/POSIX semantics of "change for all threads in process". 7215 * (See http://ewontfix.com/17/ for more details.) 7216 * We use the 32-bit version of the syscalls if present; if it is not 7217 * then either the host architecture supports 32-bit UIDs natively with 7218 * the standard syscall, or the 16-bit UID is the best we can do. 7219 */ 7220 #ifdef __NR_setuid32 7221 #define __NR_sys_setuid __NR_setuid32 7222 #else 7223 #define __NR_sys_setuid __NR_setuid 7224 #endif 7225 #ifdef __NR_setgid32 7226 #define __NR_sys_setgid __NR_setgid32 7227 #else 7228 #define __NR_sys_setgid __NR_setgid 7229 #endif 7230 #ifdef __NR_setresuid32 7231 #define __NR_sys_setresuid __NR_setresuid32 7232 #else 7233 #define __NR_sys_setresuid __NR_setresuid 7234 #endif 7235 #ifdef __NR_setresgid32 7236 #define __NR_sys_setresgid __NR_setresgid32 7237 #else 7238 #define __NR_sys_setresgid __NR_setresgid 7239 #endif 7240 7241 _syscall1(int, sys_setuid, uid_t, uid) 7242 _syscall1(int, sys_setgid, gid_t, gid) 7243 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 7244 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 7245 7246 void syscall_init(void) 7247 { 7248 IOCTLEntry *ie; 7249 const argtype *arg_type; 7250 int size; 7251 7252 thunk_init(STRUCT_MAX); 7253 7254 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 7255 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 7256 #include "syscall_types.h" 7257 #undef STRUCT 7258 #undef STRUCT_SPECIAL 7259 7260 /* we patch the ioctl size if necessary. 
We rely on the fact that 7261 no ioctl has all the bits at '1' in the size field */ 7262 ie = ioctl_entries; 7263 while (ie->target_cmd != 0) { 7264 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 7265 TARGET_IOC_SIZEMASK) { 7266 arg_type = ie->arg_type; 7267 if (arg_type[0] != TYPE_PTR) { 7268 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 7269 ie->target_cmd); 7270 exit(1); 7271 } 7272 arg_type++; 7273 size = thunk_type_size(arg_type, 0); 7274 ie->target_cmd = (ie->target_cmd & 7275 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 7276 (size << TARGET_IOC_SIZESHIFT); 7277 } 7278 7279 /* automatic consistency check if same arch */ 7280 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 7281 (defined(__x86_64__) && defined(TARGET_X86_64)) 7282 if (unlikely(ie->target_cmd != ie->host_cmd)) { 7283 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 7284 ie->name, ie->target_cmd, ie->host_cmd); 7285 } 7286 #endif 7287 ie++; 7288 } 7289 } 7290 7291 #ifdef TARGET_NR_truncate64 7292 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1, 7293 abi_long arg2, 7294 abi_long arg3, 7295 abi_long arg4) 7296 { 7297 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) { 7298 arg2 = arg3; 7299 arg3 = arg4; 7300 } 7301 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 7302 } 7303 #endif 7304 7305 #ifdef TARGET_NR_ftruncate64 7306 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1, 7307 abi_long arg2, 7308 abi_long arg3, 7309 abi_long arg4) 7310 { 7311 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 7312 arg2 = arg3; 7313 arg3 = arg4; 7314 } 7315 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 7316 } 7317 #endif 7318 7319 #if defined(TARGET_NR_timer_settime) || \ 7320 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)) 7321 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its, 7322 
abi_ulong target_addr) 7323 { 7324 if (target_to_host_timespec(&host_its->it_interval, target_addr + 7325 offsetof(struct target_itimerspec, 7326 it_interval)) || 7327 target_to_host_timespec(&host_its->it_value, target_addr + 7328 offsetof(struct target_itimerspec, 7329 it_value))) { 7330 return -TARGET_EFAULT; 7331 } 7332 7333 return 0; 7334 } 7335 #endif 7336 7337 #if defined(TARGET_NR_timer_settime64) || \ 7338 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) 7339 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its, 7340 abi_ulong target_addr) 7341 { 7342 if (target_to_host_timespec64(&host_its->it_interval, target_addr + 7343 offsetof(struct target__kernel_itimerspec, 7344 it_interval)) || 7345 target_to_host_timespec64(&host_its->it_value, target_addr + 7346 offsetof(struct target__kernel_itimerspec, 7347 it_value))) { 7348 return -TARGET_EFAULT; 7349 } 7350 7351 return 0; 7352 } 7353 #endif 7354 7355 #if ((defined(TARGET_NR_timerfd_gettime) || \ 7356 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \ 7357 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime) 7358 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 7359 struct itimerspec *host_its) 7360 { 7361 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7362 it_interval), 7363 &host_its->it_interval) || 7364 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7365 it_value), 7366 &host_its->it_value)) { 7367 return -TARGET_EFAULT; 7368 } 7369 return 0; 7370 } 7371 #endif 7372 7373 #if ((defined(TARGET_NR_timerfd_gettime64) || \ 7374 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \ 7375 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64) 7376 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr, 7377 struct itimerspec *host_its) 7378 { 7379 if (host_to_target_timespec64(target_addr + 7380 
offsetof(struct target__kernel_itimerspec, 7381 it_interval), 7382 &host_its->it_interval) || 7383 host_to_target_timespec64(target_addr + 7384 offsetof(struct target__kernel_itimerspec, 7385 it_value), 7386 &host_its->it_value)) { 7387 return -TARGET_EFAULT; 7388 } 7389 return 0; 7390 } 7391 #endif 7392 7393 #if defined(TARGET_NR_adjtimex) || \ 7394 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)) 7395 static inline abi_long target_to_host_timex(struct timex *host_tx, 7396 abi_long target_addr) 7397 { 7398 struct target_timex *target_tx; 7399 7400 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7401 return -TARGET_EFAULT; 7402 } 7403 7404 __get_user(host_tx->modes, &target_tx->modes); 7405 __get_user(host_tx->offset, &target_tx->offset); 7406 __get_user(host_tx->freq, &target_tx->freq); 7407 __get_user(host_tx->maxerror, &target_tx->maxerror); 7408 __get_user(host_tx->esterror, &target_tx->esterror); 7409 __get_user(host_tx->status, &target_tx->status); 7410 __get_user(host_tx->constant, &target_tx->constant); 7411 __get_user(host_tx->precision, &target_tx->precision); 7412 __get_user(host_tx->tolerance, &target_tx->tolerance); 7413 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7414 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7415 __get_user(host_tx->tick, &target_tx->tick); 7416 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7417 __get_user(host_tx->jitter, &target_tx->jitter); 7418 __get_user(host_tx->shift, &target_tx->shift); 7419 __get_user(host_tx->stabil, &target_tx->stabil); 7420 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7421 __get_user(host_tx->calcnt, &target_tx->calcnt); 7422 __get_user(host_tx->errcnt, &target_tx->errcnt); 7423 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7424 __get_user(host_tx->tai, &target_tx->tai); 7425 7426 unlock_user_struct(target_tx, target_addr, 0); 7427 return 0; 7428 } 7429 7430 static inline abi_long host_to_target_timex(abi_long target_addr, 
                                             struct timex *host_tx)
{
    /* Tail of host_to_target_timex(): copy adjtimex results back to the
     * guest struct target_timex, byte-swapping each field. */
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif


#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct target__kernel_timex (64-bit time_t variant) at
 * target_addr into *host_tx.  Returns 0 on success, -TARGET_EFAULT if
 * the guest address is not accessible.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /* The embedded 64-bit timeval is converted by its own helper. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

/*
 * Write *host_tx back to a guest struct target__kernel_timex at
 * target_addr.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif

/* Older glibc lacks the named sigev_notify_thread_id member. */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

/*
 * Convert a guest struct target_sigevent at target_addr into the host
 * *host_sevp.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits into the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    /* MCL_ONFAULT only exists on newer hosts. */
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat into the guest's 64-bit stat structure at
 * target_addr.  On 32-bit ARM the EABI variant has different layout,
 * hence the separate branch.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry a second, truncated inode field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy an (already host-endian) struct target_statx in *host_stx out to
 * the guest buffer at target_addr, swapping each field to guest order.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec,
&target_stx->stx_mtime.tv_nsec); 7705 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major); 7706 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor); 7707 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major); 7708 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor); 7709 7710 unlock_user_struct(target_stx, target_addr, 1); 7711 7712 return 0; 7713 } 7714 #endif 7715 7716 static int do_sys_futex(int *uaddr, int op, int val, 7717 const struct timespec *timeout, int *uaddr2, 7718 int val3) 7719 { 7720 #if HOST_LONG_BITS == 64 7721 #if defined(__NR_futex) 7722 /* always a 64-bit time_t, it doesn't define _time64 version */ 7723 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7724 7725 #endif 7726 #else /* HOST_LONG_BITS == 64 */ 7727 #if defined(__NR_futex_time64) 7728 if (sizeof(timeout->tv_sec) == 8) { 7729 /* _time64 function on 32bit arch */ 7730 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3); 7731 } 7732 #endif 7733 #if defined(__NR_futex) 7734 /* old function on 32bit arch */ 7735 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7736 #endif 7737 #endif /* HOST_LONG_BITS == 64 */ 7738 g_assert_not_reached(); 7739 } 7740 7741 static int do_safe_futex(int *uaddr, int op, int val, 7742 const struct timespec *timeout, int *uaddr2, 7743 int val3) 7744 { 7745 #if HOST_LONG_BITS == 64 7746 #if defined(__NR_futex) 7747 /* always a 64-bit time_t, it doesn't define _time64 version */ 7748 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7749 #endif 7750 #else /* HOST_LONG_BITS == 64 */ 7751 #if defined(__NR_futex_time64) 7752 if (sizeof(timeout->tv_sec) == 8) { 7753 /* _time64 function on 32bit arch */ 7754 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2, 7755 val3)); 7756 } 7757 #endif 7758 #if defined(__NR_futex) 7759 /* old function on 32bit arch */ 7760 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7761 #endif 7762 #endif /* 
HOST_LONG_BITS == 64 */ 7763 return -TARGET_ENOSYS; 7764 } 7765 7766 /* ??? Using host futex calls even when target atomic operations 7767 are not really atomic probably breaks things. However implementing 7768 futexes locally would make futexes shared between multiple processes 7769 tricky. However they're probably useless because guest atomic 7770 operations won't work either. */ 7771 #if defined(TARGET_NR_futex) 7772 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val, 7773 target_ulong timeout, target_ulong uaddr2, int val3) 7774 { 7775 struct timespec ts, *pts; 7776 int base_op; 7777 7778 /* ??? We assume FUTEX_* constants are the same on both host 7779 and target. */ 7780 #ifdef FUTEX_CMD_MASK 7781 base_op = op & FUTEX_CMD_MASK; 7782 #else 7783 base_op = op; 7784 #endif 7785 switch (base_op) { 7786 case FUTEX_WAIT: 7787 case FUTEX_WAIT_BITSET: 7788 if (timeout) { 7789 pts = &ts; 7790 target_to_host_timespec(pts, timeout); 7791 } else { 7792 pts = NULL; 7793 } 7794 return do_safe_futex(g2h(cpu, uaddr), 7795 op, tswap32(val), pts, NULL, val3); 7796 case FUTEX_WAKE: 7797 return do_safe_futex(g2h(cpu, uaddr), 7798 op, val, NULL, NULL, 0); 7799 case FUTEX_FD: 7800 return do_safe_futex(g2h(cpu, uaddr), 7801 op, val, NULL, NULL, 0); 7802 case FUTEX_REQUEUE: 7803 case FUTEX_CMP_REQUEUE: 7804 case FUTEX_WAKE_OP: 7805 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7806 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7807 But the prototype takes a `struct timespec *'; insert casts 7808 to satisfy the compiler. We do not need to tswap TIMEOUT 7809 since it's not compared to guest memory. */ 7810 pts = (struct timespec *)(uintptr_t) timeout; 7811 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), 7812 (base_op == FUTEX_CMP_REQUEUE 7813 ? 
tswap32(val3) : val3)); 7814 default: 7815 return -TARGET_ENOSYS; 7816 } 7817 } 7818 #endif 7819 7820 #if defined(TARGET_NR_futex_time64) 7821 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op, 7822 int val, target_ulong timeout, 7823 target_ulong uaddr2, int val3) 7824 { 7825 struct timespec ts, *pts; 7826 int base_op; 7827 7828 /* ??? We assume FUTEX_* constants are the same on both host 7829 and target. */ 7830 #ifdef FUTEX_CMD_MASK 7831 base_op = op & FUTEX_CMD_MASK; 7832 #else 7833 base_op = op; 7834 #endif 7835 switch (base_op) { 7836 case FUTEX_WAIT: 7837 case FUTEX_WAIT_BITSET: 7838 if (timeout) { 7839 pts = &ts; 7840 if (target_to_host_timespec64(pts, timeout)) { 7841 return -TARGET_EFAULT; 7842 } 7843 } else { 7844 pts = NULL; 7845 } 7846 return do_safe_futex(g2h(cpu, uaddr), op, 7847 tswap32(val), pts, NULL, val3); 7848 case FUTEX_WAKE: 7849 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); 7850 case FUTEX_FD: 7851 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); 7852 case FUTEX_REQUEUE: 7853 case FUTEX_CMP_REQUEUE: 7854 case FUTEX_WAKE_OP: 7855 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7856 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7857 But the prototype takes a `struct timespec *'; insert casts 7858 to satisfy the compiler. We do not need to tswap TIMEOUT 7859 since it's not compared to guest memory. */ 7860 pts = (struct timespec *)(uintptr_t) timeout; 7861 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), 7862 (base_op == FUTEX_CMP_REQUEUE 7863 ? 
tswap32(val3) : val3)); 7864 default: 7865 return -TARGET_ENOSYS; 7866 } 7867 } 7868 #endif 7869 7870 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7871 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7872 abi_long handle, abi_long mount_id, 7873 abi_long flags) 7874 { 7875 struct file_handle *target_fh; 7876 struct file_handle *fh; 7877 int mid = 0; 7878 abi_long ret; 7879 char *name; 7880 unsigned int size, total_size; 7881 7882 if (get_user_s32(size, handle)) { 7883 return -TARGET_EFAULT; 7884 } 7885 7886 name = lock_user_string(pathname); 7887 if (!name) { 7888 return -TARGET_EFAULT; 7889 } 7890 7891 total_size = sizeof(struct file_handle) + size; 7892 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7893 if (!target_fh) { 7894 unlock_user(name, pathname, 0); 7895 return -TARGET_EFAULT; 7896 } 7897 7898 fh = g_malloc0(total_size); 7899 fh->handle_bytes = size; 7900 7901 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7902 unlock_user(name, pathname, 0); 7903 7904 /* man name_to_handle_at(2): 7905 * Other than the use of the handle_bytes field, the caller should treat 7906 * the file_handle structure as an opaque data type 7907 */ 7908 7909 memcpy(target_fh, fh, total_size); 7910 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7911 target_fh->handle_type = tswap32(fh->handle_type); 7912 g_free(fh); 7913 unlock_user(target_fh, handle, total_size); 7914 7915 if (put_user_s32(mid, mount_id)) { 7916 return -TARGET_EFAULT; 7917 } 7918 7919 return ret; 7920 7921 } 7922 #endif 7923 7924 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7925 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7926 abi_long flags) 7927 { 7928 struct file_handle *target_fh; 7929 struct file_handle *fh; 7930 unsigned int size, total_size; 7931 abi_long ret; 7932 7933 if (get_user_s32(size, handle)) { 7934 return -TARGET_EFAULT; 7935 } 7936 7937 
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the guest handle and fix up the two header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif

#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Emulate signalfd4(2): convert the guest sigset and flags, create the
 * host signalfd, and register a read-side translator for the fd so
 * signalfd_siginfo records are converted on read().
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}

/* Back /proc/self/cmdline with the argv recorded at exec time. */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* Arguments are NUL-separated, including the trailing NUL. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}

/*
 * Back /proc/self/maps: walk the host's mappings and emit only those
 * that are visible in the guest address space, with guest addresses.
 */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the mapping end to the guest address space. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

#ifdef TARGET_HPPA
            /* HPPA's stack grows upward, so the limit is the top. */
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ?
                            'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad the path to column 73 like the kernel does. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}

/*
 * Back /proc/self/stat: emit the 44 space-separated fields, filling in
 * only those a guest is likely to inspect (pid, comm, ppid, starttime,
 * stack start) and zeroing the rest.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}

/* Back /proc/self/auxv with the aux vector saved on the guest stack. */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

/*
 * Return 1 if filename is "/proc/<self-pid-or-self>/<entry>", else 0.
 * Accepts both the "self/" form and the emulator's own numeric pid.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake-/proc dispatch table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Back /proc/net/route on cross-endian hosts: re-emit the host file
 * with the address columns byte-swapped to guest order.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if
    (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        /* Only the in-memory-order address words need swapping. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

#if defined(TARGET_SPARC)
/* Minimal fake /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
/* Minimal fake /proc/cpuinfo for HPPA guests. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
/* Minimal fake /proc/hardware for m68k guests. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif

/*
 * openat(2) emulation.  Certain /proc paths are intercepted and served
 * from a synthesized file (memfd, or a temp file if memfd_create is
 * unavailable); everything else goes to the host via safe_openat().
 */
static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the fd the loader opened, when available. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}

#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}

/*
 * Expand a guest CPU affinity bitmap (abi_ulong words, guest byte
 * order) into a host cpu_set-style bitmap of host-word width.
 * Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

/*
 * Inverse of target_to_host_cpu_mask(): compress a host affinity
 * bitmap into guest abi_ulong words.  Returns 0 or -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}

#ifdef TARGET_NR_getdents
/*
 * Implement the guest getdents syscall: read host directory entries
 * into a scratch buffer, then repack them record by record into the
 * guest's struct target_dirent layout at guest address arg2,
 * byte-swapping the fields as needed.  Returns the number of bytes
 * stored in the guest buffer, or a negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: NUL terminator plus the trailing d_type byte (see below). */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */

#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * getdents64 counterpart of do_getdents(): repack host linux_dirent64
 * records into the guest's struct target_dirent64 layout at arg2.
 * Returns the number of bytes stored in the guest buffer, or a
 * negative target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* Unlike do_getdents(), namelen here includes the NUL terminator. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */

#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Declare a host pivot_root() via the _syscall2 wrapper macro. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
8603 However in threaded applications it is used for thread termination, 8604 and _exit_group is used for application termination. 8605 Do thread termination if we have more then one thread. */ 8606 8607 if (block_signals()) { 8608 return -QEMU_ERESTARTSYS; 8609 } 8610 8611 pthread_mutex_lock(&clone_lock); 8612 8613 if (CPU_NEXT(first_cpu)) { 8614 TaskState *ts = cpu->opaque; 8615 8616 object_property_set_bool(OBJECT(cpu), "realized", false, NULL); 8617 object_unref(OBJECT(cpu)); 8618 /* 8619 * At this point the CPU should be unrealized and removed 8620 * from cpu lists. We can clean-up the rest of the thread 8621 * data without the lock held. 8622 */ 8623 8624 pthread_mutex_unlock(&clone_lock); 8625 8626 if (ts->child_tidptr) { 8627 put_user_u32(0, ts->child_tidptr); 8628 do_sys_futex(g2h(cpu, ts->child_tidptr), 8629 FUTEX_WAKE, INT_MAX, NULL, NULL, 0); 8630 } 8631 thread_cpu = NULL; 8632 g_free(ts); 8633 rcu_unregister_thread(); 8634 pthread_exit(NULL); 8635 } 8636 8637 pthread_mutex_unlock(&clone_lock); 8638 preexit_cleanup(cpu_env, arg1); 8639 _exit(arg1); 8640 return 0; /* avoid warning */ 8641 case TARGET_NR_read: 8642 if (arg2 == 0 && arg3 == 0) { 8643 return get_errno(safe_read(arg1, 0, 0)); 8644 } else { 8645 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8646 return -TARGET_EFAULT; 8647 ret = get_errno(safe_read(arg1, p, arg3)); 8648 if (ret >= 0 && 8649 fd_trans_host_to_target_data(arg1)) { 8650 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8651 } 8652 unlock_user(p, arg2, ret); 8653 } 8654 return ret; 8655 case TARGET_NR_write: 8656 if (arg2 == 0 && arg3 == 0) { 8657 return get_errno(safe_write(arg1, 0, 0)); 8658 } 8659 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8660 return -TARGET_EFAULT; 8661 if (fd_trans_target_to_host_data(arg1)) { 8662 void *copy = g_malloc(arg3); 8663 memcpy(copy, p, arg3); 8664 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8665 if (ret >= 0) { 8666 ret = get_errno(safe_write(arg1, copy, ret)); 8667 } 8668 
g_free(copy); 8669 } else { 8670 ret = get_errno(safe_write(arg1, p, arg3)); 8671 } 8672 unlock_user(p, arg2, 0); 8673 return ret; 8674 8675 #ifdef TARGET_NR_open 8676 case TARGET_NR_open: 8677 if (!(p = lock_user_string(arg1))) 8678 return -TARGET_EFAULT; 8679 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8680 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8681 arg3)); 8682 fd_trans_unregister(ret); 8683 unlock_user(p, arg1, 0); 8684 return ret; 8685 #endif 8686 case TARGET_NR_openat: 8687 if (!(p = lock_user_string(arg2))) 8688 return -TARGET_EFAULT; 8689 ret = get_errno(do_openat(cpu_env, arg1, p, 8690 target_to_host_bitmask(arg3, fcntl_flags_tbl), 8691 arg4)); 8692 fd_trans_unregister(ret); 8693 unlock_user(p, arg2, 0); 8694 return ret; 8695 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8696 case TARGET_NR_name_to_handle_at: 8697 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8698 return ret; 8699 #endif 8700 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8701 case TARGET_NR_open_by_handle_at: 8702 ret = do_open_by_handle_at(arg1, arg2, arg3); 8703 fd_trans_unregister(ret); 8704 return ret; 8705 #endif 8706 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) 8707 case TARGET_NR_pidfd_open: 8708 return get_errno(pidfd_open(arg1, arg2)); 8709 #endif 8710 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) 8711 case TARGET_NR_pidfd_send_signal: 8712 { 8713 siginfo_t uinfo; 8714 8715 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 8716 if (!p) { 8717 return -TARGET_EFAULT; 8718 } 8719 target_to_host_siginfo(&uinfo, p); 8720 unlock_user(p, arg3, 0); 8721 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2), 8722 &uinfo, arg4)); 8723 } 8724 return ret; 8725 #endif 8726 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) 8727 case TARGET_NR_pidfd_getfd: 8728 return get_errno(pidfd_getfd(arg1, arg2, arg3)); 8729 #endif 8730 
case TARGET_NR_close: 8731 fd_trans_unregister(arg1); 8732 return get_errno(close(arg1)); 8733 8734 case TARGET_NR_brk: 8735 return do_brk(arg1); 8736 #ifdef TARGET_NR_fork 8737 case TARGET_NR_fork: 8738 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8739 #endif 8740 #ifdef TARGET_NR_waitpid 8741 case TARGET_NR_waitpid: 8742 { 8743 int status; 8744 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8745 if (!is_error(ret) && arg2 && ret 8746 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8747 return -TARGET_EFAULT; 8748 } 8749 return ret; 8750 #endif 8751 #ifdef TARGET_NR_waitid 8752 case TARGET_NR_waitid: 8753 { 8754 siginfo_t info; 8755 info.si_pid = 0; 8756 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8757 if (!is_error(ret) && arg3 && info.si_pid != 0) { 8758 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8759 return -TARGET_EFAULT; 8760 host_to_target_siginfo(p, &info); 8761 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8762 } 8763 } 8764 return ret; 8765 #endif 8766 #ifdef TARGET_NR_creat /* not on alpha */ 8767 case TARGET_NR_creat: 8768 if (!(p = lock_user_string(arg1))) 8769 return -TARGET_EFAULT; 8770 ret = get_errno(creat(p, arg2)); 8771 fd_trans_unregister(ret); 8772 unlock_user(p, arg1, 0); 8773 return ret; 8774 #endif 8775 #ifdef TARGET_NR_link 8776 case TARGET_NR_link: 8777 { 8778 void * p2; 8779 p = lock_user_string(arg1); 8780 p2 = lock_user_string(arg2); 8781 if (!p || !p2) 8782 ret = -TARGET_EFAULT; 8783 else 8784 ret = get_errno(link(p, p2)); 8785 unlock_user(p2, arg2, 0); 8786 unlock_user(p, arg1, 0); 8787 } 8788 return ret; 8789 #endif 8790 #if defined(TARGET_NR_linkat) 8791 case TARGET_NR_linkat: 8792 { 8793 void * p2 = NULL; 8794 if (!arg2 || !arg4) 8795 return -TARGET_EFAULT; 8796 p = lock_user_string(arg2); 8797 p2 = lock_user_string(arg4); 8798 if (!p || !p2) 8799 ret = -TARGET_EFAULT; 8800 else 8801 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8802 
unlock_user(p, arg2, 0); 8803 unlock_user(p2, arg4, 0); 8804 } 8805 return ret; 8806 #endif 8807 #ifdef TARGET_NR_unlink 8808 case TARGET_NR_unlink: 8809 if (!(p = lock_user_string(arg1))) 8810 return -TARGET_EFAULT; 8811 ret = get_errno(unlink(p)); 8812 unlock_user(p, arg1, 0); 8813 return ret; 8814 #endif 8815 #if defined(TARGET_NR_unlinkat) 8816 case TARGET_NR_unlinkat: 8817 if (!(p = lock_user_string(arg2))) 8818 return -TARGET_EFAULT; 8819 ret = get_errno(unlinkat(arg1, p, arg3)); 8820 unlock_user(p, arg2, 0); 8821 return ret; 8822 #endif 8823 case TARGET_NR_execve: 8824 { 8825 char **argp, **envp; 8826 int argc, envc; 8827 abi_ulong gp; 8828 abi_ulong guest_argp; 8829 abi_ulong guest_envp; 8830 abi_ulong addr; 8831 char **q; 8832 8833 argc = 0; 8834 guest_argp = arg2; 8835 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8836 if (get_user_ual(addr, gp)) 8837 return -TARGET_EFAULT; 8838 if (!addr) 8839 break; 8840 argc++; 8841 } 8842 envc = 0; 8843 guest_envp = arg3; 8844 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8845 if (get_user_ual(addr, gp)) 8846 return -TARGET_EFAULT; 8847 if (!addr) 8848 break; 8849 envc++; 8850 } 8851 8852 argp = g_new0(char *, argc + 1); 8853 envp = g_new0(char *, envc + 1); 8854 8855 for (gp = guest_argp, q = argp; gp; 8856 gp += sizeof(abi_ulong), q++) { 8857 if (get_user_ual(addr, gp)) 8858 goto execve_efault; 8859 if (!addr) 8860 break; 8861 if (!(*q = lock_user_string(addr))) 8862 goto execve_efault; 8863 } 8864 *q = NULL; 8865 8866 for (gp = guest_envp, q = envp; gp; 8867 gp += sizeof(abi_ulong), q++) { 8868 if (get_user_ual(addr, gp)) 8869 goto execve_efault; 8870 if (!addr) 8871 break; 8872 if (!(*q = lock_user_string(addr))) 8873 goto execve_efault; 8874 } 8875 *q = NULL; 8876 8877 if (!(p = lock_user_string(arg1))) 8878 goto execve_efault; 8879 /* Although execve() is not an interruptible syscall it is 8880 * a special case where we must use the safe_syscall wrapper: 8881 * if we allow a signal to happen before 
we make the host 8882 * syscall then we will 'lose' it, because at the point of 8883 * execve the process leaves QEMU's control. So we use the 8884 * safe syscall wrapper to ensure that we either take the 8885 * signal as a guest signal, or else it does not happen 8886 * before the execve completes and makes it the other 8887 * program's problem. 8888 */ 8889 ret = get_errno(safe_execve(p, argp, envp)); 8890 unlock_user(p, arg1, 0); 8891 8892 goto execve_end; 8893 8894 execve_efault: 8895 ret = -TARGET_EFAULT; 8896 8897 execve_end: 8898 for (gp = guest_argp, q = argp; *q; 8899 gp += sizeof(abi_ulong), q++) { 8900 if (get_user_ual(addr, gp) 8901 || !addr) 8902 break; 8903 unlock_user(*q, addr, 0); 8904 } 8905 for (gp = guest_envp, q = envp; *q; 8906 gp += sizeof(abi_ulong), q++) { 8907 if (get_user_ual(addr, gp) 8908 || !addr) 8909 break; 8910 unlock_user(*q, addr, 0); 8911 } 8912 8913 g_free(argp); 8914 g_free(envp); 8915 } 8916 return ret; 8917 case TARGET_NR_chdir: 8918 if (!(p = lock_user_string(arg1))) 8919 return -TARGET_EFAULT; 8920 ret = get_errno(chdir(p)); 8921 unlock_user(p, arg1, 0); 8922 return ret; 8923 #ifdef TARGET_NR_time 8924 case TARGET_NR_time: 8925 { 8926 time_t host_time; 8927 ret = get_errno(time(&host_time)); 8928 if (!is_error(ret) 8929 && arg1 8930 && put_user_sal(host_time, arg1)) 8931 return -TARGET_EFAULT; 8932 } 8933 return ret; 8934 #endif 8935 #ifdef TARGET_NR_mknod 8936 case TARGET_NR_mknod: 8937 if (!(p = lock_user_string(arg1))) 8938 return -TARGET_EFAULT; 8939 ret = get_errno(mknod(p, arg2, arg3)); 8940 unlock_user(p, arg1, 0); 8941 return ret; 8942 #endif 8943 #if defined(TARGET_NR_mknodat) 8944 case TARGET_NR_mknodat: 8945 if (!(p = lock_user_string(arg2))) 8946 return -TARGET_EFAULT; 8947 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8948 unlock_user(p, arg2, 0); 8949 return ret; 8950 #endif 8951 #ifdef TARGET_NR_chmod 8952 case TARGET_NR_chmod: 8953 if (!(p = lock_user_string(arg1))) 8954 return -TARGET_EFAULT; 8955 ret = 
get_errno(chmod(p, arg2)); 8956 unlock_user(p, arg1, 0); 8957 return ret; 8958 #endif 8959 #ifdef TARGET_NR_lseek 8960 case TARGET_NR_lseek: 8961 return get_errno(lseek(arg1, arg2, arg3)); 8962 #endif 8963 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8964 /* Alpha specific */ 8965 case TARGET_NR_getxpid: 8966 cpu_env->ir[IR_A4] = getppid(); 8967 return get_errno(getpid()); 8968 #endif 8969 #ifdef TARGET_NR_getpid 8970 case TARGET_NR_getpid: 8971 return get_errno(getpid()); 8972 #endif 8973 case TARGET_NR_mount: 8974 { 8975 /* need to look at the data field */ 8976 void *p2, *p3; 8977 8978 if (arg1) { 8979 p = lock_user_string(arg1); 8980 if (!p) { 8981 return -TARGET_EFAULT; 8982 } 8983 } else { 8984 p = NULL; 8985 } 8986 8987 p2 = lock_user_string(arg2); 8988 if (!p2) { 8989 if (arg1) { 8990 unlock_user(p, arg1, 0); 8991 } 8992 return -TARGET_EFAULT; 8993 } 8994 8995 if (arg3) { 8996 p3 = lock_user_string(arg3); 8997 if (!p3) { 8998 if (arg1) { 8999 unlock_user(p, arg1, 0); 9000 } 9001 unlock_user(p2, arg2, 0); 9002 return -TARGET_EFAULT; 9003 } 9004 } else { 9005 p3 = NULL; 9006 } 9007 9008 /* FIXME - arg5 should be locked, but it isn't clear how to 9009 * do that since it's not guaranteed to be a NULL-terminated 9010 * string. 
9011 */ 9012 if (!arg5) { 9013 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 9014 } else { 9015 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5)); 9016 } 9017 ret = get_errno(ret); 9018 9019 if (arg1) { 9020 unlock_user(p, arg1, 0); 9021 } 9022 unlock_user(p2, arg2, 0); 9023 if (arg3) { 9024 unlock_user(p3, arg3, 0); 9025 } 9026 } 9027 return ret; 9028 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 9029 #if defined(TARGET_NR_umount) 9030 case TARGET_NR_umount: 9031 #endif 9032 #if defined(TARGET_NR_oldumount) 9033 case TARGET_NR_oldumount: 9034 #endif 9035 if (!(p = lock_user_string(arg1))) 9036 return -TARGET_EFAULT; 9037 ret = get_errno(umount(p)); 9038 unlock_user(p, arg1, 0); 9039 return ret; 9040 #endif 9041 #ifdef TARGET_NR_stime /* not on alpha */ 9042 case TARGET_NR_stime: 9043 { 9044 struct timespec ts; 9045 ts.tv_nsec = 0; 9046 if (get_user_sal(ts.tv_sec, arg1)) { 9047 return -TARGET_EFAULT; 9048 } 9049 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 9050 } 9051 #endif 9052 #ifdef TARGET_NR_alarm /* not on alpha */ 9053 case TARGET_NR_alarm: 9054 return alarm(arg1); 9055 #endif 9056 #ifdef TARGET_NR_pause /* not on alpha */ 9057 case TARGET_NR_pause: 9058 if (!block_signals()) { 9059 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 9060 } 9061 return -TARGET_EINTR; 9062 #endif 9063 #ifdef TARGET_NR_utime 9064 case TARGET_NR_utime: 9065 { 9066 struct utimbuf tbuf, *host_tbuf; 9067 struct target_utimbuf *target_tbuf; 9068 if (arg2) { 9069 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 9070 return -TARGET_EFAULT; 9071 tbuf.actime = tswapal(target_tbuf->actime); 9072 tbuf.modtime = tswapal(target_tbuf->modtime); 9073 unlock_user_struct(target_tbuf, arg2, 0); 9074 host_tbuf = &tbuf; 9075 } else { 9076 host_tbuf = NULL; 9077 } 9078 if (!(p = lock_user_string(arg1))) 9079 return -TARGET_EFAULT; 9080 ret = get_errno(utime(p, host_tbuf)); 9081 unlock_user(p, arg1, 0); 9082 } 9083 return ret; 9084 #endif 9085 
#ifdef TARGET_NR_utimes 9086 case TARGET_NR_utimes: 9087 { 9088 struct timeval *tvp, tv[2]; 9089 if (arg2) { 9090 if (copy_from_user_timeval(&tv[0], arg2) 9091 || copy_from_user_timeval(&tv[1], 9092 arg2 + sizeof(struct target_timeval))) 9093 return -TARGET_EFAULT; 9094 tvp = tv; 9095 } else { 9096 tvp = NULL; 9097 } 9098 if (!(p = lock_user_string(arg1))) 9099 return -TARGET_EFAULT; 9100 ret = get_errno(utimes(p, tvp)); 9101 unlock_user(p, arg1, 0); 9102 } 9103 return ret; 9104 #endif 9105 #if defined(TARGET_NR_futimesat) 9106 case TARGET_NR_futimesat: 9107 { 9108 struct timeval *tvp, tv[2]; 9109 if (arg3) { 9110 if (copy_from_user_timeval(&tv[0], arg3) 9111 || copy_from_user_timeval(&tv[1], 9112 arg3 + sizeof(struct target_timeval))) 9113 return -TARGET_EFAULT; 9114 tvp = tv; 9115 } else { 9116 tvp = NULL; 9117 } 9118 if (!(p = lock_user_string(arg2))) { 9119 return -TARGET_EFAULT; 9120 } 9121 ret = get_errno(futimesat(arg1, path(p), tvp)); 9122 unlock_user(p, arg2, 0); 9123 } 9124 return ret; 9125 #endif 9126 #ifdef TARGET_NR_access 9127 case TARGET_NR_access: 9128 if (!(p = lock_user_string(arg1))) { 9129 return -TARGET_EFAULT; 9130 } 9131 ret = get_errno(access(path(p), arg2)); 9132 unlock_user(p, arg1, 0); 9133 return ret; 9134 #endif 9135 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 9136 case TARGET_NR_faccessat: 9137 if (!(p = lock_user_string(arg2))) { 9138 return -TARGET_EFAULT; 9139 } 9140 ret = get_errno(faccessat(arg1, p, arg3, 0)); 9141 unlock_user(p, arg2, 0); 9142 return ret; 9143 #endif 9144 #ifdef TARGET_NR_nice /* not on alpha */ 9145 case TARGET_NR_nice: 9146 return get_errno(nice(arg1)); 9147 #endif 9148 case TARGET_NR_sync: 9149 sync(); 9150 return 0; 9151 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 9152 case TARGET_NR_syncfs: 9153 return get_errno(syncfs(arg1)); 9154 #endif 9155 case TARGET_NR_kill: 9156 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 9157 #ifdef TARGET_NR_rename 9158 case 
TARGET_NR_rename: 9159 { 9160 void *p2; 9161 p = lock_user_string(arg1); 9162 p2 = lock_user_string(arg2); 9163 if (!p || !p2) 9164 ret = -TARGET_EFAULT; 9165 else 9166 ret = get_errno(rename(p, p2)); 9167 unlock_user(p2, arg2, 0); 9168 unlock_user(p, arg1, 0); 9169 } 9170 return ret; 9171 #endif 9172 #if defined(TARGET_NR_renameat) 9173 case TARGET_NR_renameat: 9174 { 9175 void *p2; 9176 p = lock_user_string(arg2); 9177 p2 = lock_user_string(arg4); 9178 if (!p || !p2) 9179 ret = -TARGET_EFAULT; 9180 else 9181 ret = get_errno(renameat(arg1, p, arg3, p2)); 9182 unlock_user(p2, arg4, 0); 9183 unlock_user(p, arg2, 0); 9184 } 9185 return ret; 9186 #endif 9187 #if defined(TARGET_NR_renameat2) 9188 case TARGET_NR_renameat2: 9189 { 9190 void *p2; 9191 p = lock_user_string(arg2); 9192 p2 = lock_user_string(arg4); 9193 if (!p || !p2) { 9194 ret = -TARGET_EFAULT; 9195 } else { 9196 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 9197 } 9198 unlock_user(p2, arg4, 0); 9199 unlock_user(p, arg2, 0); 9200 } 9201 return ret; 9202 #endif 9203 #ifdef TARGET_NR_mkdir 9204 case TARGET_NR_mkdir: 9205 if (!(p = lock_user_string(arg1))) 9206 return -TARGET_EFAULT; 9207 ret = get_errno(mkdir(p, arg2)); 9208 unlock_user(p, arg1, 0); 9209 return ret; 9210 #endif 9211 #if defined(TARGET_NR_mkdirat) 9212 case TARGET_NR_mkdirat: 9213 if (!(p = lock_user_string(arg2))) 9214 return -TARGET_EFAULT; 9215 ret = get_errno(mkdirat(arg1, p, arg3)); 9216 unlock_user(p, arg2, 0); 9217 return ret; 9218 #endif 9219 #ifdef TARGET_NR_rmdir 9220 case TARGET_NR_rmdir: 9221 if (!(p = lock_user_string(arg1))) 9222 return -TARGET_EFAULT; 9223 ret = get_errno(rmdir(p)); 9224 unlock_user(p, arg1, 0); 9225 return ret; 9226 #endif 9227 case TARGET_NR_dup: 9228 ret = get_errno(dup(arg1)); 9229 if (ret >= 0) { 9230 fd_trans_dup(arg1, ret); 9231 } 9232 return ret; 9233 #ifdef TARGET_NR_pipe 9234 case TARGET_NR_pipe: 9235 return do_pipe(cpu_env, arg1, 0, 0); 9236 #endif 9237 #ifdef TARGET_NR_pipe2 9238 case 
TARGET_NR_pipe2: 9239 return do_pipe(cpu_env, arg1, 9240 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 9241 #endif 9242 case TARGET_NR_times: 9243 { 9244 struct target_tms *tmsp; 9245 struct tms tms; 9246 ret = get_errno(times(&tms)); 9247 if (arg1) { 9248 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 9249 if (!tmsp) 9250 return -TARGET_EFAULT; 9251 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 9252 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 9253 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 9254 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 9255 } 9256 if (!is_error(ret)) 9257 ret = host_to_target_clock_t(ret); 9258 } 9259 return ret; 9260 case TARGET_NR_acct: 9261 if (arg1 == 0) { 9262 ret = get_errno(acct(NULL)); 9263 } else { 9264 if (!(p = lock_user_string(arg1))) { 9265 return -TARGET_EFAULT; 9266 } 9267 ret = get_errno(acct(path(p))); 9268 unlock_user(p, arg1, 0); 9269 } 9270 return ret; 9271 #ifdef TARGET_NR_umount2 9272 case TARGET_NR_umount2: 9273 if (!(p = lock_user_string(arg1))) 9274 return -TARGET_EFAULT; 9275 ret = get_errno(umount2(p, arg2)); 9276 unlock_user(p, arg1, 0); 9277 return ret; 9278 #endif 9279 case TARGET_NR_ioctl: 9280 return do_ioctl(arg1, arg2, arg3); 9281 #ifdef TARGET_NR_fcntl 9282 case TARGET_NR_fcntl: 9283 return do_fcntl(arg1, arg2, arg3); 9284 #endif 9285 case TARGET_NR_setpgid: 9286 return get_errno(setpgid(arg1, arg2)); 9287 case TARGET_NR_umask: 9288 return get_errno(umask(arg1)); 9289 case TARGET_NR_chroot: 9290 if (!(p = lock_user_string(arg1))) 9291 return -TARGET_EFAULT; 9292 ret = get_errno(chroot(p)); 9293 unlock_user(p, arg1, 0); 9294 return ret; 9295 #ifdef TARGET_NR_dup2 9296 case TARGET_NR_dup2: 9297 ret = get_errno(dup2(arg1, arg2)); 9298 if (ret >= 0) { 9299 fd_trans_dup(arg1, arg2); 9300 } 9301 return ret; 9302 #endif 9303 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 9304 case TARGET_NR_dup3: 
9305 { 9306 int host_flags; 9307 9308 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 9309 return -EINVAL; 9310 } 9311 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 9312 ret = get_errno(dup3(arg1, arg2, host_flags)); 9313 if (ret >= 0) { 9314 fd_trans_dup(arg1, arg2); 9315 } 9316 return ret; 9317 } 9318 #endif 9319 #ifdef TARGET_NR_getppid /* not on alpha */ 9320 case TARGET_NR_getppid: 9321 return get_errno(getppid()); 9322 #endif 9323 #ifdef TARGET_NR_getpgrp 9324 case TARGET_NR_getpgrp: 9325 return get_errno(getpgrp()); 9326 #endif 9327 case TARGET_NR_setsid: 9328 return get_errno(setsid()); 9329 #ifdef TARGET_NR_sigaction 9330 case TARGET_NR_sigaction: 9331 { 9332 #if defined(TARGET_MIPS) 9333 struct target_sigaction act, oact, *pact, *old_act; 9334 9335 if (arg2) { 9336 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9337 return -TARGET_EFAULT; 9338 act._sa_handler = old_act->_sa_handler; 9339 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 9340 act.sa_flags = old_act->sa_flags; 9341 unlock_user_struct(old_act, arg2, 0); 9342 pact = &act; 9343 } else { 9344 pact = NULL; 9345 } 9346 9347 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9348 9349 if (!is_error(ret) && arg3) { 9350 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9351 return -TARGET_EFAULT; 9352 old_act->_sa_handler = oact._sa_handler; 9353 old_act->sa_flags = oact.sa_flags; 9354 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 9355 old_act->sa_mask.sig[1] = 0; 9356 old_act->sa_mask.sig[2] = 0; 9357 old_act->sa_mask.sig[3] = 0; 9358 unlock_user_struct(old_act, arg3, 1); 9359 } 9360 #else 9361 struct target_old_sigaction *old_act; 9362 struct target_sigaction act, oact, *pact; 9363 if (arg2) { 9364 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9365 return -TARGET_EFAULT; 9366 act._sa_handler = old_act->_sa_handler; 9367 target_siginitset(&act.sa_mask, old_act->sa_mask); 9368 act.sa_flags = old_act->sa_flags; 9369 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9370 
act.sa_restorer = old_act->sa_restorer; 9371 #endif 9372 unlock_user_struct(old_act, arg2, 0); 9373 pact = &act; 9374 } else { 9375 pact = NULL; 9376 } 9377 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9378 if (!is_error(ret) && arg3) { 9379 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9380 return -TARGET_EFAULT; 9381 old_act->_sa_handler = oact._sa_handler; 9382 old_act->sa_mask = oact.sa_mask.sig[0]; 9383 old_act->sa_flags = oact.sa_flags; 9384 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9385 old_act->sa_restorer = oact.sa_restorer; 9386 #endif 9387 unlock_user_struct(old_act, arg3, 1); 9388 } 9389 #endif 9390 } 9391 return ret; 9392 #endif 9393 case TARGET_NR_rt_sigaction: 9394 { 9395 /* 9396 * For Alpha and SPARC this is a 5 argument syscall, with 9397 * a 'restorer' parameter which must be copied into the 9398 * sa_restorer field of the sigaction struct. 9399 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 9400 * and arg5 is the sigsetsize. 9401 */ 9402 #if defined(TARGET_ALPHA) 9403 target_ulong sigsetsize = arg4; 9404 target_ulong restorer = arg5; 9405 #elif defined(TARGET_SPARC) 9406 target_ulong restorer = arg4; 9407 target_ulong sigsetsize = arg5; 9408 #else 9409 target_ulong sigsetsize = arg4; 9410 target_ulong restorer = 0; 9411 #endif 9412 struct target_sigaction *act = NULL; 9413 struct target_sigaction *oact = NULL; 9414 9415 if (sigsetsize != sizeof(target_sigset_t)) { 9416 return -TARGET_EINVAL; 9417 } 9418 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9419 return -TARGET_EFAULT; 9420 } 9421 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9422 ret = -TARGET_EFAULT; 9423 } else { 9424 ret = get_errno(do_sigaction(arg1, act, oact, restorer)); 9425 if (oact) { 9426 unlock_user_struct(oact, arg3, 1); 9427 } 9428 } 9429 if (act) { 9430 unlock_user_struct(act, arg2, 0); 9431 } 9432 } 9433 return ret; 9434 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9435 case TARGET_NR_sgetmask: 9436 { 9437 sigset_t 
cur_set; 9438 abi_ulong target_set; 9439 ret = do_sigprocmask(0, NULL, &cur_set); 9440 if (!ret) { 9441 host_to_target_old_sigset(&target_set, &cur_set); 9442 ret = target_set; 9443 } 9444 } 9445 return ret; 9446 #endif 9447 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9448 case TARGET_NR_ssetmask: 9449 { 9450 sigset_t set, oset; 9451 abi_ulong target_set = arg1; 9452 target_to_host_old_sigset(&set, &target_set); 9453 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9454 if (!ret) { 9455 host_to_target_old_sigset(&target_set, &oset); 9456 ret = target_set; 9457 } 9458 } 9459 return ret; 9460 #endif 9461 #ifdef TARGET_NR_sigprocmask 9462 case TARGET_NR_sigprocmask: 9463 { 9464 #if defined(TARGET_ALPHA) 9465 sigset_t set, oldset; 9466 abi_ulong mask; 9467 int how; 9468 9469 switch (arg1) { 9470 case TARGET_SIG_BLOCK: 9471 how = SIG_BLOCK; 9472 break; 9473 case TARGET_SIG_UNBLOCK: 9474 how = SIG_UNBLOCK; 9475 break; 9476 case TARGET_SIG_SETMASK: 9477 how = SIG_SETMASK; 9478 break; 9479 default: 9480 return -TARGET_EINVAL; 9481 } 9482 mask = arg2; 9483 target_to_host_old_sigset(&set, &mask); 9484 9485 ret = do_sigprocmask(how, &set, &oldset); 9486 if (!is_error(ret)) { 9487 host_to_target_old_sigset(&mask, &oldset); 9488 ret = mask; 9489 cpu_env->ir[IR_V0] = 0; /* force no error */ 9490 } 9491 #else 9492 sigset_t set, oldset, *set_ptr; 9493 int how; 9494 9495 if (arg2) { 9496 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9497 if (!p) { 9498 return -TARGET_EFAULT; 9499 } 9500 target_to_host_old_sigset(&set, p); 9501 unlock_user(p, arg2, 0); 9502 set_ptr = &set; 9503 switch (arg1) { 9504 case TARGET_SIG_BLOCK: 9505 how = SIG_BLOCK; 9506 break; 9507 case TARGET_SIG_UNBLOCK: 9508 how = SIG_UNBLOCK; 9509 break; 9510 case TARGET_SIG_SETMASK: 9511 how = SIG_SETMASK; 9512 break; 9513 default: 9514 return -TARGET_EINVAL; 9515 } 9516 } else { 9517 how = 0; 9518 set_ptr = NULL; 9519 } 9520 ret = do_sigprocmask(how, set_ptr, &oldset); 9521 if (!is_error(ret) && 
arg3) { 9522 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9523 return -TARGET_EFAULT; 9524 host_to_target_old_sigset(p, &oldset); 9525 unlock_user(p, arg3, sizeof(target_sigset_t)); 9526 } 9527 #endif 9528 } 9529 return ret; 9530 #endif 9531 case TARGET_NR_rt_sigprocmask: 9532 { 9533 int how = arg1; 9534 sigset_t set, oldset, *set_ptr; 9535 9536 if (arg4 != sizeof(target_sigset_t)) { 9537 return -TARGET_EINVAL; 9538 } 9539 9540 if (arg2) { 9541 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9542 if (!p) { 9543 return -TARGET_EFAULT; 9544 } 9545 target_to_host_sigset(&set, p); 9546 unlock_user(p, arg2, 0); 9547 set_ptr = &set; 9548 switch(how) { 9549 case TARGET_SIG_BLOCK: 9550 how = SIG_BLOCK; 9551 break; 9552 case TARGET_SIG_UNBLOCK: 9553 how = SIG_UNBLOCK; 9554 break; 9555 case TARGET_SIG_SETMASK: 9556 how = SIG_SETMASK; 9557 break; 9558 default: 9559 return -TARGET_EINVAL; 9560 } 9561 } else { 9562 how = 0; 9563 set_ptr = NULL; 9564 } 9565 ret = do_sigprocmask(how, set_ptr, &oldset); 9566 if (!is_error(ret) && arg3) { 9567 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9568 return -TARGET_EFAULT; 9569 host_to_target_sigset(p, &oldset); 9570 unlock_user(p, arg3, sizeof(target_sigset_t)); 9571 } 9572 } 9573 return ret; 9574 #ifdef TARGET_NR_sigpending 9575 case TARGET_NR_sigpending: 9576 { 9577 sigset_t set; 9578 ret = get_errno(sigpending(&set)); 9579 if (!is_error(ret)) { 9580 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9581 return -TARGET_EFAULT; 9582 host_to_target_old_sigset(p, &set); 9583 unlock_user(p, arg1, sizeof(target_sigset_t)); 9584 } 9585 } 9586 return ret; 9587 #endif 9588 case TARGET_NR_rt_sigpending: 9589 { 9590 sigset_t set; 9591 9592 /* Yes, this check is >, not != like most. 
We follow the kernel's 9593 * logic and it does it like this because it implements 9594 * NR_sigpending through the same code path, and in that case 9595 * the old_sigset_t is smaller in size. 9596 */ 9597 if (arg2 > sizeof(target_sigset_t)) { 9598 return -TARGET_EINVAL; 9599 } 9600 9601 ret = get_errno(sigpending(&set)); 9602 if (!is_error(ret)) { 9603 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9604 return -TARGET_EFAULT; 9605 host_to_target_sigset(p, &set); 9606 unlock_user(p, arg1, sizeof(target_sigset_t)); 9607 } 9608 } 9609 return ret; 9610 #ifdef TARGET_NR_sigsuspend 9611 case TARGET_NR_sigsuspend: 9612 { 9613 sigset_t *set; 9614 9615 #if defined(TARGET_ALPHA) 9616 TaskState *ts = cpu->opaque; 9617 /* target_to_host_old_sigset will bswap back */ 9618 abi_ulong mask = tswapal(arg1); 9619 set = &ts->sigsuspend_mask; 9620 target_to_host_old_sigset(set, &mask); 9621 #else 9622 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t)); 9623 if (ret != 0) { 9624 return ret; 9625 } 9626 #endif 9627 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9628 finish_sigsuspend_mask(ret); 9629 } 9630 return ret; 9631 #endif 9632 case TARGET_NR_rt_sigsuspend: 9633 { 9634 sigset_t *set; 9635 9636 ret = process_sigsuspend_mask(&set, arg1, arg2); 9637 if (ret != 0) { 9638 return ret; 9639 } 9640 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9641 finish_sigsuspend_mask(ret); 9642 } 9643 return ret; 9644 #ifdef TARGET_NR_rt_sigtimedwait 9645 case TARGET_NR_rt_sigtimedwait: 9646 { 9647 sigset_t set; 9648 struct timespec uts, *puts; 9649 siginfo_t uinfo; 9650 9651 if (arg4 != sizeof(target_sigset_t)) { 9652 return -TARGET_EINVAL; 9653 } 9654 9655 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9656 return -TARGET_EFAULT; 9657 target_to_host_sigset(&set, p); 9658 unlock_user(p, arg1, 0); 9659 if (arg3) { 9660 puts = &uts; 9661 if (target_to_host_timespec(puts, arg3)) { 9662 return -TARGET_EFAULT; 9663 } 9664 
} else { 9665 puts = NULL; 9666 } 9667 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9668 SIGSET_T_SIZE)); 9669 if (!is_error(ret)) { 9670 if (arg2) { 9671 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9672 0); 9673 if (!p) { 9674 return -TARGET_EFAULT; 9675 } 9676 host_to_target_siginfo(p, &uinfo); 9677 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9678 } 9679 ret = host_to_target_signal(ret); 9680 } 9681 } 9682 return ret; 9683 #endif 9684 #ifdef TARGET_NR_rt_sigtimedwait_time64 9685 case TARGET_NR_rt_sigtimedwait_time64: 9686 { 9687 sigset_t set; 9688 struct timespec uts, *puts; 9689 siginfo_t uinfo; 9690 9691 if (arg4 != sizeof(target_sigset_t)) { 9692 return -TARGET_EINVAL; 9693 } 9694 9695 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 9696 if (!p) { 9697 return -TARGET_EFAULT; 9698 } 9699 target_to_host_sigset(&set, p); 9700 unlock_user(p, arg1, 0); 9701 if (arg3) { 9702 puts = &uts; 9703 if (target_to_host_timespec64(puts, arg3)) { 9704 return -TARGET_EFAULT; 9705 } 9706 } else { 9707 puts = NULL; 9708 } 9709 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9710 SIGSET_T_SIZE)); 9711 if (!is_error(ret)) { 9712 if (arg2) { 9713 p = lock_user(VERIFY_WRITE, arg2, 9714 sizeof(target_siginfo_t), 0); 9715 if (!p) { 9716 return -TARGET_EFAULT; 9717 } 9718 host_to_target_siginfo(p, &uinfo); 9719 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9720 } 9721 ret = host_to_target_signal(ret); 9722 } 9723 } 9724 return ret; 9725 #endif 9726 case TARGET_NR_rt_sigqueueinfo: 9727 { 9728 siginfo_t uinfo; 9729 9730 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9731 if (!p) { 9732 return -TARGET_EFAULT; 9733 } 9734 target_to_host_siginfo(&uinfo, p); 9735 unlock_user(p, arg3, 0); 9736 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo)); 9737 } 9738 return ret; 9739 case TARGET_NR_rt_tgsigqueueinfo: 9740 { 9741 siginfo_t uinfo; 9742 9743 p = lock_user(VERIFY_READ, arg4, 
sizeof(target_siginfo_t), 1); 9744 if (!p) { 9745 return -TARGET_EFAULT; 9746 } 9747 target_to_host_siginfo(&uinfo, p); 9748 unlock_user(p, arg4, 0); 9749 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo)); 9750 } 9751 return ret; 9752 #ifdef TARGET_NR_sigreturn 9753 case TARGET_NR_sigreturn: 9754 if (block_signals()) { 9755 return -QEMU_ERESTARTSYS; 9756 } 9757 return do_sigreturn(cpu_env); 9758 #endif 9759 case TARGET_NR_rt_sigreturn: 9760 if (block_signals()) { 9761 return -QEMU_ERESTARTSYS; 9762 } 9763 return do_rt_sigreturn(cpu_env); 9764 case TARGET_NR_sethostname: 9765 if (!(p = lock_user_string(arg1))) 9766 return -TARGET_EFAULT; 9767 ret = get_errno(sethostname(p, arg2)); 9768 unlock_user(p, arg1, 0); 9769 return ret; 9770 #ifdef TARGET_NR_setrlimit 9771 case TARGET_NR_setrlimit: 9772 { 9773 int resource = target_to_host_resource(arg1); 9774 struct target_rlimit *target_rlim; 9775 struct rlimit rlim; 9776 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9777 return -TARGET_EFAULT; 9778 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9779 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9780 unlock_user_struct(target_rlim, arg2, 0); 9781 /* 9782 * If we just passed through resource limit settings for memory then 9783 * they would also apply to QEMU's own allocations, and QEMU will 9784 * crash or hang or die if its allocations fail. Ideally we would 9785 * track the guest allocations in QEMU and apply the limits ourselves. 9786 * For now, just tell the guest the call succeeded but don't actually 9787 * limit anything. 
9788 */ 9789 if (resource != RLIMIT_AS && 9790 resource != RLIMIT_DATA && 9791 resource != RLIMIT_STACK) { 9792 return get_errno(setrlimit(resource, &rlim)); 9793 } else { 9794 return 0; 9795 } 9796 } 9797 #endif 9798 #ifdef TARGET_NR_getrlimit 9799 case TARGET_NR_getrlimit: 9800 { 9801 int resource = target_to_host_resource(arg1); 9802 struct target_rlimit *target_rlim; 9803 struct rlimit rlim; 9804 9805 ret = get_errno(getrlimit(resource, &rlim)); 9806 if (!is_error(ret)) { 9807 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9808 return -TARGET_EFAULT; 9809 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9810 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9811 unlock_user_struct(target_rlim, arg2, 1); 9812 } 9813 } 9814 return ret; 9815 #endif 9816 case TARGET_NR_getrusage: 9817 { 9818 struct rusage rusage; 9819 ret = get_errno(getrusage(arg1, &rusage)); 9820 if (!is_error(ret)) { 9821 ret = host_to_target_rusage(arg2, &rusage); 9822 } 9823 } 9824 return ret; 9825 #if defined(TARGET_NR_gettimeofday) 9826 case TARGET_NR_gettimeofday: 9827 { 9828 struct timeval tv; 9829 struct timezone tz; 9830 9831 ret = get_errno(gettimeofday(&tv, &tz)); 9832 if (!is_error(ret)) { 9833 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 9834 return -TARGET_EFAULT; 9835 } 9836 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 9837 return -TARGET_EFAULT; 9838 } 9839 } 9840 } 9841 return ret; 9842 #endif 9843 #if defined(TARGET_NR_settimeofday) 9844 case TARGET_NR_settimeofday: 9845 { 9846 struct timeval tv, *ptv = NULL; 9847 struct timezone tz, *ptz = NULL; 9848 9849 if (arg1) { 9850 if (copy_from_user_timeval(&tv, arg1)) { 9851 return -TARGET_EFAULT; 9852 } 9853 ptv = &tv; 9854 } 9855 9856 if (arg2) { 9857 if (copy_from_user_timezone(&tz, arg2)) { 9858 return -TARGET_EFAULT; 9859 } 9860 ptz = &tz; 9861 } 9862 9863 return get_errno(settimeofday(ptv, ptz)); 9864 } 9865 #endif 9866 #if defined(TARGET_NR_select) 9867 case TARGET_NR_select: 9868 #if 
defined(TARGET_WANT_NI_OLD_SELECT) 9869 /* some architectures used to have old_select here 9870 * but now ENOSYS it. 9871 */ 9872 ret = -TARGET_ENOSYS; 9873 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9874 ret = do_old_select(arg1); 9875 #else 9876 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9877 #endif 9878 return ret; 9879 #endif 9880 #ifdef TARGET_NR_pselect6 9881 case TARGET_NR_pselect6: 9882 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 9883 #endif 9884 #ifdef TARGET_NR_pselect6_time64 9885 case TARGET_NR_pselect6_time64: 9886 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 9887 #endif 9888 #ifdef TARGET_NR_symlink 9889 case TARGET_NR_symlink: 9890 { 9891 void *p2; 9892 p = lock_user_string(arg1); 9893 p2 = lock_user_string(arg2); 9894 if (!p || !p2) 9895 ret = -TARGET_EFAULT; 9896 else 9897 ret = get_errno(symlink(p, p2)); 9898 unlock_user(p2, arg2, 0); 9899 unlock_user(p, arg1, 0); 9900 } 9901 return ret; 9902 #endif 9903 #if defined(TARGET_NR_symlinkat) 9904 case TARGET_NR_symlinkat: 9905 { 9906 void *p2; 9907 p = lock_user_string(arg1); 9908 p2 = lock_user_string(arg3); 9909 if (!p || !p2) 9910 ret = -TARGET_EFAULT; 9911 else 9912 ret = get_errno(symlinkat(p, arg2, p2)); 9913 unlock_user(p2, arg3, 0); 9914 unlock_user(p, arg1, 0); 9915 } 9916 return ret; 9917 #endif 9918 #ifdef TARGET_NR_readlink 9919 case TARGET_NR_readlink: 9920 { 9921 void *p2; 9922 p = lock_user_string(arg1); 9923 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9924 if (!p || !p2) { 9925 ret = -TARGET_EFAULT; 9926 } else if (!arg3) { 9927 /* Short circuit this for the magic exe check. */ 9928 ret = -TARGET_EINVAL; 9929 } else if (is_proc_myself((const char *)p, "exe")) { 9930 char real[PATH_MAX], *temp; 9931 temp = realpath(exec_path, real); 9932 /* Return value is # of bytes that we wrote to the buffer. 
*/ 9933 if (temp == NULL) { 9934 ret = get_errno(-1); 9935 } else { 9936 /* Don't worry about sign mismatch as earlier mapping 9937 * logic would have thrown a bad address error. */ 9938 ret = MIN(strlen(real), arg3); 9939 /* We cannot NUL terminate the string. */ 9940 memcpy(p2, real, ret); 9941 } 9942 } else { 9943 ret = get_errno(readlink(path(p), p2, arg3)); 9944 } 9945 unlock_user(p2, arg2, ret); 9946 unlock_user(p, arg1, 0); 9947 } 9948 return ret; 9949 #endif 9950 #if defined(TARGET_NR_readlinkat) 9951 case TARGET_NR_readlinkat: 9952 { 9953 void *p2; 9954 p = lock_user_string(arg2); 9955 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9956 if (!p || !p2) { 9957 ret = -TARGET_EFAULT; 9958 } else if (!arg4) { 9959 /* Short circuit this for the magic exe check. */ 9960 ret = -TARGET_EINVAL; 9961 } else if (is_proc_myself((const char *)p, "exe")) { 9962 char real[PATH_MAX], *temp; 9963 temp = realpath(exec_path, real); 9964 /* Return value is # of bytes that we wrote to the buffer. */ 9965 if (temp == NULL) { 9966 ret = get_errno(-1); 9967 } else { 9968 /* Don't worry about sign mismatch as earlier mapping 9969 * logic would have thrown a bad address error. */ 9970 ret = MIN(strlen(real), arg4); 9971 /* We cannot NUL terminate the string. 
*/ 9972 memcpy(p2, real, ret); 9973 } 9974 } else { 9975 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9976 } 9977 unlock_user(p2, arg3, ret); 9978 unlock_user(p, arg2, 0); 9979 } 9980 return ret; 9981 #endif 9982 #ifdef TARGET_NR_swapon 9983 case TARGET_NR_swapon: 9984 if (!(p = lock_user_string(arg1))) 9985 return -TARGET_EFAULT; 9986 ret = get_errno(swapon(p, arg2)); 9987 unlock_user(p, arg1, 0); 9988 return ret; 9989 #endif 9990 case TARGET_NR_reboot: 9991 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9992 /* arg4 must be ignored in all other cases */ 9993 p = lock_user_string(arg4); 9994 if (!p) { 9995 return -TARGET_EFAULT; 9996 } 9997 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9998 unlock_user(p, arg4, 0); 9999 } else { 10000 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 10001 } 10002 return ret; 10003 #ifdef TARGET_NR_mmap 10004 case TARGET_NR_mmap: 10005 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 10006 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 10007 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 10008 || defined(TARGET_S390X) 10009 { 10010 abi_ulong *v; 10011 abi_ulong v1, v2, v3, v4, v5, v6; 10012 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 10013 return -TARGET_EFAULT; 10014 v1 = tswapal(v[0]); 10015 v2 = tswapal(v[1]); 10016 v3 = tswapal(v[2]); 10017 v4 = tswapal(v[3]); 10018 v5 = tswapal(v[4]); 10019 v6 = tswapal(v[5]); 10020 unlock_user(v, arg1, 0); 10021 ret = get_errno(target_mmap(v1, v2, v3, 10022 target_to_host_bitmask(v4, mmap_flags_tbl), 10023 v5, v6)); 10024 } 10025 #else 10026 /* mmap pointers are always untagged */ 10027 ret = get_errno(target_mmap(arg1, arg2, arg3, 10028 target_to_host_bitmask(arg4, mmap_flags_tbl), 10029 arg5, 10030 arg6)); 10031 #endif 10032 return ret; 10033 #endif 10034 #ifdef TARGET_NR_mmap2 10035 case TARGET_NR_mmap2: 10036 #ifndef MMAP_SHIFT 10037 #define MMAP_SHIFT 12 10038 #endif 10039 ret = target_mmap(arg1, arg2, arg3, 10040 
target_to_host_bitmask(arg4, mmap_flags_tbl), 10041 arg5, arg6 << MMAP_SHIFT); 10042 return get_errno(ret); 10043 #endif 10044 case TARGET_NR_munmap: 10045 arg1 = cpu_untagged_addr(cpu, arg1); 10046 return get_errno(target_munmap(arg1, arg2)); 10047 case TARGET_NR_mprotect: 10048 arg1 = cpu_untagged_addr(cpu, arg1); 10049 { 10050 TaskState *ts = cpu->opaque; 10051 /* Special hack to detect libc making the stack executable. */ 10052 if ((arg3 & PROT_GROWSDOWN) 10053 && arg1 >= ts->info->stack_limit 10054 && arg1 <= ts->info->start_stack) { 10055 arg3 &= ~PROT_GROWSDOWN; 10056 arg2 = arg2 + arg1 - ts->info->stack_limit; 10057 arg1 = ts->info->stack_limit; 10058 } 10059 } 10060 return get_errno(target_mprotect(arg1, arg2, arg3)); 10061 #ifdef TARGET_NR_mremap 10062 case TARGET_NR_mremap: 10063 arg1 = cpu_untagged_addr(cpu, arg1); 10064 /* mremap new_addr (arg5) is always untagged */ 10065 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 10066 #endif 10067 /* ??? msync/mlock/munlock are broken for softmmu. 
*/ 10068 #ifdef TARGET_NR_msync 10069 case TARGET_NR_msync: 10070 return get_errno(msync(g2h(cpu, arg1), arg2, arg3)); 10071 #endif 10072 #ifdef TARGET_NR_mlock 10073 case TARGET_NR_mlock: 10074 return get_errno(mlock(g2h(cpu, arg1), arg2)); 10075 #endif 10076 #ifdef TARGET_NR_munlock 10077 case TARGET_NR_munlock: 10078 return get_errno(munlock(g2h(cpu, arg1), arg2)); 10079 #endif 10080 #ifdef TARGET_NR_mlockall 10081 case TARGET_NR_mlockall: 10082 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 10083 #endif 10084 #ifdef TARGET_NR_munlockall 10085 case TARGET_NR_munlockall: 10086 return get_errno(munlockall()); 10087 #endif 10088 #ifdef TARGET_NR_truncate 10089 case TARGET_NR_truncate: 10090 if (!(p = lock_user_string(arg1))) 10091 return -TARGET_EFAULT; 10092 ret = get_errno(truncate(p, arg2)); 10093 unlock_user(p, arg1, 0); 10094 return ret; 10095 #endif 10096 #ifdef TARGET_NR_ftruncate 10097 case TARGET_NR_ftruncate: 10098 return get_errno(ftruncate(arg1, arg2)); 10099 #endif 10100 case TARGET_NR_fchmod: 10101 return get_errno(fchmod(arg1, arg2)); 10102 #if defined(TARGET_NR_fchmodat) 10103 case TARGET_NR_fchmodat: 10104 if (!(p = lock_user_string(arg2))) 10105 return -TARGET_EFAULT; 10106 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 10107 unlock_user(p, arg2, 0); 10108 return ret; 10109 #endif 10110 case TARGET_NR_getpriority: 10111 /* Note that negative values are valid for getpriority, so we must 10112 differentiate based on errno settings. */ 10113 errno = 0; 10114 ret = getpriority(arg1, arg2); 10115 if (ret == -1 && errno != 0) { 10116 return -host_to_target_errno(errno); 10117 } 10118 #ifdef TARGET_ALPHA 10119 /* Return value is the unbiased priority. Signal no error. */ 10120 cpu_env->ir[IR_V0] = 0; 10121 #else 10122 /* Return value is a biased priority to avoid negative numbers. 
*/ 10123 ret = 20 - ret; 10124 #endif 10125 return ret; 10126 case TARGET_NR_setpriority: 10127 return get_errno(setpriority(arg1, arg2, arg3)); 10128 #ifdef TARGET_NR_statfs 10129 case TARGET_NR_statfs: 10130 if (!(p = lock_user_string(arg1))) { 10131 return -TARGET_EFAULT; 10132 } 10133 ret = get_errno(statfs(path(p), &stfs)); 10134 unlock_user(p, arg1, 0); 10135 convert_statfs: 10136 if (!is_error(ret)) { 10137 struct target_statfs *target_stfs; 10138 10139 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 10140 return -TARGET_EFAULT; 10141 __put_user(stfs.f_type, &target_stfs->f_type); 10142 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10143 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10144 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10145 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10146 __put_user(stfs.f_files, &target_stfs->f_files); 10147 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10148 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10149 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10150 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10151 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10152 #ifdef _STATFS_F_FLAGS 10153 __put_user(stfs.f_flags, &target_stfs->f_flags); 10154 #else 10155 __put_user(0, &target_stfs->f_flags); 10156 #endif 10157 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10158 unlock_user_struct(target_stfs, arg2, 1); 10159 } 10160 return ret; 10161 #endif 10162 #ifdef TARGET_NR_fstatfs 10163 case TARGET_NR_fstatfs: 10164 ret = get_errno(fstatfs(arg1, &stfs)); 10165 goto convert_statfs; 10166 #endif 10167 #ifdef TARGET_NR_statfs64 10168 case TARGET_NR_statfs64: 10169 if (!(p = lock_user_string(arg1))) { 10170 return -TARGET_EFAULT; 10171 } 10172 ret = get_errno(statfs(path(p), &stfs)); 10173 unlock_user(p, arg1, 0); 10174 convert_statfs64: 10175 if (!is_error(ret)) { 10176 struct target_statfs64 *target_stfs; 10177 10178 if 
(!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 10179 return -TARGET_EFAULT; 10180 __put_user(stfs.f_type, &target_stfs->f_type); 10181 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10182 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10183 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10184 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10185 __put_user(stfs.f_files, &target_stfs->f_files); 10186 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10187 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10188 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10189 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10190 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10191 #ifdef _STATFS_F_FLAGS 10192 __put_user(stfs.f_flags, &target_stfs->f_flags); 10193 #else 10194 __put_user(0, &target_stfs->f_flags); 10195 #endif 10196 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10197 unlock_user_struct(target_stfs, arg3, 1); 10198 } 10199 return ret; 10200 case TARGET_NR_fstatfs64: 10201 ret = get_errno(fstatfs(arg1, &stfs)); 10202 goto convert_statfs64; 10203 #endif 10204 #ifdef TARGET_NR_socketcall 10205 case TARGET_NR_socketcall: 10206 return do_socketcall(arg1, arg2); 10207 #endif 10208 #ifdef TARGET_NR_accept 10209 case TARGET_NR_accept: 10210 return do_accept4(arg1, arg2, arg3, 0); 10211 #endif 10212 #ifdef TARGET_NR_accept4 10213 case TARGET_NR_accept4: 10214 return do_accept4(arg1, arg2, arg3, arg4); 10215 #endif 10216 #ifdef TARGET_NR_bind 10217 case TARGET_NR_bind: 10218 return do_bind(arg1, arg2, arg3); 10219 #endif 10220 #ifdef TARGET_NR_connect 10221 case TARGET_NR_connect: 10222 return do_connect(arg1, arg2, arg3); 10223 #endif 10224 #ifdef TARGET_NR_getpeername 10225 case TARGET_NR_getpeername: 10226 return do_getpeername(arg1, arg2, arg3); 10227 #endif 10228 #ifdef TARGET_NR_getsockname 10229 case TARGET_NR_getsockname: 10230 return do_getsockname(arg1, arg2, arg3); 10231 #endif 10232 #ifdef 
TARGET_NR_getsockopt 10233 case TARGET_NR_getsockopt: 10234 return do_getsockopt(arg1, arg2, arg3, arg4, arg5); 10235 #endif 10236 #ifdef TARGET_NR_listen 10237 case TARGET_NR_listen: 10238 return get_errno(listen(arg1, arg2)); 10239 #endif 10240 #ifdef TARGET_NR_recv 10241 case TARGET_NR_recv: 10242 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 10243 #endif 10244 #ifdef TARGET_NR_recvfrom 10245 case TARGET_NR_recvfrom: 10246 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 10247 #endif 10248 #ifdef TARGET_NR_recvmsg 10249 case TARGET_NR_recvmsg: 10250 return do_sendrecvmsg(arg1, arg2, arg3, 0); 10251 #endif 10252 #ifdef TARGET_NR_send 10253 case TARGET_NR_send: 10254 return do_sendto(arg1, arg2, arg3, arg4, 0, 0); 10255 #endif 10256 #ifdef TARGET_NR_sendmsg 10257 case TARGET_NR_sendmsg: 10258 return do_sendrecvmsg(arg1, arg2, arg3, 1); 10259 #endif 10260 #ifdef TARGET_NR_sendmmsg 10261 case TARGET_NR_sendmmsg: 10262 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 10263 #endif 10264 #ifdef TARGET_NR_recvmmsg 10265 case TARGET_NR_recvmmsg: 10266 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 10267 #endif 10268 #ifdef TARGET_NR_sendto 10269 case TARGET_NR_sendto: 10270 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 10271 #endif 10272 #ifdef TARGET_NR_shutdown 10273 case TARGET_NR_shutdown: 10274 return get_errno(shutdown(arg1, arg2)); 10275 #endif 10276 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 10277 case TARGET_NR_getrandom: 10278 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 10279 if (!p) { 10280 return -TARGET_EFAULT; 10281 } 10282 ret = get_errno(getrandom(p, arg2, arg3)); 10283 unlock_user(p, arg1, ret); 10284 return ret; 10285 #endif 10286 #ifdef TARGET_NR_socket 10287 case TARGET_NR_socket: 10288 return do_socket(arg1, arg2, arg3); 10289 #endif 10290 #ifdef TARGET_NR_socketpair 10291 case TARGET_NR_socketpair: 10292 return do_socketpair(arg1, arg2, arg3, arg4); 10293 #endif 10294 #ifdef TARGET_NR_setsockopt 10295 case 
TARGET_NR_setsockopt: 10296 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 10297 #endif 10298 #if defined(TARGET_NR_syslog) 10299 case TARGET_NR_syslog: 10300 { 10301 int len = arg2; 10302 10303 switch (arg1) { 10304 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 10305 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 10306 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 10307 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 10308 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 10309 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 10310 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 10311 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 10312 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 10313 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 10314 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 10315 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 10316 { 10317 if (len < 0) { 10318 return -TARGET_EINVAL; 10319 } 10320 if (len == 0) { 10321 return 0; 10322 } 10323 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10324 if (!p) { 10325 return -TARGET_EFAULT; 10326 } 10327 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 10328 unlock_user(p, arg2, arg3); 10329 } 10330 return ret; 10331 default: 10332 return -TARGET_EINVAL; 10333 } 10334 } 10335 break; 10336 #endif 10337 case TARGET_NR_setitimer: 10338 { 10339 struct itimerval value, ovalue, *pvalue; 10340 10341 if (arg2) { 10342 pvalue = &value; 10343 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 10344 || copy_from_user_timeval(&pvalue->it_value, 10345 arg2 + sizeof(struct target_timeval))) 10346 return -TARGET_EFAULT; 10347 } else { 10348 pvalue = NULL; 10349 } 10350 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 10351 if (!is_error(ret) && arg3) { 10352 if (copy_to_user_timeval(arg3, 10353 &ovalue.it_interval) 10354 || copy_to_user_timeval(arg3 + sizeof(struct 
target_timeval), 10355 &ovalue.it_value)) 10356 return -TARGET_EFAULT; 10357 } 10358 } 10359 return ret; 10360 case TARGET_NR_getitimer: 10361 { 10362 struct itimerval value; 10363 10364 ret = get_errno(getitimer(arg1, &value)); 10365 if (!is_error(ret) && arg2) { 10366 if (copy_to_user_timeval(arg2, 10367 &value.it_interval) 10368 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 10369 &value.it_value)) 10370 return -TARGET_EFAULT; 10371 } 10372 } 10373 return ret; 10374 #ifdef TARGET_NR_stat 10375 case TARGET_NR_stat: 10376 if (!(p = lock_user_string(arg1))) { 10377 return -TARGET_EFAULT; 10378 } 10379 ret = get_errno(stat(path(p), &st)); 10380 unlock_user(p, arg1, 0); 10381 goto do_stat; 10382 #endif 10383 #ifdef TARGET_NR_lstat 10384 case TARGET_NR_lstat: 10385 if (!(p = lock_user_string(arg1))) { 10386 return -TARGET_EFAULT; 10387 } 10388 ret = get_errno(lstat(path(p), &st)); 10389 unlock_user(p, arg1, 0); 10390 goto do_stat; 10391 #endif 10392 #ifdef TARGET_NR_fstat 10393 case TARGET_NR_fstat: 10394 { 10395 ret = get_errno(fstat(arg1, &st)); 10396 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 10397 do_stat: 10398 #endif 10399 if (!is_error(ret)) { 10400 struct target_stat *target_st; 10401 10402 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 10403 return -TARGET_EFAULT; 10404 memset(target_st, 0, sizeof(*target_st)); 10405 __put_user(st.st_dev, &target_st->st_dev); 10406 __put_user(st.st_ino, &target_st->st_ino); 10407 __put_user(st.st_mode, &target_st->st_mode); 10408 __put_user(st.st_uid, &target_st->st_uid); 10409 __put_user(st.st_gid, &target_st->st_gid); 10410 __put_user(st.st_nlink, &target_st->st_nlink); 10411 __put_user(st.st_rdev, &target_st->st_rdev); 10412 __put_user(st.st_size, &target_st->st_size); 10413 __put_user(st.st_blksize, &target_st->st_blksize); 10414 __put_user(st.st_blocks, &target_st->st_blocks); 10415 __put_user(st.st_atime, &target_st->target_st_atime); 10416 __put_user(st.st_mtime, 
&target_st->target_st_mtime); 10417 __put_user(st.st_ctime, &target_st->target_st_ctime); 10418 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) 10419 __put_user(st.st_atim.tv_nsec, 10420 &target_st->target_st_atime_nsec); 10421 __put_user(st.st_mtim.tv_nsec, 10422 &target_st->target_st_mtime_nsec); 10423 __put_user(st.st_ctim.tv_nsec, 10424 &target_st->target_st_ctime_nsec); 10425 #endif 10426 unlock_user_struct(target_st, arg2, 1); 10427 } 10428 } 10429 return ret; 10430 #endif 10431 case TARGET_NR_vhangup: 10432 return get_errno(vhangup()); 10433 #ifdef TARGET_NR_syscall 10434 case TARGET_NR_syscall: 10435 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 10436 arg6, arg7, arg8, 0); 10437 #endif 10438 #if defined(TARGET_NR_wait4) 10439 case TARGET_NR_wait4: 10440 { 10441 int status; 10442 abi_long status_ptr = arg2; 10443 struct rusage rusage, *rusage_ptr; 10444 abi_ulong target_rusage = arg4; 10445 abi_long rusage_err; 10446 if (target_rusage) 10447 rusage_ptr = &rusage; 10448 else 10449 rusage_ptr = NULL; 10450 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10451 if (!is_error(ret)) { 10452 if (status_ptr && ret) { 10453 status = host_to_target_waitstatus(status); 10454 if (put_user_s32(status, status_ptr)) 10455 return -TARGET_EFAULT; 10456 } 10457 if (target_rusage) { 10458 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10459 if (rusage_err) { 10460 ret = rusage_err; 10461 } 10462 } 10463 } 10464 } 10465 return ret; 10466 #endif 10467 #ifdef TARGET_NR_swapoff 10468 case TARGET_NR_swapoff: 10469 if (!(p = lock_user_string(arg1))) 10470 return -TARGET_EFAULT; 10471 ret = get_errno(swapoff(p)); 10472 unlock_user(p, arg1, 0); 10473 return ret; 10474 #endif 10475 case TARGET_NR_sysinfo: 10476 { 10477 struct target_sysinfo *target_value; 10478 struct sysinfo value; 10479 ret = get_errno(sysinfo(&value)); 10480 if (!is_error(ret) && arg1) 10481 { 10482 if (!lock_user_struct(VERIFY_WRITE, 
target_value, arg1, 0)) 10483 return -TARGET_EFAULT; 10484 __put_user(value.uptime, &target_value->uptime); 10485 __put_user(value.loads[0], &target_value->loads[0]); 10486 __put_user(value.loads[1], &target_value->loads[1]); 10487 __put_user(value.loads[2], &target_value->loads[2]); 10488 __put_user(value.totalram, &target_value->totalram); 10489 __put_user(value.freeram, &target_value->freeram); 10490 __put_user(value.sharedram, &target_value->sharedram); 10491 __put_user(value.bufferram, &target_value->bufferram); 10492 __put_user(value.totalswap, &target_value->totalswap); 10493 __put_user(value.freeswap, &target_value->freeswap); 10494 __put_user(value.procs, &target_value->procs); 10495 __put_user(value.totalhigh, &target_value->totalhigh); 10496 __put_user(value.freehigh, &target_value->freehigh); 10497 __put_user(value.mem_unit, &target_value->mem_unit); 10498 unlock_user_struct(target_value, arg1, 1); 10499 } 10500 } 10501 return ret; 10502 #ifdef TARGET_NR_ipc 10503 case TARGET_NR_ipc: 10504 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10505 #endif 10506 #ifdef TARGET_NR_semget 10507 case TARGET_NR_semget: 10508 return get_errno(semget(arg1, arg2, arg3)); 10509 #endif 10510 #ifdef TARGET_NR_semop 10511 case TARGET_NR_semop: 10512 return do_semtimedop(arg1, arg2, arg3, 0, false); 10513 #endif 10514 #ifdef TARGET_NR_semtimedop 10515 case TARGET_NR_semtimedop: 10516 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10517 #endif 10518 #ifdef TARGET_NR_semtimedop_time64 10519 case TARGET_NR_semtimedop_time64: 10520 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10521 #endif 10522 #ifdef TARGET_NR_semctl 10523 case TARGET_NR_semctl: 10524 return do_semctl(arg1, arg2, arg3, arg4); 10525 #endif 10526 #ifdef TARGET_NR_msgctl 10527 case TARGET_NR_msgctl: 10528 return do_msgctl(arg1, arg2, arg3); 10529 #endif 10530 #ifdef TARGET_NR_msgget 10531 case TARGET_NR_msgget: 10532 return get_errno(msgget(arg1, arg2)); 10533 #endif 10534 #ifdef 
TARGET_NR_msgrcv 10535 case TARGET_NR_msgrcv: 10536 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10537 #endif 10538 #ifdef TARGET_NR_msgsnd 10539 case TARGET_NR_msgsnd: 10540 return do_msgsnd(arg1, arg2, arg3, arg4); 10541 #endif 10542 #ifdef TARGET_NR_shmget 10543 case TARGET_NR_shmget: 10544 return get_errno(shmget(arg1, arg2, arg3)); 10545 #endif 10546 #ifdef TARGET_NR_shmctl 10547 case TARGET_NR_shmctl: 10548 return do_shmctl(arg1, arg2, arg3); 10549 #endif 10550 #ifdef TARGET_NR_shmat 10551 case TARGET_NR_shmat: 10552 return do_shmat(cpu_env, arg1, arg2, arg3); 10553 #endif 10554 #ifdef TARGET_NR_shmdt 10555 case TARGET_NR_shmdt: 10556 return do_shmdt(arg1); 10557 #endif 10558 case TARGET_NR_fsync: 10559 return get_errno(fsync(arg1)); 10560 case TARGET_NR_clone: 10561 /* Linux manages to have three different orderings for its 10562 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10563 * match the kernel's CONFIG_CLONE_* settings. 10564 * Microblaze is further special in that it uses a sixth 10565 * implicit argument to clone for the TLS pointer. 
10566 */ 10567 #if defined(TARGET_MICROBLAZE) 10568 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10569 #elif defined(TARGET_CLONE_BACKWARDS) 10570 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10571 #elif defined(TARGET_CLONE_BACKWARDS2) 10572 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10573 #else 10574 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10575 #endif 10576 return ret; 10577 #ifdef __NR_exit_group 10578 /* new thread calls */ 10579 case TARGET_NR_exit_group: 10580 preexit_cleanup(cpu_env, arg1); 10581 return get_errno(exit_group(arg1)); 10582 #endif 10583 case TARGET_NR_setdomainname: 10584 if (!(p = lock_user_string(arg1))) 10585 return -TARGET_EFAULT; 10586 ret = get_errno(setdomainname(p, arg2)); 10587 unlock_user(p, arg1, 0); 10588 return ret; 10589 case TARGET_NR_uname: 10590 /* no need to transcode because we use the linux syscall */ 10591 { 10592 struct new_utsname * buf; 10593 10594 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10595 return -TARGET_EFAULT; 10596 ret = get_errno(sys_uname(buf)); 10597 if (!is_error(ret)) { 10598 /* Overwrite the native machine name with whatever is being 10599 emulated. */ 10600 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10601 sizeof(buf->machine)); 10602 /* Allow the user to override the reported release. 
*/ 10603 if (qemu_uname_release && *qemu_uname_release) { 10604 g_strlcpy(buf->release, qemu_uname_release, 10605 sizeof(buf->release)); 10606 } 10607 } 10608 unlock_user_struct(buf, arg1, 1); 10609 } 10610 return ret; 10611 #ifdef TARGET_I386 10612 case TARGET_NR_modify_ldt: 10613 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10614 #if !defined(TARGET_X86_64) 10615 case TARGET_NR_vm86: 10616 return do_vm86(cpu_env, arg1, arg2); 10617 #endif 10618 #endif 10619 #if defined(TARGET_NR_adjtimex) 10620 case TARGET_NR_adjtimex: 10621 { 10622 struct timex host_buf; 10623 10624 if (target_to_host_timex(&host_buf, arg1) != 0) { 10625 return -TARGET_EFAULT; 10626 } 10627 ret = get_errno(adjtimex(&host_buf)); 10628 if (!is_error(ret)) { 10629 if (host_to_target_timex(arg1, &host_buf) != 0) { 10630 return -TARGET_EFAULT; 10631 } 10632 } 10633 } 10634 return ret; 10635 #endif 10636 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10637 case TARGET_NR_clock_adjtime: 10638 { 10639 struct timex htx, *phtx = &htx; 10640 10641 if (target_to_host_timex(phtx, arg2) != 0) { 10642 return -TARGET_EFAULT; 10643 } 10644 ret = get_errno(clock_adjtime(arg1, phtx)); 10645 if (!is_error(ret) && phtx) { 10646 if (host_to_target_timex(arg2, phtx) != 0) { 10647 return -TARGET_EFAULT; 10648 } 10649 } 10650 } 10651 return ret; 10652 #endif 10653 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10654 case TARGET_NR_clock_adjtime64: 10655 { 10656 struct timex htx; 10657 10658 if (target_to_host_timex64(&htx, arg2) != 0) { 10659 return -TARGET_EFAULT; 10660 } 10661 ret = get_errno(clock_adjtime(arg1, &htx)); 10662 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10663 return -TARGET_EFAULT; 10664 } 10665 } 10666 return ret; 10667 #endif 10668 case TARGET_NR_getpgid: 10669 return get_errno(getpgid(arg1)); 10670 case TARGET_NR_fchdir: 10671 return get_errno(fchdir(arg1)); 10672 case TARGET_NR_personality: 10673 return 
get_errno(personality(arg1)); 10674 #ifdef TARGET_NR__llseek /* Not on alpha */ 10675 case TARGET_NR__llseek: 10676 { 10677 int64_t res; 10678 #if !defined(__NR_llseek) 10679 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10680 if (res == -1) { 10681 ret = get_errno(res); 10682 } else { 10683 ret = 0; 10684 } 10685 #else 10686 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10687 #endif 10688 if ((ret == 0) && put_user_s64(res, arg4)) { 10689 return -TARGET_EFAULT; 10690 } 10691 } 10692 return ret; 10693 #endif 10694 #ifdef TARGET_NR_getdents 10695 case TARGET_NR_getdents: 10696 return do_getdents(arg1, arg2, arg3); 10697 #endif /* TARGET_NR_getdents */ 10698 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10699 case TARGET_NR_getdents64: 10700 return do_getdents64(arg1, arg2, arg3); 10701 #endif /* TARGET_NR_getdents64 */ 10702 #if defined(TARGET_NR__newselect) 10703 case TARGET_NR__newselect: 10704 return do_select(arg1, arg2, arg3, arg4, arg5); 10705 #endif 10706 #ifdef TARGET_NR_poll 10707 case TARGET_NR_poll: 10708 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 10709 #endif 10710 #ifdef TARGET_NR_ppoll 10711 case TARGET_NR_ppoll: 10712 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 10713 #endif 10714 #ifdef TARGET_NR_ppoll_time64 10715 case TARGET_NR_ppoll_time64: 10716 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 10717 #endif 10718 case TARGET_NR_flock: 10719 /* NOTE: the flock constant seems to be the same for every 10720 Linux platform */ 10721 return get_errno(safe_flock(arg1, arg2)); 10722 case TARGET_NR_readv: 10723 { 10724 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10725 if (vec != NULL) { 10726 ret = get_errno(safe_readv(arg1, vec, arg3)); 10727 unlock_iovec(vec, arg2, arg3, 1); 10728 } else { 10729 ret = -host_to_target_errno(errno); 10730 } 10731 } 10732 return ret; 10733 case TARGET_NR_writev: 10734 { 10735 struct iovec *vec = lock_iovec(VERIFY_READ, 
arg2, arg3, 1); 10736 if (vec != NULL) { 10737 ret = get_errno(safe_writev(arg1, vec, arg3)); 10738 unlock_iovec(vec, arg2, arg3, 0); 10739 } else { 10740 ret = -host_to_target_errno(errno); 10741 } 10742 } 10743 return ret; 10744 #if defined(TARGET_NR_preadv) 10745 case TARGET_NR_preadv: 10746 { 10747 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10748 if (vec != NULL) { 10749 unsigned long low, high; 10750 10751 target_to_host_low_high(arg4, arg5, &low, &high); 10752 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10753 unlock_iovec(vec, arg2, arg3, 1); 10754 } else { 10755 ret = -host_to_target_errno(errno); 10756 } 10757 } 10758 return ret; 10759 #endif 10760 #if defined(TARGET_NR_pwritev) 10761 case TARGET_NR_pwritev: 10762 { 10763 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10764 if (vec != NULL) { 10765 unsigned long low, high; 10766 10767 target_to_host_low_high(arg4, arg5, &low, &high); 10768 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10769 unlock_iovec(vec, arg2, arg3, 0); 10770 } else { 10771 ret = -host_to_target_errno(errno); 10772 } 10773 } 10774 return ret; 10775 #endif 10776 case TARGET_NR_getsid: 10777 return get_errno(getsid(arg1)); 10778 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10779 case TARGET_NR_fdatasync: 10780 return get_errno(fdatasync(arg1)); 10781 #endif 10782 case TARGET_NR_sched_getaffinity: 10783 { 10784 unsigned int mask_size; 10785 unsigned long *mask; 10786 10787 /* 10788 * sched_getaffinity needs multiples of ulong, so need to take 10789 * care of mismatches between target ulong and host ulong sizes. 
10790 */ 10791 if (arg2 & (sizeof(abi_ulong) - 1)) { 10792 return -TARGET_EINVAL; 10793 } 10794 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10795 10796 mask = alloca(mask_size); 10797 memset(mask, 0, mask_size); 10798 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10799 10800 if (!is_error(ret)) { 10801 if (ret > arg2) { 10802 /* More data returned than the caller's buffer will fit. 10803 * This only happens if sizeof(abi_long) < sizeof(long) 10804 * and the caller passed us a buffer holding an odd number 10805 * of abi_longs. If the host kernel is actually using the 10806 * extra 4 bytes then fail EINVAL; otherwise we can just 10807 * ignore them and only copy the interesting part. 10808 */ 10809 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10810 if (numcpus > arg2 * 8) { 10811 return -TARGET_EINVAL; 10812 } 10813 ret = arg2; 10814 } 10815 10816 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10817 return -TARGET_EFAULT; 10818 } 10819 } 10820 } 10821 return ret; 10822 case TARGET_NR_sched_setaffinity: 10823 { 10824 unsigned int mask_size; 10825 unsigned long *mask; 10826 10827 /* 10828 * sched_setaffinity needs multiples of ulong, so need to take 10829 * care of mismatches between target ulong and host ulong sizes. 10830 */ 10831 if (arg2 & (sizeof(abi_ulong) - 1)) { 10832 return -TARGET_EINVAL; 10833 } 10834 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10835 mask = alloca(mask_size); 10836 10837 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10838 if (ret) { 10839 return ret; 10840 } 10841 10842 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10843 } 10844 case TARGET_NR_getcpu: 10845 { 10846 unsigned cpu, node; 10847 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10848 arg2 ? 
&node : NULL, 10849 NULL)); 10850 if (is_error(ret)) { 10851 return ret; 10852 } 10853 if (arg1 && put_user_u32(cpu, arg1)) { 10854 return -TARGET_EFAULT; 10855 } 10856 if (arg2 && put_user_u32(node, arg2)) { 10857 return -TARGET_EFAULT; 10858 } 10859 } 10860 return ret; 10861 case TARGET_NR_sched_setparam: 10862 { 10863 struct target_sched_param *target_schp; 10864 struct sched_param schp; 10865 10866 if (arg2 == 0) { 10867 return -TARGET_EINVAL; 10868 } 10869 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) { 10870 return -TARGET_EFAULT; 10871 } 10872 schp.sched_priority = tswap32(target_schp->sched_priority); 10873 unlock_user_struct(target_schp, arg2, 0); 10874 return get_errno(sys_sched_setparam(arg1, &schp)); 10875 } 10876 case TARGET_NR_sched_getparam: 10877 { 10878 struct target_sched_param *target_schp; 10879 struct sched_param schp; 10880 10881 if (arg2 == 0) { 10882 return -TARGET_EINVAL; 10883 } 10884 ret = get_errno(sys_sched_getparam(arg1, &schp)); 10885 if (!is_error(ret)) { 10886 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) { 10887 return -TARGET_EFAULT; 10888 } 10889 target_schp->sched_priority = tswap32(schp.sched_priority); 10890 unlock_user_struct(target_schp, arg2, 1); 10891 } 10892 } 10893 return ret; 10894 case TARGET_NR_sched_setscheduler: 10895 { 10896 struct target_sched_param *target_schp; 10897 struct sched_param schp; 10898 if (arg3 == 0) { 10899 return -TARGET_EINVAL; 10900 } 10901 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) { 10902 return -TARGET_EFAULT; 10903 } 10904 schp.sched_priority = tswap32(target_schp->sched_priority); 10905 unlock_user_struct(target_schp, arg3, 0); 10906 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp)); 10907 } 10908 case TARGET_NR_sched_getscheduler: 10909 return get_errno(sys_sched_getscheduler(arg1)); 10910 case TARGET_NR_sched_getattr: 10911 { 10912 struct target_sched_attr *target_scha; 10913 struct sched_attr scha; 10914 if (arg2 == 0) { 10915 return 
-TARGET_EINVAL; 10916 } 10917 if (arg3 > sizeof(scha)) { 10918 arg3 = sizeof(scha); 10919 } 10920 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4)); 10921 if (!is_error(ret)) { 10922 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10923 if (!target_scha) { 10924 return -TARGET_EFAULT; 10925 } 10926 target_scha->size = tswap32(scha.size); 10927 target_scha->sched_policy = tswap32(scha.sched_policy); 10928 target_scha->sched_flags = tswap64(scha.sched_flags); 10929 target_scha->sched_nice = tswap32(scha.sched_nice); 10930 target_scha->sched_priority = tswap32(scha.sched_priority); 10931 target_scha->sched_runtime = tswap64(scha.sched_runtime); 10932 target_scha->sched_deadline = tswap64(scha.sched_deadline); 10933 target_scha->sched_period = tswap64(scha.sched_period); 10934 if (scha.size > offsetof(struct sched_attr, sched_util_min)) { 10935 target_scha->sched_util_min = tswap32(scha.sched_util_min); 10936 target_scha->sched_util_max = tswap32(scha.sched_util_max); 10937 } 10938 unlock_user(target_scha, arg2, arg3); 10939 } 10940 return ret; 10941 } 10942 case TARGET_NR_sched_setattr: 10943 { 10944 struct target_sched_attr *target_scha; 10945 struct sched_attr scha; 10946 uint32_t size; 10947 int zeroed; 10948 if (arg2 == 0) { 10949 return -TARGET_EINVAL; 10950 } 10951 if (get_user_u32(size, arg2)) { 10952 return -TARGET_EFAULT; 10953 } 10954 if (!size) { 10955 size = offsetof(struct target_sched_attr, sched_util_min); 10956 } 10957 if (size < offsetof(struct target_sched_attr, sched_util_min)) { 10958 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 10959 return -TARGET_EFAULT; 10960 } 10961 return -TARGET_E2BIG; 10962 } 10963 10964 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size); 10965 if (zeroed < 0) { 10966 return zeroed; 10967 } else if (zeroed == 0) { 10968 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 10969 return -TARGET_EFAULT; 10970 } 10971 return -TARGET_E2BIG; 10972 } 10973 if (size > 
sizeof(struct target_sched_attr)) { 10974 size = sizeof(struct target_sched_attr); 10975 } 10976 10977 target_scha = lock_user(VERIFY_READ, arg2, size, 1); 10978 if (!target_scha) { 10979 return -TARGET_EFAULT; 10980 } 10981 scha.size = size; 10982 scha.sched_policy = tswap32(target_scha->sched_policy); 10983 scha.sched_flags = tswap64(target_scha->sched_flags); 10984 scha.sched_nice = tswap32(target_scha->sched_nice); 10985 scha.sched_priority = tswap32(target_scha->sched_priority); 10986 scha.sched_runtime = tswap64(target_scha->sched_runtime); 10987 scha.sched_deadline = tswap64(target_scha->sched_deadline); 10988 scha.sched_period = tswap64(target_scha->sched_period); 10989 if (size > offsetof(struct target_sched_attr, sched_util_min)) { 10990 scha.sched_util_min = tswap32(target_scha->sched_util_min); 10991 scha.sched_util_max = tswap32(target_scha->sched_util_max); 10992 } 10993 unlock_user(target_scha, arg2, 0); 10994 return get_errno(sys_sched_setattr(arg1, &scha, arg3)); 10995 } 10996 case TARGET_NR_sched_yield: 10997 return get_errno(sched_yield()); 10998 case TARGET_NR_sched_get_priority_max: 10999 return get_errno(sched_get_priority_max(arg1)); 11000 case TARGET_NR_sched_get_priority_min: 11001 return get_errno(sched_get_priority_min(arg1)); 11002 #ifdef TARGET_NR_sched_rr_get_interval 11003 case TARGET_NR_sched_rr_get_interval: 11004 { 11005 struct timespec ts; 11006 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 11007 if (!is_error(ret)) { 11008 ret = host_to_target_timespec(arg2, &ts); 11009 } 11010 } 11011 return ret; 11012 #endif 11013 #ifdef TARGET_NR_sched_rr_get_interval_time64 11014 case TARGET_NR_sched_rr_get_interval_time64: 11015 { 11016 struct timespec ts; 11017 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 11018 if (!is_error(ret)) { 11019 ret = host_to_target_timespec64(arg2, &ts); 11020 } 11021 } 11022 return ret; 11023 #endif 11024 #if defined(TARGET_NR_nanosleep) 11025 case TARGET_NR_nanosleep: 11026 { 11027 struct timespec 
req, rem; 11028 target_to_host_timespec(&req, arg1); 11029 ret = get_errno(safe_nanosleep(&req, &rem)); 11030 if (is_error(ret) && arg2) { 11031 host_to_target_timespec(arg2, &rem); 11032 } 11033 } 11034 return ret; 11035 #endif 11036 case TARGET_NR_prctl: 11037 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5); 11038 break; 11039 #ifdef TARGET_NR_arch_prctl 11040 case TARGET_NR_arch_prctl: 11041 return do_arch_prctl(cpu_env, arg1, arg2); 11042 #endif 11043 #ifdef TARGET_NR_pread64 11044 case TARGET_NR_pread64: 11045 if (regpairs_aligned(cpu_env, num)) { 11046 arg4 = arg5; 11047 arg5 = arg6; 11048 } 11049 if (arg2 == 0 && arg3 == 0) { 11050 /* Special-case NULL buffer and zero length, which should succeed */ 11051 p = 0; 11052 } else { 11053 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11054 if (!p) { 11055 return -TARGET_EFAULT; 11056 } 11057 } 11058 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 11059 unlock_user(p, arg2, ret); 11060 return ret; 11061 case TARGET_NR_pwrite64: 11062 if (regpairs_aligned(cpu_env, num)) { 11063 arg4 = arg5; 11064 arg5 = arg6; 11065 } 11066 if (arg2 == 0 && arg3 == 0) { 11067 /* Special-case NULL buffer and zero length, which should succeed */ 11068 p = 0; 11069 } else { 11070 p = lock_user(VERIFY_READ, arg2, arg3, 1); 11071 if (!p) { 11072 return -TARGET_EFAULT; 11073 } 11074 } 11075 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 11076 unlock_user(p, arg2, 0); 11077 return ret; 11078 #endif 11079 case TARGET_NR_getcwd: 11080 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 11081 return -TARGET_EFAULT; 11082 ret = get_errno(sys_getcwd1(p, arg2)); 11083 unlock_user(p, arg1, ret); 11084 return ret; 11085 case TARGET_NR_capget: 11086 case TARGET_NR_capset: 11087 { 11088 struct target_user_cap_header *target_header; 11089 struct target_user_cap_data *target_data = NULL; 11090 struct __user_cap_header_struct header; 11091 struct __user_cap_data_struct data[2]; 11092 struct 
__user_cap_data_struct *dataptr = NULL; 11093 int i, target_datalen; 11094 int data_items = 1; 11095 11096 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 11097 return -TARGET_EFAULT; 11098 } 11099 header.version = tswap32(target_header->version); 11100 header.pid = tswap32(target_header->pid); 11101 11102 if (header.version != _LINUX_CAPABILITY_VERSION) { 11103 /* Version 2 and up takes pointer to two user_data structs */ 11104 data_items = 2; 11105 } 11106 11107 target_datalen = sizeof(*target_data) * data_items; 11108 11109 if (arg2) { 11110 if (num == TARGET_NR_capget) { 11111 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 11112 } else { 11113 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 11114 } 11115 if (!target_data) { 11116 unlock_user_struct(target_header, arg1, 0); 11117 return -TARGET_EFAULT; 11118 } 11119 11120 if (num == TARGET_NR_capset) { 11121 for (i = 0; i < data_items; i++) { 11122 data[i].effective = tswap32(target_data[i].effective); 11123 data[i].permitted = tswap32(target_data[i].permitted); 11124 data[i].inheritable = tswap32(target_data[i].inheritable); 11125 } 11126 } 11127 11128 dataptr = data; 11129 } 11130 11131 if (num == TARGET_NR_capget) { 11132 ret = get_errno(capget(&header, dataptr)); 11133 } else { 11134 ret = get_errno(capset(&header, dataptr)); 11135 } 11136 11137 /* The kernel always updates version for both capget and capset */ 11138 target_header->version = tswap32(header.version); 11139 unlock_user_struct(target_header, arg1, 1); 11140 11141 if (arg2) { 11142 if (num == TARGET_NR_capget) { 11143 for (i = 0; i < data_items; i++) { 11144 target_data[i].effective = tswap32(data[i].effective); 11145 target_data[i].permitted = tswap32(data[i].permitted); 11146 target_data[i].inheritable = tswap32(data[i].inheritable); 11147 } 11148 unlock_user(target_data, arg2, target_datalen); 11149 } else { 11150 unlock_user(target_data, arg2, 0); 11151 } 11152 } 11153 return ret; 11154 } 11155 
case TARGET_NR_sigaltstack: 11156 return do_sigaltstack(arg1, arg2, cpu_env); 11157 11158 #ifdef CONFIG_SENDFILE 11159 #ifdef TARGET_NR_sendfile 11160 case TARGET_NR_sendfile: 11161 { 11162 off_t *offp = NULL; 11163 off_t off; 11164 if (arg3) { 11165 ret = get_user_sal(off, arg3); 11166 if (is_error(ret)) { 11167 return ret; 11168 } 11169 offp = &off; 11170 } 11171 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11172 if (!is_error(ret) && arg3) { 11173 abi_long ret2 = put_user_sal(off, arg3); 11174 if (is_error(ret2)) { 11175 ret = ret2; 11176 } 11177 } 11178 return ret; 11179 } 11180 #endif 11181 #ifdef TARGET_NR_sendfile64 11182 case TARGET_NR_sendfile64: 11183 { 11184 off_t *offp = NULL; 11185 off_t off; 11186 if (arg3) { 11187 ret = get_user_s64(off, arg3); 11188 if (is_error(ret)) { 11189 return ret; 11190 } 11191 offp = &off; 11192 } 11193 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11194 if (!is_error(ret) && arg3) { 11195 abi_long ret2 = put_user_s64(off, arg3); 11196 if (is_error(ret2)) { 11197 ret = ret2; 11198 } 11199 } 11200 return ret; 11201 } 11202 #endif 11203 #endif 11204 #ifdef TARGET_NR_vfork 11205 case TARGET_NR_vfork: 11206 return get_errno(do_fork(cpu_env, 11207 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11208 0, 0, 0, 0)); 11209 #endif 11210 #ifdef TARGET_NR_ugetrlimit 11211 case TARGET_NR_ugetrlimit: 11212 { 11213 struct rlimit rlim; 11214 int resource = target_to_host_resource(arg1); 11215 ret = get_errno(getrlimit(resource, &rlim)); 11216 if (!is_error(ret)) { 11217 struct target_rlimit *target_rlim; 11218 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11219 return -TARGET_EFAULT; 11220 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11221 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11222 unlock_user_struct(target_rlim, arg2, 1); 11223 } 11224 return ret; 11225 } 11226 #endif 11227 #ifdef TARGET_NR_truncate64 11228 case TARGET_NR_truncate64: 11229 if (!(p = lock_user_string(arg1))) 11230 
return -TARGET_EFAULT; 11231 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11232 unlock_user(p, arg1, 0); 11233 return ret; 11234 #endif 11235 #ifdef TARGET_NR_ftruncate64 11236 case TARGET_NR_ftruncate64: 11237 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11238 #endif 11239 #ifdef TARGET_NR_stat64 11240 case TARGET_NR_stat64: 11241 if (!(p = lock_user_string(arg1))) { 11242 return -TARGET_EFAULT; 11243 } 11244 ret = get_errno(stat(path(p), &st)); 11245 unlock_user(p, arg1, 0); 11246 if (!is_error(ret)) 11247 ret = host_to_target_stat64(cpu_env, arg2, &st); 11248 return ret; 11249 #endif 11250 #ifdef TARGET_NR_lstat64 11251 case TARGET_NR_lstat64: 11252 if (!(p = lock_user_string(arg1))) { 11253 return -TARGET_EFAULT; 11254 } 11255 ret = get_errno(lstat(path(p), &st)); 11256 unlock_user(p, arg1, 0); 11257 if (!is_error(ret)) 11258 ret = host_to_target_stat64(cpu_env, arg2, &st); 11259 return ret; 11260 #endif 11261 #ifdef TARGET_NR_fstat64 11262 case TARGET_NR_fstat64: 11263 ret = get_errno(fstat(arg1, &st)); 11264 if (!is_error(ret)) 11265 ret = host_to_target_stat64(cpu_env, arg2, &st); 11266 return ret; 11267 #endif 11268 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11269 #ifdef TARGET_NR_fstatat64 11270 case TARGET_NR_fstatat64: 11271 #endif 11272 #ifdef TARGET_NR_newfstatat 11273 case TARGET_NR_newfstatat: 11274 #endif 11275 if (!(p = lock_user_string(arg2))) { 11276 return -TARGET_EFAULT; 11277 } 11278 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11279 unlock_user(p, arg2, 0); 11280 if (!is_error(ret)) 11281 ret = host_to_target_stat64(cpu_env, arg3, &st); 11282 return ret; 11283 #endif 11284 #if defined(TARGET_NR_statx) 11285 case TARGET_NR_statx: 11286 { 11287 struct target_statx *target_stx; 11288 int dirfd = arg1; 11289 int flags = arg3; 11290 11291 p = lock_user_string(arg2); 11292 if (p == NULL) { 11293 return -TARGET_EFAULT; 11294 } 11295 #if defined(__NR_statx) 11296 { 11297 /* 11298 * It is 
assumed that struct statx is architecture independent. 11299 */ 11300 struct target_statx host_stx; 11301 int mask = arg4; 11302 11303 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11304 if (!is_error(ret)) { 11305 if (host_to_target_statx(&host_stx, arg5) != 0) { 11306 unlock_user(p, arg2, 0); 11307 return -TARGET_EFAULT; 11308 } 11309 } 11310 11311 if (ret != -TARGET_ENOSYS) { 11312 unlock_user(p, arg2, 0); 11313 return ret; 11314 } 11315 } 11316 #endif 11317 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11318 unlock_user(p, arg2, 0); 11319 11320 if (!is_error(ret)) { 11321 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11322 return -TARGET_EFAULT; 11323 } 11324 memset(target_stx, 0, sizeof(*target_stx)); 11325 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11326 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11327 __put_user(st.st_ino, &target_stx->stx_ino); 11328 __put_user(st.st_mode, &target_stx->stx_mode); 11329 __put_user(st.st_uid, &target_stx->stx_uid); 11330 __put_user(st.st_gid, &target_stx->stx_gid); 11331 __put_user(st.st_nlink, &target_stx->stx_nlink); 11332 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11333 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11334 __put_user(st.st_size, &target_stx->stx_size); 11335 __put_user(st.st_blksize, &target_stx->stx_blksize); 11336 __put_user(st.st_blocks, &target_stx->stx_blocks); 11337 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11338 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11339 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11340 unlock_user_struct(target_stx, arg5, 1); 11341 } 11342 } 11343 return ret; 11344 #endif 11345 #ifdef TARGET_NR_lchown 11346 case TARGET_NR_lchown: 11347 if (!(p = lock_user_string(arg1))) 11348 return -TARGET_EFAULT; 11349 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11350 unlock_user(p, arg1, 0); 11351 return ret; 11352 #endif 11353 #ifdef 
TARGET_NR_getuid 11354 case TARGET_NR_getuid: 11355 return get_errno(high2lowuid(getuid())); 11356 #endif 11357 #ifdef TARGET_NR_getgid 11358 case TARGET_NR_getgid: 11359 return get_errno(high2lowgid(getgid())); 11360 #endif 11361 #ifdef TARGET_NR_geteuid 11362 case TARGET_NR_geteuid: 11363 return get_errno(high2lowuid(geteuid())); 11364 #endif 11365 #ifdef TARGET_NR_getegid 11366 case TARGET_NR_getegid: 11367 return get_errno(high2lowgid(getegid())); 11368 #endif 11369 case TARGET_NR_setreuid: 11370 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11371 case TARGET_NR_setregid: 11372 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11373 case TARGET_NR_getgroups: 11374 { 11375 int gidsetsize = arg1; 11376 target_id *target_grouplist; 11377 gid_t *grouplist; 11378 int i; 11379 11380 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11381 ret = get_errno(getgroups(gidsetsize, grouplist)); 11382 if (gidsetsize == 0) 11383 return ret; 11384 if (!is_error(ret)) { 11385 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11386 if (!target_grouplist) 11387 return -TARGET_EFAULT; 11388 for(i = 0;i < ret; i++) 11389 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11390 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11391 } 11392 } 11393 return ret; 11394 case TARGET_NR_setgroups: 11395 { 11396 int gidsetsize = arg1; 11397 target_id *target_grouplist; 11398 gid_t *grouplist = NULL; 11399 int i; 11400 if (gidsetsize) { 11401 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11402 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11403 if (!target_grouplist) { 11404 return -TARGET_EFAULT; 11405 } 11406 for (i = 0; i < gidsetsize; i++) { 11407 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11408 } 11409 unlock_user(target_grouplist, arg2, 0); 11410 } 11411 return get_errno(setgroups(gidsetsize, grouplist)); 11412 } 11413 case 
TARGET_NR_fchown: 11414 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11415 #if defined(TARGET_NR_fchownat) 11416 case TARGET_NR_fchownat: 11417 if (!(p = lock_user_string(arg2))) 11418 return -TARGET_EFAULT; 11419 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11420 low2highgid(arg4), arg5)); 11421 unlock_user(p, arg2, 0); 11422 return ret; 11423 #endif 11424 #ifdef TARGET_NR_setresuid 11425 case TARGET_NR_setresuid: 11426 return get_errno(sys_setresuid(low2highuid(arg1), 11427 low2highuid(arg2), 11428 low2highuid(arg3))); 11429 #endif 11430 #ifdef TARGET_NR_getresuid 11431 case TARGET_NR_getresuid: 11432 { 11433 uid_t ruid, euid, suid; 11434 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11435 if (!is_error(ret)) { 11436 if (put_user_id(high2lowuid(ruid), arg1) 11437 || put_user_id(high2lowuid(euid), arg2) 11438 || put_user_id(high2lowuid(suid), arg3)) 11439 return -TARGET_EFAULT; 11440 } 11441 } 11442 return ret; 11443 #endif 11444 #ifdef TARGET_NR_getresgid 11445 case TARGET_NR_setresgid: 11446 return get_errno(sys_setresgid(low2highgid(arg1), 11447 low2highgid(arg2), 11448 low2highgid(arg3))); 11449 #endif 11450 #ifdef TARGET_NR_getresgid 11451 case TARGET_NR_getresgid: 11452 { 11453 gid_t rgid, egid, sgid; 11454 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11455 if (!is_error(ret)) { 11456 if (put_user_id(high2lowgid(rgid), arg1) 11457 || put_user_id(high2lowgid(egid), arg2) 11458 || put_user_id(high2lowgid(sgid), arg3)) 11459 return -TARGET_EFAULT; 11460 } 11461 } 11462 return ret; 11463 #endif 11464 #ifdef TARGET_NR_chown 11465 case TARGET_NR_chown: 11466 if (!(p = lock_user_string(arg1))) 11467 return -TARGET_EFAULT; 11468 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11469 unlock_user(p, arg1, 0); 11470 return ret; 11471 #endif 11472 case TARGET_NR_setuid: 11473 return get_errno(sys_setuid(low2highuid(arg1))); 11474 case TARGET_NR_setgid: 11475 return get_errno(sys_setgid(low2highgid(arg1))); 11476 
case TARGET_NR_setfsuid: 11477 return get_errno(setfsuid(arg1)); 11478 case TARGET_NR_setfsgid: 11479 return get_errno(setfsgid(arg1)); 11480 11481 #ifdef TARGET_NR_lchown32 11482 case TARGET_NR_lchown32: 11483 if (!(p = lock_user_string(arg1))) 11484 return -TARGET_EFAULT; 11485 ret = get_errno(lchown(p, arg2, arg3)); 11486 unlock_user(p, arg1, 0); 11487 return ret; 11488 #endif 11489 #ifdef TARGET_NR_getuid32 11490 case TARGET_NR_getuid32: 11491 return get_errno(getuid()); 11492 #endif 11493 11494 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11495 /* Alpha specific */ 11496 case TARGET_NR_getxuid: 11497 { 11498 uid_t euid; 11499 euid=geteuid(); 11500 cpu_env->ir[IR_A4]=euid; 11501 } 11502 return get_errno(getuid()); 11503 #endif 11504 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11505 /* Alpha specific */ 11506 case TARGET_NR_getxgid: 11507 { 11508 uid_t egid; 11509 egid=getegid(); 11510 cpu_env->ir[IR_A4]=egid; 11511 } 11512 return get_errno(getgid()); 11513 #endif 11514 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11515 /* Alpha specific */ 11516 case TARGET_NR_osf_getsysinfo: 11517 ret = -TARGET_EOPNOTSUPP; 11518 switch (arg1) { 11519 case TARGET_GSI_IEEE_FP_CONTROL: 11520 { 11521 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); 11522 uint64_t swcr = cpu_env->swcr; 11523 11524 swcr &= ~SWCR_STATUS_MASK; 11525 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; 11526 11527 if (put_user_u64 (swcr, arg2)) 11528 return -TARGET_EFAULT; 11529 ret = 0; 11530 } 11531 break; 11532 11533 /* case GSI_IEEE_STATE_AT_SIGNAL: 11534 -- Not implemented in linux kernel. 11535 case GSI_UACPROC: 11536 -- Retrieves current unaligned access state; not much used. 11537 case GSI_PROC_TYPE: 11538 -- Retrieves implver information; surely not used. 11539 case GSI_GET_HWRPB: 11540 -- Grabs a copy of the HWRPB; surely not used. 
11541 */ 11542 } 11543 return ret; 11544 #endif 11545 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11546 /* Alpha specific */ 11547 case TARGET_NR_osf_setsysinfo: 11548 ret = -TARGET_EOPNOTSUPP; 11549 switch (arg1) { 11550 case TARGET_SSI_IEEE_FP_CONTROL: 11551 { 11552 uint64_t swcr, fpcr; 11553 11554 if (get_user_u64 (swcr, arg2)) { 11555 return -TARGET_EFAULT; 11556 } 11557 11558 /* 11559 * The kernel calls swcr_update_status to update the 11560 * status bits from the fpcr at every point that it 11561 * could be queried. Therefore, we store the status 11562 * bits only in FPCR. 11563 */ 11564 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); 11565 11566 fpcr = cpu_alpha_load_fpcr(cpu_env); 11567 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); 11568 fpcr |= alpha_ieee_swcr_to_fpcr(swcr); 11569 cpu_alpha_store_fpcr(cpu_env, fpcr); 11570 ret = 0; 11571 } 11572 break; 11573 11574 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11575 { 11576 uint64_t exc, fpcr, fex; 11577 11578 if (get_user_u64(exc, arg2)) { 11579 return -TARGET_EFAULT; 11580 } 11581 exc &= SWCR_STATUS_MASK; 11582 fpcr = cpu_alpha_load_fpcr(cpu_env); 11583 11584 /* Old exceptions are not signaled. */ 11585 fex = alpha_ieee_fpcr_to_swcr(fpcr); 11586 fex = exc & ~fex; 11587 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; 11588 fex &= (cpu_env)->swcr; 11589 11590 /* Update the hardware fpcr. 
*/ 11591 fpcr |= alpha_ieee_swcr_to_fpcr(exc); 11592 cpu_alpha_store_fpcr(cpu_env, fpcr); 11593 11594 if (fex) { 11595 int si_code = TARGET_FPE_FLTUNK; 11596 target_siginfo_t info; 11597 11598 if (fex & SWCR_TRAP_ENABLE_DNO) { 11599 si_code = TARGET_FPE_FLTUND; 11600 } 11601 if (fex & SWCR_TRAP_ENABLE_INE) { 11602 si_code = TARGET_FPE_FLTRES; 11603 } 11604 if (fex & SWCR_TRAP_ENABLE_UNF) { 11605 si_code = TARGET_FPE_FLTUND; 11606 } 11607 if (fex & SWCR_TRAP_ENABLE_OVF) { 11608 si_code = TARGET_FPE_FLTOVF; 11609 } 11610 if (fex & SWCR_TRAP_ENABLE_DZE) { 11611 si_code = TARGET_FPE_FLTDIV; 11612 } 11613 if (fex & SWCR_TRAP_ENABLE_INV) { 11614 si_code = TARGET_FPE_FLTINV; 11615 } 11616 11617 info.si_signo = SIGFPE; 11618 info.si_errno = 0; 11619 info.si_code = si_code; 11620 info._sifields._sigfault._addr = (cpu_env)->pc; 11621 queue_signal(cpu_env, info.si_signo, 11622 QEMU_SI_FAULT, &info); 11623 } 11624 ret = 0; 11625 } 11626 break; 11627 11628 /* case SSI_NVPAIRS: 11629 -- Used with SSIN_UACPROC to enable unaligned accesses. 11630 case SSI_IEEE_STATE_AT_SIGNAL: 11631 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11632 -- Not implemented in linux kernel 11633 */ 11634 } 11635 return ret; 11636 #endif 11637 #ifdef TARGET_NR_osf_sigprocmask 11638 /* Alpha specific. 
 */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the target's how-constant to the host's. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* OSF convention: on success return the previous mask. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

/* 32-bit UID/GID syscall variants: pass the values straight through to the
 * host; 32-bit target ids fit in the host types. */
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): gidsetsize comes from the guest unchecked; a
             * negative or huge value makes this alloca() unsafe — TODO
             * confirm against upstream which later added a bounds check. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): same unchecked-gidsetsize alloca() concern as
             * getgroups32 above. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy all three ids back to guest memory. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* arg1 = addr, arg2 = length, arg3 = result vector */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error directly, not via errno. */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants from the generic ABI. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM uses a differently laid out flock64. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            /* All non-lock commands share the plain fcntl path. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* 64-bit offset is split across a (possibly aligned) register pair. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        /* arg1 = path, arg2 = list buffer (may be 0 to query size), arg3 = size */
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* arg1 = path, arg2 = name, arg3 = value, arg4 = size, arg5 = flags */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
12128 #ifdef TARGET_NR_set_thread_area 12129 case TARGET_NR_set_thread_area: 12130 #if defined(TARGET_MIPS) 12131 cpu_env->active_tc.CP0_UserLocal = arg1; 12132 return 0; 12133 #elif defined(TARGET_CRIS) 12134 if (arg1 & 0xff) 12135 ret = -TARGET_EINVAL; 12136 else { 12137 cpu_env->pregs[PR_PID] = arg1; 12138 ret = 0; 12139 } 12140 return ret; 12141 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 12142 return do_set_thread_area(cpu_env, arg1); 12143 #elif defined(TARGET_M68K) 12144 { 12145 TaskState *ts = cpu->opaque; 12146 ts->tp_value = arg1; 12147 return 0; 12148 } 12149 #else 12150 return -TARGET_ENOSYS; 12151 #endif 12152 #endif 12153 #ifdef TARGET_NR_get_thread_area 12154 case TARGET_NR_get_thread_area: 12155 #if defined(TARGET_I386) && defined(TARGET_ABI32) 12156 return do_get_thread_area(cpu_env, arg1); 12157 #elif defined(TARGET_M68K) 12158 { 12159 TaskState *ts = cpu->opaque; 12160 return ts->tp_value; 12161 } 12162 #else 12163 return -TARGET_ENOSYS; 12164 #endif 12165 #endif 12166 #ifdef TARGET_NR_getdomainname 12167 case TARGET_NR_getdomainname: 12168 return -TARGET_ENOSYS; 12169 #endif 12170 12171 #ifdef TARGET_NR_clock_settime 12172 case TARGET_NR_clock_settime: 12173 { 12174 struct timespec ts; 12175 12176 ret = target_to_host_timespec(&ts, arg2); 12177 if (!is_error(ret)) { 12178 ret = get_errno(clock_settime(arg1, &ts)); 12179 } 12180 return ret; 12181 } 12182 #endif 12183 #ifdef TARGET_NR_clock_settime64 12184 case TARGET_NR_clock_settime64: 12185 { 12186 struct timespec ts; 12187 12188 ret = target_to_host_timespec64(&ts, arg2); 12189 if (!is_error(ret)) { 12190 ret = get_errno(clock_settime(arg1, &ts)); 12191 } 12192 return ret; 12193 } 12194 #endif 12195 #ifdef TARGET_NR_clock_gettime 12196 case TARGET_NR_clock_gettime: 12197 { 12198 struct timespec ts; 12199 ret = get_errno(clock_gettime(arg1, &ts)); 12200 if (!is_error(ret)) { 12201 ret = host_to_target_timespec(arg2, &ts); 12202 } 12203 return ret; 12204 } 12205 #endif 12206 #ifdef 
TARGET_NR_clock_gettime64 12207 case TARGET_NR_clock_gettime64: 12208 { 12209 struct timespec ts; 12210 ret = get_errno(clock_gettime(arg1, &ts)); 12211 if (!is_error(ret)) { 12212 ret = host_to_target_timespec64(arg2, &ts); 12213 } 12214 return ret; 12215 } 12216 #endif 12217 #ifdef TARGET_NR_clock_getres 12218 case TARGET_NR_clock_getres: 12219 { 12220 struct timespec ts; 12221 ret = get_errno(clock_getres(arg1, &ts)); 12222 if (!is_error(ret)) { 12223 host_to_target_timespec(arg2, &ts); 12224 } 12225 return ret; 12226 } 12227 #endif 12228 #ifdef TARGET_NR_clock_getres_time64 12229 case TARGET_NR_clock_getres_time64: 12230 { 12231 struct timespec ts; 12232 ret = get_errno(clock_getres(arg1, &ts)); 12233 if (!is_error(ret)) { 12234 host_to_target_timespec64(arg2, &ts); 12235 } 12236 return ret; 12237 } 12238 #endif 12239 #ifdef TARGET_NR_clock_nanosleep 12240 case TARGET_NR_clock_nanosleep: 12241 { 12242 struct timespec ts; 12243 if (target_to_host_timespec(&ts, arg3)) { 12244 return -TARGET_EFAULT; 12245 } 12246 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12247 &ts, arg4 ? &ts : NULL)); 12248 /* 12249 * if the call is interrupted by a signal handler, it fails 12250 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not 12251 * TIMER_ABSTIME, it returns the remaining unslept time in arg4. 12252 */ 12253 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12254 host_to_target_timespec(arg4, &ts)) { 12255 return -TARGET_EFAULT; 12256 } 12257 12258 return ret; 12259 } 12260 #endif 12261 #ifdef TARGET_NR_clock_nanosleep_time64 12262 case TARGET_NR_clock_nanosleep_time64: 12263 { 12264 struct timespec ts; 12265 12266 if (target_to_host_timespec64(&ts, arg3)) { 12267 return -TARGET_EFAULT; 12268 } 12269 12270 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12271 &ts, arg4 ? 
&ts : NULL)); 12272 12273 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12274 host_to_target_timespec64(arg4, &ts)) { 12275 return -TARGET_EFAULT; 12276 } 12277 return ret; 12278 } 12279 #endif 12280 12281 #if defined(TARGET_NR_set_tid_address) 12282 case TARGET_NR_set_tid_address: 12283 { 12284 TaskState *ts = cpu->opaque; 12285 ts->child_tidptr = arg1; 12286 /* do not call host set_tid_address() syscall, instead return tid() */ 12287 return get_errno(sys_gettid()); 12288 } 12289 #endif 12290 12291 case TARGET_NR_tkill: 12292 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12293 12294 case TARGET_NR_tgkill: 12295 return get_errno(safe_tgkill((int)arg1, (int)arg2, 12296 target_to_host_signal(arg3))); 12297 12298 #ifdef TARGET_NR_set_robust_list 12299 case TARGET_NR_set_robust_list: 12300 case TARGET_NR_get_robust_list: 12301 /* The ABI for supporting robust futexes has userspace pass 12302 * the kernel a pointer to a linked list which is updated by 12303 * userspace after the syscall; the list is walked by the kernel 12304 * when the thread exits. Since the linked list in QEMU guest 12305 * memory isn't a valid linked list for the host and we have 12306 * no way to reliably intercept the thread-death event, we can't 12307 * support these. Silently return ENOSYS so that guest userspace 12308 * falls back to a non-robust futex implementation (which should 12309 * be OK except in the corner case of the guest crashing while 12310 * holding a mutex that is shared with another process via 12311 * shared memory). 
12312 */ 12313 return -TARGET_ENOSYS; 12314 #endif 12315 12316 #if defined(TARGET_NR_utimensat) 12317 case TARGET_NR_utimensat: 12318 { 12319 struct timespec *tsp, ts[2]; 12320 if (!arg3) { 12321 tsp = NULL; 12322 } else { 12323 if (target_to_host_timespec(ts, arg3)) { 12324 return -TARGET_EFAULT; 12325 } 12326 if (target_to_host_timespec(ts + 1, arg3 + 12327 sizeof(struct target_timespec))) { 12328 return -TARGET_EFAULT; 12329 } 12330 tsp = ts; 12331 } 12332 if (!arg2) 12333 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12334 else { 12335 if (!(p = lock_user_string(arg2))) { 12336 return -TARGET_EFAULT; 12337 } 12338 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12339 unlock_user(p, arg2, 0); 12340 } 12341 } 12342 return ret; 12343 #endif 12344 #ifdef TARGET_NR_utimensat_time64 12345 case TARGET_NR_utimensat_time64: 12346 { 12347 struct timespec *tsp, ts[2]; 12348 if (!arg3) { 12349 tsp = NULL; 12350 } else { 12351 if (target_to_host_timespec64(ts, arg3)) { 12352 return -TARGET_EFAULT; 12353 } 12354 if (target_to_host_timespec64(ts + 1, arg3 + 12355 sizeof(struct target__kernel_timespec))) { 12356 return -TARGET_EFAULT; 12357 } 12358 tsp = ts; 12359 } 12360 if (!arg2) 12361 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12362 else { 12363 p = lock_user_string(arg2); 12364 if (!p) { 12365 return -TARGET_EFAULT; 12366 } 12367 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12368 unlock_user(p, arg2, 0); 12369 } 12370 } 12371 return ret; 12372 #endif 12373 #ifdef TARGET_NR_futex 12374 case TARGET_NR_futex: 12375 return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6); 12376 #endif 12377 #ifdef TARGET_NR_futex_time64 12378 case TARGET_NR_futex_time64: 12379 return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6); 12380 #endif 12381 #ifdef CONFIG_INOTIFY 12382 #if defined(TARGET_NR_inotify_init) 12383 case TARGET_NR_inotify_init: 12384 ret = get_errno(inotify_init()); 12385 if (ret >= 0) { 12386 fd_trans_register(ret, 
&target_inotify_trans); 12387 } 12388 return ret; 12389 #endif 12390 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1) 12391 case TARGET_NR_inotify_init1: 12392 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1, 12393 fcntl_flags_tbl))); 12394 if (ret >= 0) { 12395 fd_trans_register(ret, &target_inotify_trans); 12396 } 12397 return ret; 12398 #endif 12399 #if defined(TARGET_NR_inotify_add_watch) 12400 case TARGET_NR_inotify_add_watch: 12401 p = lock_user_string(arg2); 12402 ret = get_errno(inotify_add_watch(arg1, path(p), arg3)); 12403 unlock_user(p, arg2, 0); 12404 return ret; 12405 #endif 12406 #if defined(TARGET_NR_inotify_rm_watch) 12407 case TARGET_NR_inotify_rm_watch: 12408 return get_errno(inotify_rm_watch(arg1, arg2)); 12409 #endif 12410 #endif 12411 12412 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12413 case TARGET_NR_mq_open: 12414 { 12415 struct mq_attr posix_mq_attr; 12416 struct mq_attr *pposix_mq_attr; 12417 int host_flags; 12418 12419 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12420 pposix_mq_attr = NULL; 12421 if (arg4) { 12422 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12423 return -TARGET_EFAULT; 12424 } 12425 pposix_mq_attr = &posix_mq_attr; 12426 } 12427 p = lock_user_string(arg1 - 1); 12428 if (!p) { 12429 return -TARGET_EFAULT; 12430 } 12431 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12432 unlock_user (p, arg1, 0); 12433 } 12434 return ret; 12435 12436 case TARGET_NR_mq_unlink: 12437 p = lock_user_string(arg1 - 1); 12438 if (!p) { 12439 return -TARGET_EFAULT; 12440 } 12441 ret = get_errno(mq_unlink(p)); 12442 unlock_user (p, arg1, 0); 12443 return ret; 12444 12445 #ifdef TARGET_NR_mq_timedsend 12446 case TARGET_NR_mq_timedsend: 12447 { 12448 struct timespec ts; 12449 12450 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12451 if (arg5 != 0) { 12452 if (target_to_host_timespec(&ts, arg5)) { 12453 return -TARGET_EFAULT; 12454 } 12455 ret = 
get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12456 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12457 return -TARGET_EFAULT; 12458 } 12459 } else { 12460 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12461 } 12462 unlock_user (p, arg2, arg3); 12463 } 12464 return ret; 12465 #endif 12466 #ifdef TARGET_NR_mq_timedsend_time64 12467 case TARGET_NR_mq_timedsend_time64: 12468 { 12469 struct timespec ts; 12470 12471 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12472 if (arg5 != 0) { 12473 if (target_to_host_timespec64(&ts, arg5)) { 12474 return -TARGET_EFAULT; 12475 } 12476 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12477 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12478 return -TARGET_EFAULT; 12479 } 12480 } else { 12481 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12482 } 12483 unlock_user(p, arg2, arg3); 12484 } 12485 return ret; 12486 #endif 12487 12488 #ifdef TARGET_NR_mq_timedreceive 12489 case TARGET_NR_mq_timedreceive: 12490 { 12491 struct timespec ts; 12492 unsigned int prio; 12493 12494 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12495 if (arg5 != 0) { 12496 if (target_to_host_timespec(&ts, arg5)) { 12497 return -TARGET_EFAULT; 12498 } 12499 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12500 &prio, &ts)); 12501 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12502 return -TARGET_EFAULT; 12503 } 12504 } else { 12505 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12506 &prio, NULL)); 12507 } 12508 unlock_user (p, arg2, arg3); 12509 if (arg4 != 0) 12510 put_user_u32(prio, arg4); 12511 } 12512 return ret; 12513 #endif 12514 #ifdef TARGET_NR_mq_timedreceive_time64 12515 case TARGET_NR_mq_timedreceive_time64: 12516 { 12517 struct timespec ts; 12518 unsigned int prio; 12519 12520 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12521 if (arg5 != 0) { 12522 if (target_to_host_timespec64(&ts, arg5)) { 12523 return -TARGET_EFAULT; 12524 } 12525 ret = 
get_errno(safe_mq_timedreceive(arg1, p, arg3, 12526 &prio, &ts)); 12527 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12528 return -TARGET_EFAULT; 12529 } 12530 } else { 12531 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12532 &prio, NULL)); 12533 } 12534 unlock_user(p, arg2, arg3); 12535 if (arg4 != 0) { 12536 put_user_u32(prio, arg4); 12537 } 12538 } 12539 return ret; 12540 #endif 12541 12542 /* Not implemented for now... */ 12543 /* case TARGET_NR_mq_notify: */ 12544 /* break; */ 12545 12546 case TARGET_NR_mq_getsetattr: 12547 { 12548 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12549 ret = 0; 12550 if (arg2 != 0) { 12551 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12552 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, 12553 &posix_mq_attr_out)); 12554 } else if (arg3 != 0) { 12555 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out)); 12556 } 12557 if (ret == 0 && arg3 != 0) { 12558 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12559 } 12560 } 12561 return ret; 12562 #endif 12563 12564 #ifdef CONFIG_SPLICE 12565 #ifdef TARGET_NR_tee 12566 case TARGET_NR_tee: 12567 { 12568 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12569 } 12570 return ret; 12571 #endif 12572 #ifdef TARGET_NR_splice 12573 case TARGET_NR_splice: 12574 { 12575 loff_t loff_in, loff_out; 12576 loff_t *ploff_in = NULL, *ploff_out = NULL; 12577 if (arg2) { 12578 if (get_user_u64(loff_in, arg2)) { 12579 return -TARGET_EFAULT; 12580 } 12581 ploff_in = &loff_in; 12582 } 12583 if (arg4) { 12584 if (get_user_u64(loff_out, arg4)) { 12585 return -TARGET_EFAULT; 12586 } 12587 ploff_out = &loff_out; 12588 } 12589 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12590 if (arg2) { 12591 if (put_user_u64(loff_in, arg2)) { 12592 return -TARGET_EFAULT; 12593 } 12594 } 12595 if (arg4) { 12596 if (put_user_u64(loff_out, arg4)) { 12597 return -TARGET_EFAULT; 12598 } 12599 } 12600 } 12601 return ret; 12602 #endif 12603 #ifdef TARGET_NR_vmsplice 12604 case 
TARGET_NR_vmsplice: 12605 { 12606 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12607 if (vec != NULL) { 12608 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12609 unlock_iovec(vec, arg2, arg3, 0); 12610 } else { 12611 ret = -host_to_target_errno(errno); 12612 } 12613 } 12614 return ret; 12615 #endif 12616 #endif /* CONFIG_SPLICE */ 12617 #ifdef CONFIG_EVENTFD 12618 #if defined(TARGET_NR_eventfd) 12619 case TARGET_NR_eventfd: 12620 ret = get_errno(eventfd(arg1, 0)); 12621 if (ret >= 0) { 12622 fd_trans_register(ret, &target_eventfd_trans); 12623 } 12624 return ret; 12625 #endif 12626 #if defined(TARGET_NR_eventfd2) 12627 case TARGET_NR_eventfd2: 12628 { 12629 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)); 12630 if (arg2 & TARGET_O_NONBLOCK) { 12631 host_flags |= O_NONBLOCK; 12632 } 12633 if (arg2 & TARGET_O_CLOEXEC) { 12634 host_flags |= O_CLOEXEC; 12635 } 12636 ret = get_errno(eventfd(arg1, host_flags)); 12637 if (ret >= 0) { 12638 fd_trans_register(ret, &target_eventfd_trans); 12639 } 12640 return ret; 12641 } 12642 #endif 12643 #endif /* CONFIG_EVENTFD */ 12644 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12645 case TARGET_NR_fallocate: 12646 #if TARGET_ABI_BITS == 32 12647 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12648 target_offset64(arg5, arg6))); 12649 #else 12650 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12651 #endif 12652 return ret; 12653 #endif 12654 #if defined(CONFIG_SYNC_FILE_RANGE) 12655 #if defined(TARGET_NR_sync_file_range) 12656 case TARGET_NR_sync_file_range: 12657 #if TARGET_ABI_BITS == 32 12658 #if defined(TARGET_MIPS) 12659 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12660 target_offset64(arg5, arg6), arg7)); 12661 #else 12662 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12663 target_offset64(arg4, arg5), arg6)); 12664 #endif /* !TARGET_MIPS */ 12665 #else 12666 ret = get_errno(sync_file_range(arg1, arg2, 
arg3, arg4)); 12667 #endif 12668 return ret; 12669 #endif 12670 #if defined(TARGET_NR_sync_file_range2) || \ 12671 defined(TARGET_NR_arm_sync_file_range) 12672 #if defined(TARGET_NR_sync_file_range2) 12673 case TARGET_NR_sync_file_range2: 12674 #endif 12675 #if defined(TARGET_NR_arm_sync_file_range) 12676 case TARGET_NR_arm_sync_file_range: 12677 #endif 12678 /* This is like sync_file_range but the arguments are reordered */ 12679 #if TARGET_ABI_BITS == 32 12680 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12681 target_offset64(arg5, arg6), arg2)); 12682 #else 12683 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12684 #endif 12685 return ret; 12686 #endif 12687 #endif 12688 #if defined(TARGET_NR_signalfd4) 12689 case TARGET_NR_signalfd4: 12690 return do_signalfd4(arg1, arg2, arg4); 12691 #endif 12692 #if defined(TARGET_NR_signalfd) 12693 case TARGET_NR_signalfd: 12694 return do_signalfd4(arg1, arg2, 0); 12695 #endif 12696 #if defined(CONFIG_EPOLL) 12697 #if defined(TARGET_NR_epoll_create) 12698 case TARGET_NR_epoll_create: 12699 return get_errno(epoll_create(arg1)); 12700 #endif 12701 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12702 case TARGET_NR_epoll_create1: 12703 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 12704 #endif 12705 #if defined(TARGET_NR_epoll_ctl) 12706 case TARGET_NR_epoll_ctl: 12707 { 12708 struct epoll_event ep; 12709 struct epoll_event *epp = 0; 12710 if (arg4) { 12711 if (arg2 != EPOLL_CTL_DEL) { 12712 struct target_epoll_event *target_ep; 12713 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12714 return -TARGET_EFAULT; 12715 } 12716 ep.events = tswap32(target_ep->events); 12717 /* 12718 * The epoll_data_t union is just opaque data to the kernel, 12719 * so we transfer all 64 bits across and need not worry what 12720 * actual data type it is. 
12721 */ 12722 ep.data.u64 = tswap64(target_ep->data.u64); 12723 unlock_user_struct(target_ep, arg4, 0); 12724 } 12725 /* 12726 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a 12727 * non-null pointer, even though this argument is ignored. 12728 * 12729 */ 12730 epp = &ep; 12731 } 12732 return get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12733 } 12734 #endif 12735 12736 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12737 #if defined(TARGET_NR_epoll_wait) 12738 case TARGET_NR_epoll_wait: 12739 #endif 12740 #if defined(TARGET_NR_epoll_pwait) 12741 case TARGET_NR_epoll_pwait: 12742 #endif 12743 { 12744 struct target_epoll_event *target_ep; 12745 struct epoll_event *ep; 12746 int epfd = arg1; 12747 int maxevents = arg3; 12748 int timeout = arg4; 12749 12750 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12751 return -TARGET_EINVAL; 12752 } 12753 12754 target_ep = lock_user(VERIFY_WRITE, arg2, 12755 maxevents * sizeof(struct target_epoll_event), 1); 12756 if (!target_ep) { 12757 return -TARGET_EFAULT; 12758 } 12759 12760 ep = g_try_new(struct epoll_event, maxevents); 12761 if (!ep) { 12762 unlock_user(target_ep, arg2, 0); 12763 return -TARGET_ENOMEM; 12764 } 12765 12766 switch (num) { 12767 #if defined(TARGET_NR_epoll_pwait) 12768 case TARGET_NR_epoll_pwait: 12769 { 12770 sigset_t *set = NULL; 12771 12772 if (arg5) { 12773 ret = process_sigsuspend_mask(&set, arg5, arg6); 12774 if (ret != 0) { 12775 break; 12776 } 12777 } 12778 12779 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12780 set, SIGSET_T_SIZE)); 12781 12782 if (set) { 12783 finish_sigsuspend_mask(ret); 12784 } 12785 break; 12786 } 12787 #endif 12788 #if defined(TARGET_NR_epoll_wait) 12789 case TARGET_NR_epoll_wait: 12790 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12791 NULL, 0)); 12792 break; 12793 #endif 12794 default: 12795 ret = -TARGET_ENOSYS; 12796 } 12797 if (!is_error(ret)) { 12798 int i; 12799 for (i = 0; i < ret; i++) { 
12800 target_ep[i].events = tswap32(ep[i].events); 12801 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12802 } 12803 unlock_user(target_ep, arg2, 12804 ret * sizeof(struct target_epoll_event)); 12805 } else { 12806 unlock_user(target_ep, arg2, 0); 12807 } 12808 g_free(ep); 12809 return ret; 12810 } 12811 #endif 12812 #endif 12813 #ifdef TARGET_NR_prlimit64 12814 case TARGET_NR_prlimit64: 12815 { 12816 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12817 struct target_rlimit64 *target_rnew, *target_rold; 12818 struct host_rlimit64 rnew, rold, *rnewp = 0; 12819 int resource = target_to_host_resource(arg2); 12820 12821 if (arg3 && (resource != RLIMIT_AS && 12822 resource != RLIMIT_DATA && 12823 resource != RLIMIT_STACK)) { 12824 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12825 return -TARGET_EFAULT; 12826 } 12827 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12828 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12829 unlock_user_struct(target_rnew, arg3, 0); 12830 rnewp = &rnew; 12831 } 12832 12833 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 12834 if (!is_error(ret) && arg4) { 12835 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12836 return -TARGET_EFAULT; 12837 } 12838 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12839 target_rold->rlim_max = tswap64(rold.rlim_max); 12840 unlock_user_struct(target_rold, arg4, 1); 12841 } 12842 return ret; 12843 } 12844 #endif 12845 #ifdef TARGET_NR_gethostname 12846 case TARGET_NR_gethostname: 12847 { 12848 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12849 if (name) { 12850 ret = get_errno(gethostname(name, arg2)); 12851 unlock_user(name, arg1, arg2); 12852 } else { 12853 ret = -TARGET_EFAULT; 12854 } 12855 return ret; 12856 } 12857 #endif 12858 #ifdef TARGET_NR_atomic_cmpxchg_32 12859 case TARGET_NR_atomic_cmpxchg_32: 12860 { 12861 /* should use start_exclusive from main.c */ 12862 abi_ulong mem_value; 12863 if (get_user_u32(mem_value, arg6)) { 12864 target_siginfo_t info; 12865 info.si_signo = SIGSEGV; 12866 info.si_errno = 0; 12867 info.si_code = TARGET_SEGV_MAPERR; 12868 info._sifields._sigfault._addr = arg6; 12869 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info); 12870 ret = 0xdeadbeef; 12871 12872 } 12873 if (mem_value == arg2) 12874 put_user_u32(arg1, arg6); 12875 return mem_value; 12876 } 12877 #endif 12878 #ifdef TARGET_NR_atomic_barrier 12879 case TARGET_NR_atomic_barrier: 12880 /* Like the kernel implementation and the 12881 qemu arm barrier, no-op this? 
*/ 12882 return 0; 12883 #endif 12884 12885 #ifdef TARGET_NR_timer_create 12886 case TARGET_NR_timer_create: 12887 { 12888 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12889 12890 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12891 12892 int clkid = arg1; 12893 int timer_index = next_free_host_timer(); 12894 12895 if (timer_index < 0) { 12896 ret = -TARGET_EAGAIN; 12897 } else { 12898 timer_t *phtimer = g_posix_timers + timer_index; 12899 12900 if (arg2) { 12901 phost_sevp = &host_sevp; 12902 ret = target_to_host_sigevent(phost_sevp, arg2); 12903 if (ret != 0) { 12904 free_host_timer_slot(timer_index); 12905 return ret; 12906 } 12907 } 12908 12909 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12910 if (ret) { 12911 free_host_timer_slot(timer_index); 12912 } else { 12913 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12914 timer_delete(*phtimer); 12915 free_host_timer_slot(timer_index); 12916 return -TARGET_EFAULT; 12917 } 12918 } 12919 } 12920 return ret; 12921 } 12922 #endif 12923 12924 #ifdef TARGET_NR_timer_settime 12925 case TARGET_NR_timer_settime: 12926 { 12927 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12928 * struct itimerspec * old_value */ 12929 target_timer_t timerid = get_timer_id(arg1); 12930 12931 if (timerid < 0) { 12932 ret = timerid; 12933 } else if (arg3 == 0) { 12934 ret = -TARGET_EINVAL; 12935 } else { 12936 timer_t htimer = g_posix_timers[timerid]; 12937 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12938 12939 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12940 return -TARGET_EFAULT; 12941 } 12942 ret = get_errno( 12943 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12944 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12945 return -TARGET_EFAULT; 12946 } 12947 } 12948 return ret; 12949 } 12950 #endif 12951 12952 #ifdef TARGET_NR_timer_settime64 12953 case TARGET_NR_timer_settime64: 12954 { 12955 target_timer_t 
timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            /* Same flow as timer_settime, but the guest structs use the
             * 64-bit time layout, hence the *_itimerspec64 converters. */
            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                      timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied out to the guest even when
             * timer_gettime() failed, so on error the guest buffer may
             * receive indeterminate data — confirm this is intended. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): same unconditional copy-out as timer_gettime
             * above; hspec may be indeterminate on host failure. */
            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* The table slot is released regardless of timer_delete's
             * result, so the id cannot be reused against a dead timer. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* arg2 flags are translated from guest to host bit encoding. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy-out only when the guest passed a buffer. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* 64-bit time layout variant of the case above. */
            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                /* arg3 == 0: pass NULL through to the host call. */
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new,
&its_old));

            /* Previous timer value is reported only if the guest asked. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* 64-bit time layout variant of timerfd_settime above. */
            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Arguments are plain integers; passed through unchanged. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.
*/
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* arg1: name (guest string), arg2: flags. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* presumably drops any stale fd-translation state registered for
         * this fd number — see fd_trans_unregister(); confirm semantics. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            /* NULL offset pointers mean "use the fd's own file offset";
             * otherwise read the 64-bit offsets from guest memory. */
            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter.
*/
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                /* Write updated offsets back only after a successful copy
                 * that moved at least one byte. */
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* Both strings are unlocked on every path (including the
             * partial-lock failure case). */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        /* Syscall number not handled by any case above. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}

/*
 * Top-level guest syscall dispatcher.  Records the syscall for tracing,
 * optionally strace-prints it, forwards to do_syscall1() for the actual
 * emulation, then records and optionally prints the result.
 * Returns the (target-encoded) syscall result.
 */
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        /* Alternates on every call: first attempt restarts, retry runs. */
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    /* Strace-style logging of the call, gated on the log mask. */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    /* Matching log of the return value, same gate as above. */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}