/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h" 128 129 #include "qemu.h" 130 #include "user-internals.h" 131 #include "strace.h" 132 #include "signal-common.h" 133 #include "loader.h" 134 #include "user-mmap.h" 135 #include "user/safe-syscall.h" 136 #include "qemu/guest-random.h" 137 #include "qemu/selfmap.h" 138 #include "user/syscall-trace.h" 139 #include "special-errno.h" 140 #include "qapi/error.h" 141 #include "fd-trans.h" 142 #include "tcg/tcg.h" 143 144 #ifndef CLONE_IO 145 #define CLONE_IO 0x80000000 /* Clone io context */ 146 #endif 147 148 /* We can't directly call the host clone syscall, because this will 149 * badly confuse libc (breaking mutexes, for example). So we must 150 * divide clone flags into: 151 * * flag combinations that look like pthread_create() 152 * * flag combinations that look like fork() 153 * * flags we can implement within QEMU itself 154 * * flags we can't support and will return an error for 155 */ 156 /* For thread creation, all these flags must be present; for 157 * fork, none must be present. 158 */ 159 #define CLONE_THREAD_FLAGS \ 160 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 161 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 162 163 /* These flags are ignored: 164 * CLONE_DETACHED is now ignored by the kernel; 165 * CLONE_IO is just an optimisation hint to the I/O scheduler 166 */ 167 #define CLONE_IGNORED_FLAGS \ 168 (CLONE_DETACHED | CLONE_IO) 169 170 /* Flags for fork which we can implement within QEMU itself */ 171 #define CLONE_OPTIONAL_FORK_FLAGS \ 172 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 173 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 174 175 /* Flags for thread creation which we can implement within QEMU itself */ 176 #define CLONE_OPTIONAL_THREAD_FLAGS \ 177 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 178 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 179 180 #define CLONE_INVALID_FORK_FLAGS \ 181 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 182 183 #define CLONE_INVALID_THREAD_FLAGS \ 184 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 185 CLONE_IGNORED_FLAGS)) 186 187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 188 * have almost all been allocated. We cannot support any of 189 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 190 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 191 * The checks against the invalid thread masks above will catch these. 192 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 193 */ 194 195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 196 * once. This exercises the codepaths for restart. 
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)             \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                   \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)  \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
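/* As an example of what these macros generate, the definition just above
 * expands (mechanically) to
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * a thin wrapper that invokes the host syscall directly, bypassing any
 * glibc wrapper (which may be absent or differently typed). The
 * __NR_sys_xxx aliases above exist precisely so that the __NR_##name
 * token-pasting resolves to a real host syscall number.
 */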
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
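/* Note the recurring guard idiom above: a wrapper is compiled only when
 * the *guest* ABI exposes the syscall (TARGET_NR_xxx is defined) and the
 * *host* kernel headers provide it (__NR_xxx is defined). When the host
 * half is missing, the code must either emulate the call by other means
 * or hand -TARGET_ENOSYS back to the guest.
 */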
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
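/* For illustration (hypothetical call site): fcntl_flags_tbl above is
 * consumed by the generic bitmask translation helpers, e.g.
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * where each table row gives a guest mask/value pair and the host
 * mask/value it maps to, so open(2)-style flag words can be converted
 * in either direction one bit-field at a time.
 */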
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
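/* Illustrative snippet (not a real call site) of the error convention
 * used throughout this file: host syscalls report failure via errno, and
 * get_errno() folds that into the "negative target errno" return value
 * that the syscall dispatcher ultimately hands back to the guest, e.g.
 *
 *     ret = get_errno(safe_read(fd, p, count));
 *     if (is_error(ret)) {
 *         // ret is e.g. -TARGET_EINTR, ready to return to the guest
 *     }
 *
 * (safe_read is one of the safe_syscall wrappers defined below.)
 */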
/* The safe_syscallN wrappers (see user/safe-syscall.h) invoke the host
 * syscall so that a guest signal arriving immediately before or during a
 * blocking call can interrupt it: the call then fails with errno set to
 * QEMU_ERESTARTSYS, letting the caller unwind, deliver the signal and
 * restart the syscall afterwards.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and
 * 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
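/* Illustrative (hypothetical) call site honouring the rule above:
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Using the 64-suffixed command and structure keeps file offsets 64 bits
 * wide on 32-bit hosts as well as 64-bit ones.
 */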
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
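/* Worked example for host_to_target_clock_t(): on an Alpha host
 * (HOST_HZ == 1024) emulating a 100 Hz guest, 2048 host clock ticks
 * become 2048 * 100 / 1024 = 200 guest ticks; when both sides use the
 * same HZ the value passes through unchanged. The intermediate cast to
 * int64_t avoids overflowing a 32-bit long during the multiply.
 */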
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
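/* The guest-memory access pattern used by the copy helpers above and
 * below: lock_user_struct() maps the guest structure into a host pointer
 * (with VERIFY_READ and a final argument of 1, the contents are copied
 * in), __get_user()/__put_user() byte-swap individual fields as needed,
 * and the final argument of unlock_user_struct() says whether the buffer
 * must be copied back to guest memory (1) or can be discarded (0).
 */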
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
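/* For reference, the layout both the guest and the host kernel use for
 * the packed 6th pselect6 argument is effectively (field names here are
 * illustrative only):
 *
 *     struct {
 *         abi_ulong sigset_addr;   // guest pointer to the sigset
 *         abi_ulong sigset_size;   // must equal sizeof(target_sigset_t)
 *     };
 *
 * which is why do_pselect6() above reads two consecutive abi_ulongs
 * from arg6.
 */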
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
1768 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1769 1770 int len = tswapal(target_cmsg->cmsg_len) 1771 - sizeof(struct target_cmsghdr); 1772 1773 space += CMSG_SPACE(len); 1774 if (space > msgh->msg_controllen) { 1775 space -= CMSG_SPACE(len); 1776 /* This is a QEMU bug, since we allocated the payload 1777 * area ourselves (unlike overflow in host-to-target 1778 * conversion, which is just the guest giving us a buffer 1779 * that's too small). It can't happen for the payload types 1780 * we currently support; if it becomes an issue in future 1781 * we would need to improve our allocation strategy to 1782 * something more intelligent than "twice the size of the 1783 * target buffer we're reading from". 1784 */ 1785 qemu_log_mask(LOG_UNIMP, 1786 ("Unsupported ancillary data %d/%d: " 1787 "unhandled msg size\n"), 1788 tswap32(target_cmsg->cmsg_level), 1789 tswap32(target_cmsg->cmsg_type)); 1790 break; 1791 } 1792 1793 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1794 cmsg->cmsg_level = SOL_SOCKET; 1795 } else { 1796 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1797 } 1798 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1799 cmsg->cmsg_len = CMSG_LEN(len); 1800 1801 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1802 int *fd = (int *)data; 1803 int *target_fd = (int *)target_data; 1804 int i, numfds = len / sizeof(int); 1805 1806 for (i = 0; i < numfds; i++) { 1807 __get_user(fd[i], target_fd + i); 1808 } 1809 } else if (cmsg->cmsg_level == SOL_SOCKET 1810 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1811 struct ucred *cred = (struct ucred *)data; 1812 struct target_ucred *target_cred = 1813 (struct target_ucred *)target_data; 1814 1815 __get_user(cred->pid, &target_cred->pid); 1816 __get_user(cred->uid, &target_cred->uid); 1817 __get_user(cred->gid, &target_cred->gid); 1818 } else { 1819 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 1820 cmsg->cmsg_level, cmsg->cmsg_type); 1821 memcpy(data, target_data, len); 1822 } 1823 1824 cmsg = CMSG_NXTHDR(msgh, cmsg); 1825 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1826 target_cmsg_start); 1827 } 1828 unlock_user(target_cmsg, target_cmsg_addr, 0); 1829 the_end: 1830 msgh->msg_controllen = space; 1831 return 0; 1832 } 1833 1834 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1835 struct msghdr *msgh) 1836 { 1837 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1838 abi_long msg_controllen; 1839 abi_ulong target_cmsg_addr; 1840 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1841 socklen_t space = 0; 1842 1843 msg_controllen = tswapal(target_msgh->msg_controllen); 1844 if (msg_controllen < sizeof (struct target_cmsghdr)) 1845 goto the_end; 1846 target_cmsg_addr = tswapal(target_msgh->msg_control); 1847 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1848 target_cmsg_start = target_cmsg; 1849 if (!target_cmsg) 1850 return -TARGET_EFAULT; 1851 1852 while (cmsg && target_cmsg) { 1853 void *data = CMSG_DATA(cmsg); 1854 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1855 1856 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1857 int tgt_len, tgt_space; 1858 1859 /* We never copy a half-header but may copy half-data; 1860 * this is Linux's behaviour in put_cmsg(). Note that 1861 * truncation here is a guest problem (which we report 1862 * to the guest via the CTRUNC bit), unlike truncation 1863 * in target_to_host_cmsg, which is a QEMU bug. 
1864 */ 1865 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1866 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1867 break; 1868 } 1869 1870 if (cmsg->cmsg_level == SOL_SOCKET) { 1871 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1872 } else { 1873 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1874 } 1875 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1876 1877 /* Payload types which need a different size of payload on 1878 * the target must adjust tgt_len here. 1879 */ 1880 tgt_len = len; 1881 switch (cmsg->cmsg_level) { 1882 case SOL_SOCKET: 1883 switch (cmsg->cmsg_type) { 1884 case SO_TIMESTAMP: 1885 tgt_len = sizeof(struct target_timeval); 1886 break; 1887 default: 1888 break; 1889 } 1890 break; 1891 default: 1892 break; 1893 } 1894 1895 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1896 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1897 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1898 } 1899 1900 /* We must now copy-and-convert len bytes of payload 1901 * into tgt_len bytes of destination space. Bear in mind 1902 * that in both source and destination we may be dealing 1903 * with a truncated value! 1904 */ 1905 switch (cmsg->cmsg_level) { 1906 case SOL_SOCKET: 1907 switch (cmsg->cmsg_type) { 1908 case SCM_RIGHTS: 1909 { 1910 int *fd = (int *)data; 1911 int *target_fd = (int *)target_data; 1912 int i, numfds = tgt_len / sizeof(int); 1913 1914 for (i = 0; i < numfds; i++) { 1915 __put_user(fd[i], target_fd + i); 1916 } 1917 break; 1918 } 1919 case SO_TIMESTAMP: 1920 { 1921 struct timeval *tv = (struct timeval *)data; 1922 struct target_timeval *target_tv = 1923 (struct target_timeval *)target_data; 1924 1925 if (len != sizeof(struct timeval) || 1926 tgt_len != sizeof(struct target_timeval)) { 1927 goto unimplemented; 1928 } 1929 1930 /* copy struct timeval to target */ 1931 __put_user(tv->tv_sec, &target_tv->tv_sec); 1932 __put_user(tv->tv_usec, &target_tv->tv_usec); 1933 break; 1934 } 1935 case SCM_CREDENTIALS: 1936 { 1937 struct ucred *cred = (struct ucred *)data; 1938 struct target_ucred *target_cred = 1939 (struct target_ucred *)target_data; 1940 1941 __put_user(cred->pid, &target_cred->pid); 1942 __put_user(cred->uid, &target_cred->uid); 1943 __put_user(cred->gid, &target_cred->gid); 1944 break; 1945 } 1946 default: 1947 goto unimplemented; 1948 } 1949 break; 1950 1951 case SOL_IP: 1952 switch (cmsg->cmsg_type) { 1953 case IP_TTL: 1954 { 1955 uint32_t *v = (uint32_t *)data; 1956 uint32_t *t_int = (uint32_t *)target_data; 1957 1958 if (len != sizeof(uint32_t) || 1959 tgt_len != sizeof(uint32_t)) { 1960 goto unimplemented; 1961 } 1962 __put_user(*v, t_int); 1963 break; 1964 } 1965 case IP_RECVERR: 1966 { 1967 struct errhdr_t { 1968 struct sock_extended_err ee; 1969 struct sockaddr_in offender; 1970 }; 1971 struct errhdr_t *errh = (struct errhdr_t *)data; 1972 struct errhdr_t *target_errh = 1973 (struct errhdr_t *)target_data; 1974 1975 if (len != sizeof(struct errhdr_t) || 1976 tgt_len != sizeof(struct errhdr_t)) { 1977 goto unimplemented; 1978 } 1979 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1980 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1981 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1982 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1983 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1984 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1985 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1986 host_to_target_sockaddr((unsigned long) &target_errh->offender, 
1987 (void *) &errh->offender, sizeof(errh->offender)); 1988 break; 1989 } 1990 default: 1991 goto unimplemented; 1992 } 1993 break; 1994 1995 case SOL_IPV6: 1996 switch (cmsg->cmsg_type) { 1997 case IPV6_HOPLIMIT: 1998 { 1999 uint32_t *v = (uint32_t *)data; 2000 uint32_t *t_int = (uint32_t *)target_data; 2001 2002 if (len != sizeof(uint32_t) || 2003 tgt_len != sizeof(uint32_t)) { 2004 goto unimplemented; 2005 } 2006 __put_user(*v, t_int); 2007 break; 2008 } 2009 case IPV6_RECVERR: 2010 { 2011 struct errhdr6_t { 2012 struct sock_extended_err ee; 2013 struct sockaddr_in6 offender; 2014 }; 2015 struct errhdr6_t *errh = (struct errhdr6_t *)data; 2016 struct errhdr6_t *target_errh = 2017 (struct errhdr6_t *)target_data; 2018 2019 if (len != sizeof(struct errhdr6_t) || 2020 tgt_len != sizeof(struct errhdr6_t)) { 2021 goto unimplemented; 2022 } 2023 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2024 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2025 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2026 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2027 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2028 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2029 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2030 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2031 (void *) &errh->offender, sizeof(errh->offender)); 2032 break; 2033 } 2034 default: 2035 goto unimplemented; 2036 } 2037 break; 2038 2039 default: 2040 unimplemented: 2041 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 2042 cmsg->cmsg_level, cmsg->cmsg_type); 2043 memcpy(target_data, data, MIN(len, tgt_len)); 2044 if (tgt_len > len) { 2045 memset(target_data + len, 0, tgt_len - len); 2046 } 2047 } 2048 2049 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 2050 tgt_space = TARGET_CMSG_SPACE(tgt_len); 2051 if (msg_controllen < tgt_space) { 2052 tgt_space = msg_controllen; 2053 } 2054 msg_controllen -= tgt_space; 2055 space += tgt_space; 2056 cmsg = CMSG_NXTHDR(msgh, cmsg); 2057 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 2058 target_cmsg_start); 2059 } 2060 unlock_user(target_cmsg, target_cmsg_addr, space); 2061 the_end: 2062 target_msgh->msg_controllen = tswapal(space); 2063 return 0; 2064 } 2065 2066 /* do_setsockopt() Must return target values and target errnos. */ 2067 static abi_long do_setsockopt(int sockfd, int level, int optname, 2068 abi_ulong optval_addr, socklen_t optlen) 2069 { 2070 abi_long ret; 2071 int val; 2072 struct ip_mreqn *ip_mreq; 2073 struct ip_mreq_source *ip_mreq_source; 2074 2075 switch(level) { 2076 case SOL_TCP: 2077 case SOL_UDP: 2078 /* TCP and UDP options all take an 'int' value. 
*/ 2079 if (optlen < sizeof(uint32_t)) 2080 return -TARGET_EINVAL; 2081 2082 if (get_user_u32(val, optval_addr)) 2083 return -TARGET_EFAULT; 2084 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2085 break; 2086 case SOL_IP: 2087 switch(optname) { 2088 case IP_TOS: 2089 case IP_TTL: 2090 case IP_HDRINCL: 2091 case IP_ROUTER_ALERT: 2092 case IP_RECVOPTS: 2093 case IP_RETOPTS: 2094 case IP_PKTINFO: 2095 case IP_MTU_DISCOVER: 2096 case IP_RECVERR: 2097 case IP_RECVTTL: 2098 case IP_RECVTOS: 2099 #ifdef IP_FREEBIND 2100 case IP_FREEBIND: 2101 #endif 2102 case IP_MULTICAST_TTL: 2103 case IP_MULTICAST_LOOP: 2104 val = 0; 2105 if (optlen >= sizeof(uint32_t)) { 2106 if (get_user_u32(val, optval_addr)) 2107 return -TARGET_EFAULT; 2108 } else if (optlen >= 1) { 2109 if (get_user_u8(val, optval_addr)) 2110 return -TARGET_EFAULT; 2111 } 2112 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2113 break; 2114 case IP_ADD_MEMBERSHIP: 2115 case IP_DROP_MEMBERSHIP: 2116 if (optlen < sizeof (struct target_ip_mreq) || 2117 optlen > sizeof (struct target_ip_mreqn)) 2118 return -TARGET_EINVAL; 2119 2120 ip_mreq = (struct ip_mreqn *) alloca(optlen); 2121 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 2122 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 2123 break; 2124 2125 case IP_BLOCK_SOURCE: 2126 case IP_UNBLOCK_SOURCE: 2127 case IP_ADD_SOURCE_MEMBERSHIP: 2128 case IP_DROP_SOURCE_MEMBERSHIP: 2129 if (optlen != sizeof (struct target_ip_mreq_source)) 2130 return -TARGET_EINVAL; 2131 2132 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2133 if (!ip_mreq_source) { 2134 return -TARGET_EFAULT; 2135 } 2136 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 2137 unlock_user (ip_mreq_source, optval_addr, 0); 2138 break; 2139 2140 default: 2141 goto unimplemented; 2142 } 2143 break; 2144 case SOL_IPV6: 2145 switch (optname) { 2146 case IPV6_MTU_DISCOVER: 2147 case IPV6_MTU: 2148 case IPV6_V6ONLY: 2149 case IPV6_RECVPKTINFO: 2150 case IPV6_UNICAST_HOPS: 2151 case IPV6_MULTICAST_HOPS: 2152 case IPV6_MULTICAST_LOOP: 2153 case IPV6_RECVERR: 2154 case IPV6_RECVHOPLIMIT: 2155 case IPV6_2292HOPLIMIT: 2156 case IPV6_CHECKSUM: 2157 case IPV6_ADDRFORM: 2158 case IPV6_2292PKTINFO: 2159 case IPV6_RECVTCLASS: 2160 case IPV6_RECVRTHDR: 2161 case IPV6_2292RTHDR: 2162 case IPV6_RECVHOPOPTS: 2163 case IPV6_2292HOPOPTS: 2164 case IPV6_RECVDSTOPTS: 2165 case IPV6_2292DSTOPTS: 2166 case IPV6_TCLASS: 2167 case IPV6_ADDR_PREFERENCES: 2168 #ifdef IPV6_RECVPATHMTU 2169 case IPV6_RECVPATHMTU: 2170 #endif 2171 #ifdef IPV6_TRANSPARENT 2172 case IPV6_TRANSPARENT: 2173 #endif 2174 #ifdef IPV6_FREEBIND 2175 case IPV6_FREEBIND: 2176 #endif 2177 #ifdef IPV6_RECVORIGDSTADDR 2178 case IPV6_RECVORIGDSTADDR: 2179 #endif 2180 val = 0; 2181 if (optlen < sizeof(uint32_t)) { 2182 return -TARGET_EINVAL; 2183 } 2184 if (get_user_u32(val, optval_addr)) { 2185 return -TARGET_EFAULT; 2186 } 2187 ret = get_errno(setsockopt(sockfd, level, optname, 2188 &val, sizeof(val))); 2189 break; 2190 case IPV6_PKTINFO: 2191 { 2192 struct in6_pktinfo pki; 2193 2194 if (optlen < sizeof(pki)) { 2195 return -TARGET_EINVAL; 2196 } 2197 2198 if (copy_from_user(&pki, optval_addr, sizeof(pki))) { 2199 return -TARGET_EFAULT; 2200 } 2201 2202 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); 2203 2204 ret = get_errno(setsockopt(sockfd, level, optname, 2205 &pki, sizeof(pki))); 2206 break; 2207 } 2208 case IPV6_ADD_MEMBERSHIP: 2209 case IPV6_DROP_MEMBERSHIP: 2210 { 2211 
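/* The IPv6 group address itself is a byte array and needs no
 * swapping; only the 32-bit interface index below does. */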
struct ipv6_mreq ipv6mreq; 2212 2213 if (optlen < sizeof(ipv6mreq)) { 2214 return -TARGET_EINVAL; 2215 } 2216 2217 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) { 2218 return -TARGET_EFAULT; 2219 } 2220 2221 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface); 2222 2223 ret = get_errno(setsockopt(sockfd, level, optname, 2224 &ipv6mreq, sizeof(ipv6mreq))); 2225 break; 2226 } 2227 default: 2228 goto unimplemented; 2229 } 2230 break; 2231 case SOL_ICMPV6: 2232 switch (optname) { 2233 case ICMPV6_FILTER: 2234 { 2235 struct icmp6_filter icmp6f; 2236 2237 if (optlen > sizeof(icmp6f)) { 2238 optlen = sizeof(icmp6f); 2239 } 2240 2241 if (copy_from_user(&icmp6f, optval_addr, optlen)) { 2242 return -TARGET_EFAULT; 2243 } 2244 2245 for (val = 0; val < 8; val++) { 2246 icmp6f.data[val] = tswap32(icmp6f.data[val]); 2247 } 2248 2249 ret = get_errno(setsockopt(sockfd, level, optname, 2250 &icmp6f, optlen)); 2251 break; 2252 } 2253 default: 2254 goto unimplemented; 2255 } 2256 break; 2257 case SOL_RAW: 2258 switch (optname) { 2259 case ICMP_FILTER: 2260 case IPV6_CHECKSUM: 2261 /* these take a u32 value */ 2262 if (optlen < sizeof(uint32_t)) { 2263 return -TARGET_EINVAL; 2264 } 2265 2266 if (get_user_u32(val, optval_addr)) { 2267 return -TARGET_EFAULT; 2268 } 2269 ret = get_errno(setsockopt(sockfd, level, optname, 2270 &val, sizeof(val))); 2271 break; 2272 2273 default: 2274 goto unimplemented; 2275 } 2276 break; 2277 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE) 2278 case SOL_ALG: 2279 switch (optname) { 2280 case ALG_SET_KEY: 2281 { 2282 char *alg_key = g_malloc(optlen); 2283 2284 if (!alg_key) { 2285 return -TARGET_ENOMEM; 2286 } 2287 if (copy_from_user(alg_key, optval_addr, optlen)) { 2288 g_free(alg_key); 2289 return -TARGET_EFAULT; 2290 } 2291 ret = get_errno(setsockopt(sockfd, level, optname, 2292 alg_key, optlen)); 2293 g_free(alg_key); 2294 break; 2295 } 2296 case ALG_SET_AEAD_AUTHSIZE: 2297 { 2298 ret = get_errno(setsockopt(sockfd, level, optname, 2299 NULL, optlen)); 2300 break; 2301 } 2302 default: 2303 goto unimplemented; 2304 } 2305 break; 2306 #endif 2307 case TARGET_SOL_SOCKET: 2308 switch (optname) { 2309 case TARGET_SO_RCVTIMEO: 2310 { 2311 struct timeval tv; 2312 2313 optname = SO_RCVTIMEO; 2314 2315 set_timeout: 2316 if (optlen != sizeof(struct target_timeval)) { 2317 return -TARGET_EINVAL; 2318 } 2319 2320 if (copy_from_user_timeval(&tv, optval_addr)) { 2321 return -TARGET_EFAULT; 2322 } 2323 2324 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2325 &tv, sizeof(tv))); 2326 return ret; 2327 } 2328 case TARGET_SO_SNDTIMEO: 2329 optname = SO_SNDTIMEO; 2330 goto set_timeout; 2331 case TARGET_SO_ATTACH_FILTER: 2332 { 2333 struct target_sock_fprog *tfprog; 2334 struct target_sock_filter *tfilter; 2335 struct sock_fprog fprog; 2336 struct sock_filter *filter; 2337 int i; 2338 2339 if (optlen != sizeof(*tfprog)) { 2340 return -TARGET_EINVAL; 2341 } 2342 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 2343 return -TARGET_EFAULT; 2344 } 2345 if (!lock_user_struct(VERIFY_READ, tfilter, 2346 tswapal(tfprog->filter), 0)) { 2347 unlock_user_struct(tfprog, optval_addr, 1); 2348 return -TARGET_EFAULT; 2349 } 2350 2351 fprog.len = tswap16(tfprog->len); 2352 filter = g_try_new(struct sock_filter, fprog.len); 2353 if (filter == NULL) { 2354 unlock_user_struct(tfilter, tfprog->filter, 1); 2355 unlock_user_struct(tfprog, optval_addr, 1); 2356 return -TARGET_ENOMEM; 2357 } 2358 for (i = 0; i < fprog.len; i++) { 2359
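/* Convert each BPF instruction field by field: code and k are
 * 16- and 32-bit quantities that need byteswapping, while jt and
 * jf are single bytes and can be copied as-is. */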
filter[i].code = tswap16(tfilter[i].code); 2360 filter[i].jt = tfilter[i].jt; 2361 filter[i].jf = tfilter[i].jf; 2362 filter[i].k = tswap32(tfilter[i].k); 2363 } 2364 fprog.filter = filter; 2365 2366 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 2367 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 2368 g_free(filter); 2369 2370 unlock_user_struct(tfilter, tfprog->filter, 1); 2371 unlock_user_struct(tfprog, optval_addr, 1); 2372 return ret; 2373 } 2374 case TARGET_SO_BINDTODEVICE: 2375 { 2376 char *dev_ifname, *addr_ifname; 2377 2378 if (optlen > IFNAMSIZ - 1) { 2379 optlen = IFNAMSIZ - 1; 2380 } 2381 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2382 if (!dev_ifname) { 2383 return -TARGET_EFAULT; 2384 } 2385 optname = SO_BINDTODEVICE; 2386 addr_ifname = alloca(IFNAMSIZ); 2387 memcpy(addr_ifname, dev_ifname, optlen); 2388 addr_ifname[optlen] = 0; 2389 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2390 addr_ifname, optlen)); 2391 unlock_user (dev_ifname, optval_addr, 0); 2392 return ret; 2393 } 2394 case TARGET_SO_LINGER: 2395 { 2396 struct linger lg; 2397 struct target_linger *tlg; 2398 2399 if (optlen != sizeof(struct target_linger)) { 2400 return -TARGET_EINVAL; 2401 } 2402 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) { 2403 return -TARGET_EFAULT; 2404 } 2405 __get_user(lg.l_onoff, &tlg->l_onoff); 2406 __get_user(lg.l_linger, &tlg->l_linger); 2407 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER, 2408 &lg, sizeof(lg))); 2409 unlock_user_struct(tlg, optval_addr, 0); 2410 return ret; 2411 } 2412 /* Options with 'int' argument. */ 2413 case TARGET_SO_DEBUG: 2414 optname = SO_DEBUG; 2415 break; 2416 case TARGET_SO_REUSEADDR: 2417 optname = SO_REUSEADDR; 2418 break; 2419 #ifdef SO_REUSEPORT 2420 case TARGET_SO_REUSEPORT: 2421 optname = SO_REUSEPORT; 2422 break; 2423 #endif 2424 case TARGET_SO_TYPE: 2425 optname = SO_TYPE; 2426 break; 2427 case TARGET_SO_ERROR: 2428 optname = SO_ERROR; 2429 break; 2430 case TARGET_SO_DONTROUTE: 2431 optname = SO_DONTROUTE; 2432 break; 2433 case TARGET_SO_BROADCAST: 2434 optname = SO_BROADCAST; 2435 break; 2436 case TARGET_SO_SNDBUF: 2437 optname = SO_SNDBUF; 2438 break; 2439 case TARGET_SO_SNDBUFFORCE: 2440 optname = SO_SNDBUFFORCE; 2441 break; 2442 case TARGET_SO_RCVBUF: 2443 optname = SO_RCVBUF; 2444 break; 2445 case TARGET_SO_RCVBUFFORCE: 2446 optname = SO_RCVBUFFORCE; 2447 break; 2448 case TARGET_SO_KEEPALIVE: 2449 optname = SO_KEEPALIVE; 2450 break; 2451 case TARGET_SO_OOBINLINE: 2452 optname = SO_OOBINLINE; 2453 break; 2454 case TARGET_SO_NO_CHECK: 2455 optname = SO_NO_CHECK; 2456 break; 2457 case TARGET_SO_PRIORITY: 2458 optname = SO_PRIORITY; 2459 break; 2460 #ifdef SO_BSDCOMPAT 2461 case TARGET_SO_BSDCOMPAT: 2462 optname = SO_BSDCOMPAT; 2463 break; 2464 #endif 2465 case TARGET_SO_PASSCRED: 2466 optname = SO_PASSCRED; 2467 break; 2468 case TARGET_SO_PASSSEC: 2469 optname = SO_PASSSEC; 2470 break; 2471 case TARGET_SO_TIMESTAMP: 2472 optname = SO_TIMESTAMP; 2473 break; 2474 case TARGET_SO_RCVLOWAT: 2475 optname = SO_RCVLOWAT; 2476 break; 2477 default: 2478 goto unimplemented; 2479 } 2480 if (optlen < sizeof(uint32_t)) 2481 return -TARGET_EINVAL; 2482 2483 if (get_user_u32(val, optval_addr)) 2484 return -TARGET_EFAULT; 2485 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 2486 break; 2487 #ifdef SOL_NETLINK 2488 case SOL_NETLINK: 2489 switch (optname) { 2490 case NETLINK_PKTINFO: 2491 case NETLINK_ADD_MEMBERSHIP: 2492 case NETLINK_DROP_MEMBERSHIP: 2493 case NETLINK_BROADCAST_ERROR: 2494 
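/* Each of the netlink options in this group takes a plain int;
 * they all fall through to the common u32 read below. */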
case NETLINK_NO_ENOBUFS: 2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2496 case NETLINK_LISTEN_ALL_NSID: 2497 case NETLINK_CAP_ACK: 2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2500 case NETLINK_EXT_ACK: 2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2502 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2503 case NETLINK_GET_STRICT_CHK: 2504 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */ 2505 break; 2506 default: 2507 goto unimplemented; 2508 } 2509 val = 0; 2510 if (optlen < sizeof(uint32_t)) { 2511 return -TARGET_EINVAL; 2512 } 2513 if (get_user_u32(val, optval_addr)) { 2514 return -TARGET_EFAULT; 2515 } 2516 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val, 2517 sizeof(val))); 2518 break; 2519 #endif /* SOL_NETLINK */ 2520 default: 2521 unimplemented: 2522 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n", 2523 level, optname); 2524 ret = -TARGET_ENOPROTOOPT; 2525 } 2526 return ret; 2527 } 2528 2529 /* do_getsockopt() Must return target values and target errnos. */ 2530 static abi_long do_getsockopt(int sockfd, int level, int optname, 2531 abi_ulong optval_addr, abi_ulong optlen) 2532 { 2533 abi_long ret; 2534 int len, val; 2535 socklen_t lv; 2536 2537 switch(level) { 2538 case TARGET_SOL_SOCKET: 2539 level = SOL_SOCKET; 2540 switch (optname) { 2541 /* These don't just return a single integer */ 2542 case TARGET_SO_PEERNAME: 2543 goto unimplemented; 2544 case TARGET_SO_RCVTIMEO: { 2545 struct timeval tv; 2546 socklen_t tvlen; 2547 2548 optname = SO_RCVTIMEO; 2549 2550 get_timeout: 2551 if (get_user_u32(len, optlen)) { 2552 return -TARGET_EFAULT; 2553 } 2554 if (len < 0) { 2555 return -TARGET_EINVAL; 2556 } 2557 2558 tvlen = sizeof(tv); 2559 ret = get_errno(getsockopt(sockfd, level, optname, 2560 &tv, &tvlen)); 2561 if (ret < 0) { 2562 return ret; 2563 } 2564 if (len > sizeof(struct target_timeval)) { 2565 len = sizeof(struct target_timeval); 2566 } 2567 if (copy_to_user_timeval(optval_addr, &tv)) { 2568 return -TARGET_EFAULT; 2569 } 2570 if (put_user_u32(len, optlen)) { 2571 return -TARGET_EFAULT; 2572 } 2573 break; 2574 } 2575 case TARGET_SO_SNDTIMEO: 2576 optname = SO_SNDTIMEO; 2577 goto get_timeout; 2578 case TARGET_SO_PEERCRED: { 2579 struct ucred cr; 2580 socklen_t crlen; 2581 struct target_ucred *tcr; 2582 2583 if (get_user_u32(len, optlen)) { 2584 return -TARGET_EFAULT; 2585 } 2586 if (len < 0) { 2587 return -TARGET_EINVAL; 2588 } 2589 2590 crlen = sizeof(cr); 2591 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 2592 &cr, &crlen)); 2593 if (ret < 0) { 2594 return ret; 2595 } 2596 if (len > crlen) { 2597 len = crlen; 2598 } 2599 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 2600 return -TARGET_EFAULT; 2601 } 2602 __put_user(cr.pid, &tcr->pid); 2603 __put_user(cr.uid, &tcr->uid); 2604 __put_user(cr.gid, &tcr->gid); 2605 unlock_user_struct(tcr, optval_addr, 1); 2606 if (put_user_u32(len, optlen)) { 2607 return -TARGET_EFAULT; 2608 } 2609 break; 2610 } 2611 case TARGET_SO_PEERSEC: { 2612 char *name; 2613 2614 if (get_user_u32(len, optlen)) { 2615 return -TARGET_EFAULT; 2616 } 2617 if (len < 0) { 2618 return -TARGET_EINVAL; 2619 } 2620 name = lock_user(VERIFY_WRITE, optval_addr, len, 0); 2621 if (!name) { 2622 return -TARGET_EFAULT; 2623 } 2624 lv = len; 2625 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC, 2626 name, &lv)); 2627 if (put_user_u32(lv, optlen)) { 2628 ret = -TARGET_EFAULT; 2629 } 2630
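/* Passing lv to unlock_user copies that many bytes of the
 * security context string the kernel wrote back to guest memory. */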
unlock_user(name, optval_addr, lv); 2631 break; 2632 } 2633 case TARGET_SO_LINGER: 2634 { 2635 struct linger lg; 2636 socklen_t lglen; 2637 struct target_linger *tlg; 2638 2639 if (get_user_u32(len, optlen)) { 2640 return -TARGET_EFAULT; 2641 } 2642 if (len < 0) { 2643 return -TARGET_EINVAL; 2644 } 2645 2646 lglen = sizeof(lg); 2647 ret = get_errno(getsockopt(sockfd, level, SO_LINGER, 2648 &lg, &lglen)); 2649 if (ret < 0) { 2650 return ret; 2651 } 2652 if (len > lglen) { 2653 len = lglen; 2654 } 2655 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) { 2656 return -TARGET_EFAULT; 2657 } 2658 __put_user(lg.l_onoff, &tlg->l_onoff); 2659 __put_user(lg.l_linger, &tlg->l_linger); 2660 unlock_user_struct(tlg, optval_addr, 1); 2661 if (put_user_u32(len, optlen)) { 2662 return -TARGET_EFAULT; 2663 } 2664 break; 2665 } 2666 /* Options with 'int' argument. */ 2667 case TARGET_SO_DEBUG: 2668 optname = SO_DEBUG; 2669 goto int_case; 2670 case TARGET_SO_REUSEADDR: 2671 optname = SO_REUSEADDR; 2672 goto int_case; 2673 #ifdef SO_REUSEPORT 2674 case TARGET_SO_REUSEPORT: 2675 optname = SO_REUSEPORT; 2676 goto int_case; 2677 #endif 2678 case TARGET_SO_TYPE: 2679 optname = SO_TYPE; 2680 goto int_case; 2681 case TARGET_SO_ERROR: 2682 optname = SO_ERROR; 2683 goto int_case; 2684 case TARGET_SO_DONTROUTE: 2685 optname = SO_DONTROUTE; 2686 goto int_case; 2687 case TARGET_SO_BROADCAST: 2688 optname = SO_BROADCAST; 2689 goto int_case; 2690 case TARGET_SO_SNDBUF: 2691 optname = SO_SNDBUF; 2692 goto int_case; 2693 case TARGET_SO_RCVBUF: 2694 optname = SO_RCVBUF; 2695 goto int_case; 2696 case TARGET_SO_KEEPALIVE: 2697 optname = SO_KEEPALIVE; 2698 goto int_case; 2699 case TARGET_SO_OOBINLINE: 2700 optname = SO_OOBINLINE; 2701 goto int_case; 2702 case TARGET_SO_NO_CHECK: 2703 optname = SO_NO_CHECK; 2704 goto int_case; 2705 case TARGET_SO_PRIORITY: 2706 optname = SO_PRIORITY; 2707 goto int_case; 2708 #ifdef SO_BSDCOMPAT 2709 case TARGET_SO_BSDCOMPAT: 2710 optname = SO_BSDCOMPAT; 2711 goto int_case; 2712 #endif 2713 case TARGET_SO_PASSCRED: 2714 optname = SO_PASSCRED; 2715 goto int_case; 2716 case TARGET_SO_TIMESTAMP: 2717 optname = SO_TIMESTAMP; 2718 goto int_case; 2719 case TARGET_SO_RCVLOWAT: 2720 optname = SO_RCVLOWAT; 2721 goto int_case; 2722 case TARGET_SO_ACCEPTCONN: 2723 optname = SO_ACCEPTCONN; 2724 goto int_case; 2725 case TARGET_SO_PROTOCOL: 2726 optname = SO_PROTOCOL; 2727 goto int_case; 2728 case TARGET_SO_DOMAIN: 2729 optname = SO_DOMAIN; 2730 goto int_case; 2731 default: 2732 goto int_case; 2733 } 2734 break; 2735 case SOL_TCP: 2736 case SOL_UDP: 2737 /* TCP and UDP options all take an 'int' value. 
*/ 2738 int_case: 2739 if (get_user_u32(len, optlen)) 2740 return -TARGET_EFAULT; 2741 if (len < 0) 2742 return -TARGET_EINVAL; 2743 lv = sizeof(lv); 2744 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2745 if (ret < 0) 2746 return ret; 2747 if (optname == SO_TYPE) { 2748 val = host_to_target_sock_type(val); 2749 } 2750 if (len > lv) 2751 len = lv; 2752 if (len == 4) { 2753 if (put_user_u32(val, optval_addr)) 2754 return -TARGET_EFAULT; 2755 } else { 2756 if (put_user_u8(val, optval_addr)) 2757 return -TARGET_EFAULT; 2758 } 2759 if (put_user_u32(len, optlen)) 2760 return -TARGET_EFAULT; 2761 break; 2762 case SOL_IP: 2763 switch(optname) { 2764 case IP_TOS: 2765 case IP_TTL: 2766 case IP_HDRINCL: 2767 case IP_ROUTER_ALERT: 2768 case IP_RECVOPTS: 2769 case IP_RETOPTS: 2770 case IP_PKTINFO: 2771 case IP_MTU_DISCOVER: 2772 case IP_RECVERR: 2773 case IP_RECVTOS: 2774 #ifdef IP_FREEBIND 2775 case IP_FREEBIND: 2776 #endif 2777 case IP_MULTICAST_TTL: 2778 case IP_MULTICAST_LOOP: 2779 if (get_user_u32(len, optlen)) 2780 return -TARGET_EFAULT; 2781 if (len < 0) 2782 return -TARGET_EINVAL; 2783 lv = sizeof(lv); 2784 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2785 if (ret < 0) 2786 return ret; 2787 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2788 len = 1; 2789 if (put_user_u32(len, optlen) 2790 || put_user_u8(val, optval_addr)) 2791 return -TARGET_EFAULT; 2792 } else { 2793 if (len > sizeof(int)) 2794 len = sizeof(int); 2795 if (put_user_u32(len, optlen) 2796 || put_user_u32(val, optval_addr)) 2797 return -TARGET_EFAULT; 2798 } 2799 break; 2800 default: 2801 ret = -TARGET_ENOPROTOOPT; 2802 break; 2803 } 2804 break; 2805 case SOL_IPV6: 2806 switch (optname) { 2807 case IPV6_MTU_DISCOVER: 2808 case IPV6_MTU: 2809 case IPV6_V6ONLY: 2810 case IPV6_RECVPKTINFO: 2811 case IPV6_UNICAST_HOPS: 2812 case IPV6_MULTICAST_HOPS: 2813 case IPV6_MULTICAST_LOOP: 2814 case IPV6_RECVERR: 2815 case IPV6_RECVHOPLIMIT: 2816 case IPV6_2292HOPLIMIT: 2817 case IPV6_CHECKSUM: 2818 case IPV6_ADDRFORM: 2819 case IPV6_2292PKTINFO: 2820 case IPV6_RECVTCLASS: 2821 case IPV6_RECVRTHDR: 2822 case IPV6_2292RTHDR: 2823 case IPV6_RECVHOPOPTS: 2824 case IPV6_2292HOPOPTS: 2825 case IPV6_RECVDSTOPTS: 2826 case IPV6_2292DSTOPTS: 2827 case IPV6_TCLASS: 2828 case IPV6_ADDR_PREFERENCES: 2829 #ifdef IPV6_RECVPATHMTU 2830 case IPV6_RECVPATHMTU: 2831 #endif 2832 #ifdef IPV6_TRANSPARENT 2833 case IPV6_TRANSPARENT: 2834 #endif 2835 #ifdef IPV6_FREEBIND 2836 case IPV6_FREEBIND: 2837 #endif 2838 #ifdef IPV6_RECVORIGDSTADDR 2839 case IPV6_RECVORIGDSTADDR: 2840 #endif 2841 if (get_user_u32(len, optlen)) 2842 return -TARGET_EFAULT; 2843 if (len < 0) 2844 return -TARGET_EINVAL; 2845 lv = sizeof(lv); 2846 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2847 if (ret < 0) 2848 return ret; 2849 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2850 len = 1; 2851 if (put_user_u32(len, optlen) 2852 || put_user_u8(val, optval_addr)) 2853 return -TARGET_EFAULT; 2854 } else { 2855 if (len > sizeof(int)) 2856 len = sizeof(int); 2857 if (put_user_u32(len, optlen) 2858 || put_user_u32(val, optval_addr)) 2859 return -TARGET_EFAULT; 2860 } 2861 break; 2862 default: 2863 ret = -TARGET_ENOPROTOOPT; 2864 break; 2865 } 2866 break; 2867 #ifdef SOL_NETLINK 2868 case SOL_NETLINK: 2869 switch (optname) { 2870 case NETLINK_PKTINFO: 2871 case NETLINK_BROADCAST_ERROR: 2872 case NETLINK_NO_ENOBUFS: 2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2874 case NETLINK_LISTEN_ALL_NSID: 2875 case 
NETLINK_CAP_ACK: 2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2878 case NETLINK_EXT_ACK: 2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2881 case NETLINK_GET_STRICT_CHK: 2882 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */ 2883 if (get_user_u32(len, optlen)) { 2884 return -TARGET_EFAULT; 2885 } 2886 if (len != sizeof(val)) { 2887 return -TARGET_EINVAL; 2888 } 2889 lv = len; 2890 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2891 if (ret < 0) { 2892 return ret; 2893 } 2894 if (put_user_u32(lv, optlen) 2895 || put_user_u32(val, optval_addr)) { 2896 return -TARGET_EFAULT; 2897 } 2898 break; 2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2900 case NETLINK_LIST_MEMBERSHIPS: 2901 { 2902 uint32_t *results; 2903 int i; 2904 if (get_user_u32(len, optlen)) { 2905 return -TARGET_EFAULT; 2906 } 2907 if (len < 0) { 2908 return -TARGET_EINVAL; 2909 } 2910 results = lock_user(VERIFY_WRITE, optval_addr, len, 1); 2911 if (!results && len > 0) { 2912 return -TARGET_EFAULT; 2913 } 2914 lv = len; 2915 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv)); 2916 if (ret < 0) { 2917 unlock_user(results, optval_addr, 0); 2918 return ret; 2919 } 2920 /* swap host endianness to target endianness. */ 2921 for (i = 0; i < (len / sizeof(uint32_t)); i++) { 2922 results[i] = tswap32(results[i]); 2923 } 2924 if (put_user_u32(lv, optlen)) { 2925 return -TARGET_EFAULT; 2926 } 2927 unlock_user(results, optval_addr, 0); 2928 break; 2929 } 2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2931 default: 2932 goto unimplemented; 2933 } 2934 break; 2935 #endif /* SOL_NETLINK */ 2936 default: 2937 unimplemented: 2938 qemu_log_mask(LOG_UNIMP, 2939 "getsockopt level=%d optname=%d not yet supported\n", 2940 level, optname); 2941 ret = -TARGET_EOPNOTSUPP; 2942 break; 2943 } 2944 return ret; 2945 } 2946 2947 /* Convert target low/high pair representing file offset into the host 2948 * low/high pair. This function doesn't handle offsets bigger than 64 bits 2949 * as the kernel doesn't handle them either. 2950 */ 2951 static void target_to_host_low_high(abi_ulong tlow, 2952 abi_ulong thigh, 2953 unsigned long *hlow, 2954 unsigned long *hhigh) 2955 { 2956 uint64_t off = tlow | 2957 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) << 2958 TARGET_LONG_BITS / 2; 2959 2960 *hlow = off; 2961 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2; 2962 } 2963 2964 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 2965 abi_ulong count, int copy) 2966 { 2967 struct target_iovec *target_vec; 2968 struct iovec *vec; 2969 abi_ulong total_len, max_len; 2970 int i; 2971 int err = 0; 2972 bool bad_address = false; 2973 2974 if (count == 0) { 2975 errno = 0; 2976 return NULL; 2977 } 2978 if (count > IOV_MAX) { 2979 errno = EINVAL; 2980 return NULL; 2981 } 2982 2983 vec = g_try_new0(struct iovec, count); 2984 if (vec == NULL) { 2985 errno = ENOMEM; 2986 return NULL; 2987 } 2988 2989 target_vec = lock_user(VERIFY_READ, target_addr, 2990 count * sizeof(struct target_iovec), 1); 2991 if (target_vec == NULL) { 2992 err = EFAULT; 2993 goto fail2; 2994 } 2995 2996 /* ??? If host page size > target page size, this will result in a 2997 value larger than what we can actually support.
*/ 2998 max_len = 0x7fffffff & TARGET_PAGE_MASK; 2999 total_len = 0; 3000 3001 for (i = 0; i < count; i++) { 3002 abi_ulong base = tswapal(target_vec[i].iov_base); 3003 abi_long len = tswapal(target_vec[i].iov_len); 3004 3005 if (len < 0) { 3006 err = EINVAL; 3007 goto fail; 3008 } else if (len == 0) { 3009 /* Zero length pointer is ignored. */ 3010 vec[i].iov_base = 0; 3011 } else { 3012 vec[i].iov_base = lock_user(type, base, len, copy); 3013 /* If the first buffer pointer is bad, this is a fault. But 3014 * subsequent bad buffers will result in a partial write; this 3015 * is realized by filling the vector with null pointers and 3016 * zero lengths. */ 3017 if (!vec[i].iov_base) { 3018 if (i == 0) { 3019 err = EFAULT; 3020 goto fail; 3021 } else { 3022 bad_address = true; 3023 } 3024 } 3025 if (bad_address) { 3026 len = 0; 3027 } 3028 if (len > max_len - total_len) { 3029 len = max_len - total_len; 3030 } 3031 } 3032 vec[i].iov_len = len; 3033 total_len += len; 3034 } 3035 3036 unlock_user(target_vec, target_addr, 0); 3037 return vec; 3038 3039 fail: 3040 while (--i >= 0) { 3041 if (tswapal(target_vec[i].iov_len) > 0) { 3042 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 3043 } 3044 } 3045 unlock_user(target_vec, target_addr, 0); 3046 fail2: 3047 g_free(vec); 3048 errno = err; 3049 return NULL; 3050 } 3051 3052 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 3053 abi_ulong count, int copy) 3054 { 3055 struct target_iovec *target_vec; 3056 int i; 3057 3058 target_vec = lock_user(VERIFY_READ, target_addr, 3059 count * sizeof(struct target_iovec), 1); 3060 if (target_vec) { 3061 for (i = 0; i < count; i++) { 3062 abi_ulong base = tswapal(target_vec[i].iov_base); 3063 abi_long len = tswapal(target_vec[i].iov_len); 3064 if (len < 0) { 3065 break; 3066 } 3067 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 3068 } 3069 unlock_user(target_vec, target_addr, 0); 3070 } 3071 3072 g_free(vec); 3073 } 3074 3075 static inline int target_to_host_sock_type(int *type) 3076 { 3077 int host_type = 0; 3078 int target_type = *type; 3079 3080 switch (target_type & TARGET_SOCK_TYPE_MASK) { 3081 case TARGET_SOCK_DGRAM: 3082 host_type = SOCK_DGRAM; 3083 break; 3084 case TARGET_SOCK_STREAM: 3085 host_type = SOCK_STREAM; 3086 break; 3087 default: 3088 host_type = target_type & TARGET_SOCK_TYPE_MASK; 3089 break; 3090 } 3091 if (target_type & TARGET_SOCK_CLOEXEC) { 3092 #if defined(SOCK_CLOEXEC) 3093 host_type |= SOCK_CLOEXEC; 3094 #else 3095 return -TARGET_EINVAL; 3096 #endif 3097 } 3098 if (target_type & TARGET_SOCK_NONBLOCK) { 3099 #if defined(SOCK_NONBLOCK) 3100 host_type |= SOCK_NONBLOCK; 3101 #elif !defined(O_NONBLOCK) 3102 return -TARGET_EINVAL; 3103 #endif 3104 } 3105 *type = host_type; 3106 return 0; 3107 } 3108 3109 /* Try to emulate socket type flags after socket creation. */ 3110 static int sock_flags_fixup(int fd, int target_type) 3111 { 3112 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 3113 if (target_type & TARGET_SOCK_NONBLOCK) { 3114 int flags = fcntl(fd, F_GETFL); 3115 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 3116 close(fd); 3117 return -TARGET_EINVAL; 3118 } 3119 } 3120 #endif 3121 return fd; 3122 } 3123 3124 /* do_socket() Must return target values and target errnos. 
*/ 3125 static abi_long do_socket(int domain, int type, int protocol) 3126 { 3127 int target_type = type; 3128 int ret; 3129 3130 ret = target_to_host_sock_type(&type); 3131 if (ret) { 3132 return ret; 3133 } 3134 3135 if (domain == PF_NETLINK && !( 3136 #ifdef CONFIG_RTNETLINK 3137 protocol == NETLINK_ROUTE || 3138 #endif 3139 protocol == NETLINK_KOBJECT_UEVENT || 3140 protocol == NETLINK_AUDIT)) { 3141 return -TARGET_EPROTONOSUPPORT; 3142 } 3143 3144 if (domain == AF_PACKET || 3145 (domain == AF_INET && type == SOCK_PACKET)) { 3146 protocol = tswap16(protocol); 3147 } 3148 3149 ret = get_errno(socket(domain, type, protocol)); 3150 if (ret >= 0) { 3151 ret = sock_flags_fixup(ret, target_type); 3152 if (type == SOCK_PACKET) { 3153 /* Manage an obsolete case: 3154 * if socket type is SOCK_PACKET, bind by name 3155 */ 3156 fd_trans_register(ret, &target_packet_trans); 3157 } else if (domain == PF_NETLINK) { 3158 switch (protocol) { 3159 #ifdef CONFIG_RTNETLINK 3160 case NETLINK_ROUTE: 3161 fd_trans_register(ret, &target_netlink_route_trans); 3162 break; 3163 #endif 3164 case NETLINK_KOBJECT_UEVENT: 3165 /* nothing to do: messages are strings */ 3166 break; 3167 case NETLINK_AUDIT: 3168 fd_trans_register(ret, &target_netlink_audit_trans); 3169 break; 3170 default: 3171 g_assert_not_reached(); 3172 } 3173 } 3174 } 3175 return ret; 3176 } 3177 3178 /* do_bind() Must return target values and target errnos. */ 3179 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3180 socklen_t addrlen) 3181 { 3182 void *addr; 3183 abi_long ret; 3184 3185 if ((int)addrlen < 0) { 3186 return -TARGET_EINVAL; 3187 } 3188 3189 addr = alloca(addrlen+1); 3190 3191 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3192 if (ret) 3193 return ret; 3194 3195 return get_errno(bind(sockfd, addr, addrlen)); 3196 } 3197 3198 /* do_connect() Must return target values and target errnos. */ 3199 static abi_long do_connect(int sockfd, abi_ulong target_addr, 3200 socklen_t addrlen) 3201 { 3202 void *addr; 3203 abi_long ret; 3204 3205 if ((int)addrlen < 0) { 3206 return -TARGET_EINVAL; 3207 } 3208 3209 addr = alloca(addrlen+1); 3210 3211 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3212 if (ret) 3213 return ret; 3214 3215 return get_errno(safe_connect(sockfd, addr, addrlen)); 3216 } 3217 3218 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 3219 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 3220 int flags, int send) 3221 { 3222 abi_long ret, len; 3223 struct msghdr msg; 3224 abi_ulong count; 3225 struct iovec *vec; 3226 abi_ulong target_vec; 3227 3228 if (msgp->msg_name) { 3229 msg.msg_namelen = tswap32(msgp->msg_namelen); 3230 msg.msg_name = alloca(msg.msg_namelen+1); 3231 ret = target_to_host_sockaddr(fd, msg.msg_name, 3232 tswapal(msgp->msg_name), 3233 msg.msg_namelen); 3234 if (ret == -TARGET_EFAULT) { 3235 /* For connected sockets msg_name and msg_namelen must 3236 * be ignored, so returning EFAULT immediately is wrong. 3237 * Instead, pass a bad msg_name to the host kernel, and 3238 * let it decide whether to return EFAULT or not.
3239 */ 3240 msg.msg_name = (void *)-1; 3241 } else if (ret) { 3242 goto out2; 3243 } 3244 } else { 3245 msg.msg_name = NULL; 3246 msg.msg_namelen = 0; 3247 } 3248 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 3249 msg.msg_control = alloca(msg.msg_controllen); 3250 memset(msg.msg_control, 0, msg.msg_controllen); 3251 3252 msg.msg_flags = tswap32(msgp->msg_flags); 3253 3254 count = tswapal(msgp->msg_iovlen); 3255 target_vec = tswapal(msgp->msg_iov); 3256 3257 if (count > IOV_MAX) { 3258 /* sendmsg/recvmsg return a different errno for this condition than 3259 * readv/writev, so we must catch it here before lock_iovec() does. 3260 */ 3261 ret = -TARGET_EMSGSIZE; 3262 goto out2; 3263 } 3264 3265 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 3266 target_vec, count, send); 3267 if (vec == NULL) { 3268 ret = -host_to_target_errno(errno); 3269 goto out2; 3270 } 3271 msg.msg_iovlen = count; 3272 msg.msg_iov = vec; 3273 3274 if (send) { 3275 if (fd_trans_target_to_host_data(fd)) { 3276 void *host_msg; 3277 3278 host_msg = g_malloc(msg.msg_iov->iov_len); 3279 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len); 3280 ret = fd_trans_target_to_host_data(fd)(host_msg, 3281 msg.msg_iov->iov_len); 3282 if (ret >= 0) { 3283 msg.msg_iov->iov_base = host_msg; 3284 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3285 } 3286 g_free(host_msg); 3287 } else { 3288 ret = target_to_host_cmsg(&msg, msgp); 3289 if (ret == 0) { 3290 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3291 } 3292 } 3293 } else { 3294 ret = get_errno(safe_recvmsg(fd, &msg, flags)); 3295 if (!is_error(ret)) { 3296 len = ret; 3297 if (fd_trans_host_to_target_data(fd)) { 3298 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, 3299 MIN(msg.msg_iov->iov_len, len)); 3300 } else { 3301 ret = host_to_target_cmsg(msgp, &msg); 3302 } 3303 if (!is_error(ret)) { 3304 msgp->msg_namelen = tswap32(msg.msg_namelen); 3305 msgp->msg_flags = tswap32(msg.msg_flags); 3306 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) { 3307 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 3308 msg.msg_name, msg.msg_namelen); 3309 if (ret) { 3310 goto out; 3311 } 3312 } 3313 3314 ret = len; 3315 } 3316 } 3317 } 3318 3319 out: 3320 unlock_iovec(vec, target_vec, count, !send); 3321 out2: 3322 return ret; 3323 } 3324 3325 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 3326 int flags, int send) 3327 { 3328 abi_long ret; 3329 struct target_msghdr *msgp; 3330 3331 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 3332 msgp, 3333 target_msg, 3334 send ? 1 : 0)) { 3335 return -TARGET_EFAULT; 3336 } 3337 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 3338 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 3339 return ret; 3340 } 3341 3342 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 3343 * so it might not have this *mmsg-specific flag either.
3344 */ 3345 #ifndef MSG_WAITFORONE 3346 #define MSG_WAITFORONE 0x10000 3347 #endif 3348 3349 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 3350 unsigned int vlen, unsigned int flags, 3351 int send) 3352 { 3353 struct target_mmsghdr *mmsgp; 3354 abi_long ret = 0; 3355 int i; 3356 3357 if (vlen > UIO_MAXIOV) { 3358 vlen = UIO_MAXIOV; 3359 } 3360 3361 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 3362 if (!mmsgp) { 3363 return -TARGET_EFAULT; 3364 } 3365 3366 for (i = 0; i < vlen; i++) { 3367 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 3368 if (is_error(ret)) { 3369 break; 3370 } 3371 mmsgp[i].msg_len = tswap32(ret); 3372 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 3373 if (flags & MSG_WAITFORONE) { 3374 flags |= MSG_DONTWAIT; 3375 } 3376 } 3377 3378 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 3379 3380 /* Return number of datagrams sent if we sent any at all; 3381 * otherwise return the error. 3382 */ 3383 if (i) { 3384 return i; 3385 } 3386 return ret; 3387 } 3388 3389 /* do_accept4() Must return target values and target errnos. */ 3390 static abi_long do_accept4(int fd, abi_ulong target_addr, 3391 abi_ulong target_addrlen_addr, int flags) 3392 { 3393 socklen_t addrlen, ret_addrlen; 3394 void *addr; 3395 abi_long ret; 3396 int host_flags; 3397 3398 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3399 3400 if (target_addr == 0) { 3401 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3402 } 3403 3404 /* linux returns EFAULT if addrlen pointer is invalid */ 3405 if (get_user_u32(addrlen, target_addrlen_addr)) 3406 return -TARGET_EFAULT; 3407 3408 if ((int)addrlen < 0) { 3409 return -TARGET_EINVAL; 3410 } 3411 3412 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3413 return -TARGET_EFAULT; 3414 } 3415 3416 addr = alloca(addrlen); 3417 3418 ret_addrlen = addrlen; 3419 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags)); 3420 if (!is_error(ret)) { 3421 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3422 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3423 ret = -TARGET_EFAULT; 3424 } 3425 } 3426 return ret; 3427 } 3428 3429 /* do_getpeername() Must return target values and target errnos. */ 3430 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3431 abi_ulong target_addrlen_addr) 3432 { 3433 socklen_t addrlen, ret_addrlen; 3434 void *addr; 3435 abi_long ret; 3436 3437 if (get_user_u32(addrlen, target_addrlen_addr)) 3438 return -TARGET_EFAULT; 3439 3440 if ((int)addrlen < 0) { 3441 return -TARGET_EINVAL; 3442 } 3443 3444 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3445 return -TARGET_EFAULT; 3446 } 3447 3448 addr = alloca(addrlen); 3449 3450 ret_addrlen = addrlen; 3451 ret = get_errno(getpeername(fd, addr, &ret_addrlen)); 3452 if (!is_error(ret)) { 3453 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3454 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3455 ret = -TARGET_EFAULT; 3456 } 3457 } 3458 return ret; 3459 } 3460 3461 /* do_getsockname() Must return target values and target errnos. 
*/ 3462 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3463 abi_ulong target_addrlen_addr) 3464 { 3465 socklen_t addrlen, ret_addrlen; 3466 void *addr; 3467 abi_long ret; 3468 3469 if (get_user_u32(addrlen, target_addrlen_addr)) 3470 return -TARGET_EFAULT; 3471 3472 if ((int)addrlen < 0) { 3473 return -TARGET_EINVAL; 3474 } 3475 3476 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3477 return -TARGET_EFAULT; 3478 } 3479 3480 addr = alloca(addrlen); 3481 3482 ret_addrlen = addrlen; 3483 ret = get_errno(getsockname(fd, addr, &ret_addrlen)); 3484 if (!is_error(ret)) { 3485 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3486 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3487 ret = -TARGET_EFAULT; 3488 } 3489 } 3490 return ret; 3491 } 3492 3493 /* do_socketpair() Must return target values and target errnos. */ 3494 static abi_long do_socketpair(int domain, int type, int protocol, 3495 abi_ulong target_tab_addr) 3496 { 3497 int tab[2]; 3498 abi_long ret; 3499 3500 target_to_host_sock_type(&type); 3501 3502 ret = get_errno(socketpair(domain, type, protocol, tab)); 3503 if (!is_error(ret)) { 3504 if (put_user_s32(tab[0], target_tab_addr) 3505 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3506 ret = -TARGET_EFAULT; 3507 } 3508 return ret; 3509 } 3510 3511 /* do_sendto() Must return target values and target errnos. */ 3512 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3513 abi_ulong target_addr, socklen_t addrlen) 3514 { 3515 void *addr; 3516 void *host_msg; 3517 void *copy_msg = NULL; 3518 abi_long ret; 3519 3520 if ((int)addrlen < 0) { 3521 return -TARGET_EINVAL; 3522 } 3523 3524 host_msg = lock_user(VERIFY_READ, msg, len, 1); 3525 if (!host_msg) 3526 return -TARGET_EFAULT; 3527 if (fd_trans_target_to_host_data(fd)) { 3528 copy_msg = host_msg; 3529 host_msg = g_malloc(len); 3530 memcpy(host_msg, copy_msg, len); 3531 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 3532 if (ret < 0) { 3533 goto fail; 3534 } 3535 } 3536 if (target_addr) { 3537 addr = alloca(addrlen+1); 3538 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 3539 if (ret) { 3540 goto fail; 3541 } 3542 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 3543 } else { 3544 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 3545 } 3546 fail: 3547 if (copy_msg) { 3548 g_free(host_msg); 3549 host_msg = copy_msg; 3550 } 3551 unlock_user(host_msg, msg, 0); 3552 return ret; 3553 } 3554 3555 /* do_recvfrom() Must return target values and target errnos. */ 3556 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 3557 abi_ulong target_addr, 3558 abi_ulong target_addrlen) 3559 { 3560 socklen_t addrlen, ret_addrlen; 3561 void *addr; 3562 void *host_msg; 3563 abi_long ret; 3564 3565 if (!msg) { 3566 host_msg = NULL; 3567 } else { 3568 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 3569 if (!host_msg) { 3570 return -TARGET_EFAULT; 3571 } 3572 } 3573 if (target_addr) { 3574 if (get_user_u32(addrlen, target_addrlen)) { 3575 ret = -TARGET_EFAULT; 3576 goto fail; 3577 } 3578 if ((int)addrlen < 0) { 3579 ret = -TARGET_EINVAL; 3580 goto fail; 3581 } 3582 addr = alloca(addrlen); 3583 ret_addrlen = addrlen; 3584 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 3585 addr, &ret_addrlen)); 3586 } else { 3587 addr = NULL; /* To keep compiler quiet. */ 3588 addrlen = 0; /* To keep compiler quiet. 
*/ 3589 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 3590 } 3591 if (!is_error(ret)) { 3592 if (fd_trans_host_to_target_data(fd)) { 3593 abi_long trans; 3594 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len)); 3595 if (is_error(trans)) { 3596 ret = trans; 3597 goto fail; 3598 } 3599 } 3600 if (target_addr) { 3601 host_to_target_sockaddr(target_addr, addr, 3602 MIN(addrlen, ret_addrlen)); 3603 if (put_user_u32(ret_addrlen, target_addrlen)) { 3604 ret = -TARGET_EFAULT; 3605 goto fail; 3606 } 3607 } 3608 unlock_user(host_msg, msg, len); 3609 } else { 3610 fail: 3611 unlock_user(host_msg, msg, 0); 3612 } 3613 return ret; 3614 } 3615 3616 #ifdef TARGET_NR_socketcall 3617 /* do_socketcall() must return target values and target errnos. */ 3618 static abi_long do_socketcall(int num, abi_ulong vptr) 3619 { 3620 static const unsigned nargs[] = { /* number of arguments per operation */ 3621 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 3622 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 3623 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 3624 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 3625 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 3626 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 3627 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 3628 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 3629 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 3630 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 3631 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 3632 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 3633 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 3634 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3635 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3636 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 3637 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 3638 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 3639 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 3640 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 3641 }; 3642 abi_long a[6]; /* max 6 args */ 3643 unsigned i; 3644 3645 /* check the range of the first argument num */ 3646 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 3647 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 3648 return -TARGET_EINVAL; 3649 } 3650 /* ensure we have space for args */ 3651 if (nargs[num] > ARRAY_SIZE(a)) { 3652 return -TARGET_EINVAL; 3653 } 3654 /* collect the arguments in a[] according to nargs[] */ 3655 for (i = 0; i < nargs[num]; ++i) { 3656 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 3657 return -TARGET_EFAULT; 3658 } 3659 } 3660 /* now that we have the args, invoke the appropriate underlying function */ 3661 switch (num) { 3662 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 3663 return do_socket(a[0], a[1], a[2]); 3664 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 3665 return do_bind(a[0], a[1], a[2]); 3666 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 3667 return do_connect(a[0], a[1], a[2]); 3668 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 3669 return get_errno(listen(a[0], a[1])); 3670 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 3671 return do_accept4(a[0], a[1], a[2], 0); 3672 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 3673 return do_getsockname(a[0], a[1], a[2]); 3674 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 3675 return do_getpeername(a[0], a[1], a[2]); 3676 case TARGET_SYS_SOCKETPAIR: /*
domain, type, protocol, tab */ 3677 return do_socketpair(a[0], a[1], a[2], a[3]); 3678 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 3679 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 3680 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 3681 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 3682 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 3683 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 3684 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 3685 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 3686 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 3687 return get_errno(shutdown(a[0], a[1])); 3688 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 3689 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 3690 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 3691 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 3692 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 3693 return do_sendrecvmsg(a[0], a[1], a[2], 1); 3694 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 3695 return do_sendrecvmsg(a[0], a[1], a[2], 0); 3696 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 3697 return do_accept4(a[0], a[1], a[2], a[3]); 3698 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 3699 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 3700 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 3701 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 3702 default: 3703 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num); 3704 return -TARGET_EINVAL; 3705 } 3706 } 3707 #endif 3708 3709 #define N_SHM_REGIONS 32 3710 3711 static struct shm_region { 3712 abi_ulong start; 3713 abi_ulong size; 3714 bool in_use; 3715 } shm_regions[N_SHM_REGIONS]; 3716 3717 #ifndef TARGET_SEMID64_DS 3718 /* asm-generic version of this struct */ 3719 struct target_semid64_ds 3720 { 3721 struct target_ipc_perm sem_perm; 3722 abi_ulong sem_otime; 3723 #if TARGET_ABI_BITS == 32 3724 abi_ulong __unused1; 3725 #endif 3726 abi_ulong sem_ctime; 3727 #if TARGET_ABI_BITS == 32 3728 abi_ulong __unused2; 3729 #endif 3730 abi_ulong sem_nsems; 3731 abi_ulong __unused3; 3732 abi_ulong __unused4; 3733 }; 3734 #endif 3735 3736 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 3737 abi_ulong target_addr) 3738 { 3739 struct target_ipc_perm *target_ip; 3740 struct target_semid64_ds *target_sd; 3741 3742 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3743 return -TARGET_EFAULT; 3744 target_ip = &(target_sd->sem_perm); 3745 host_ip->__key = tswap32(target_ip->__key); 3746 host_ip->uid = tswap32(target_ip->uid); 3747 host_ip->gid = tswap32(target_ip->gid); 3748 host_ip->cuid = tswap32(target_ip->cuid); 3749 host_ip->cgid = tswap32(target_ip->cgid); 3750 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3751 host_ip->mode = tswap32(target_ip->mode); 3752 #else 3753 host_ip->mode = tswap16(target_ip->mode); 3754 #endif 3755 #if defined(TARGET_PPC) 3756 host_ip->__seq = tswap32(target_ip->__seq); 3757 #else 3758 host_ip->__seq = tswap16(target_ip->__seq); 3759 #endif 3760 unlock_user_struct(target_sd, target_addr, 0); 3761 return 0; 3762 } 3763 3764 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 3765 struct ipc_perm *host_ip) 3766 { 3767 struct target_ipc_perm *target_ip; 3768 struct target_semid64_ds *target_sd; 3769 3770 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3771 return -TARGET_EFAULT; 3772 
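/* Store the fields one by one rather than with a flat memcpy:
 * mode is 32 bits wide on Alpha, MIPS and PPC targets but 16 bits
 * elsewhere, and __seq is 32 bits only on PPC, so the layout of
 * struct target_ipc_perm varies with the target ABI. */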
target_ip = &(target_sd->sem_perm); 3773 target_ip->__key = tswap32(host_ip->__key); 3774 target_ip->uid = tswap32(host_ip->uid); 3775 target_ip->gid = tswap32(host_ip->gid); 3776 target_ip->cuid = tswap32(host_ip->cuid); 3777 target_ip->cgid = tswap32(host_ip->cgid); 3778 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3779 target_ip->mode = tswap32(host_ip->mode); 3780 #else 3781 target_ip->mode = tswap16(host_ip->mode); 3782 #endif 3783 #if defined(TARGET_PPC) 3784 target_ip->__seq = tswap32(host_ip->__seq); 3785 #else 3786 target_ip->__seq = tswap16(host_ip->__seq); 3787 #endif 3788 unlock_user_struct(target_sd, target_addr, 1); 3789 return 0; 3790 } 3791 3792 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 3793 abi_ulong target_addr) 3794 { 3795 struct target_semid64_ds *target_sd; 3796 3797 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3798 return -TARGET_EFAULT; 3799 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 3800 return -TARGET_EFAULT; 3801 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 3802 host_sd->sem_otime = tswapal(target_sd->sem_otime); 3803 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 3804 unlock_user_struct(target_sd, target_addr, 0); 3805 return 0; 3806 } 3807 3808 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 3809 struct semid_ds *host_sd) 3810 { 3811 struct target_semid64_ds *target_sd; 3812 3813 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3814 return -TARGET_EFAULT; 3815 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 3816 return -TARGET_EFAULT; 3817 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 3818 target_sd->sem_otime = tswapal(host_sd->sem_otime); 3819 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 3820 unlock_user_struct(target_sd, target_addr, 1); 3821 return 0; 3822 } 3823 3824 struct target_seminfo { 3825 int semmap; 3826 int semmni; 3827 int semmns; 3828 int semmnu; 3829 int semmsl; 3830 int semopm; 3831 int semume; 3832 int semusz; 3833 int semvmx; 3834 int semaem; 3835 }; 3836 3837 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 3838 struct seminfo *host_seminfo) 3839 { 3840 struct target_seminfo *target_seminfo; 3841 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 3842 return -TARGET_EFAULT; 3843 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 3844 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 3845 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 3846 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 3847 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 3848 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 3849 __put_user(host_seminfo->semume, &target_seminfo->semume); 3850 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 3851 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 3852 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 3853 unlock_user_struct(target_seminfo, target_addr, 1); 3854 return 0; 3855 } 3856 3857 union semun { 3858 int val; 3859 struct semid_ds *buf; 3860 unsigned short *array; 3861 struct seminfo *__buf; 3862 }; 3863 3864 union target_semun { 3865 int val; 3866 abi_ulong buf; 3867 abi_ulong array; 3868 abi_ulong __buf; 3869 }; 3870 3871 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 3872 abi_ulong target_addr) 3873 { 3874 int nsems; 3875 unsigned short *array; 3876 union semun semun; 3877 struct 
semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1) {
        return get_errno(ret);
    }

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems * sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1) {
        return get_errno(ret);
    }

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems * sizeof(unsigned short), 0);
    if (!array) {
        /* Don't leak the host copy if the guest buffer is unwritable. */
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}

static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order.
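         * (Concretely: the guest's 4-byte val occupies one half of the
         * 8-byte buf; tswapal() on the full value moves it into the half
         * that target_su.val aliases on the host, and tswap32() then
         * restores the byte order of the 32-bit value itself.)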
         */
        if (sizeof(target_su.val) != sizeof(target_su.buf)) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err) {
            return err;
        }
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err) {
            return err;
        }
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err) {
            return err;
        }
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err) {
            return err;
        }
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err) {
            return err;
        }
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
    if (!target_sembuf) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variant, which passes the
 * arguments in a different order from the default.
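 * (On s390x the kernel expects (nsops, timeout, sops); the generic
 * layout is (nsops, 0, sops, timeout); compare the two definitions
 * below.)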
4043 */ 4044 #ifdef __s390x__ 4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4046 (__nsops), (__timeout), (__sops) 4047 #else 4048 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4049 (__nsops), 0, (__sops), (__timeout) 4050 #endif 4051 4052 static inline abi_long do_semtimedop(int semid, 4053 abi_long ptr, 4054 unsigned nsops, 4055 abi_long timeout, bool time64) 4056 { 4057 struct sembuf *sops; 4058 struct timespec ts, *pts = NULL; 4059 abi_long ret; 4060 4061 if (timeout) { 4062 pts = &ts; 4063 if (time64) { 4064 if (target_to_host_timespec64(pts, timeout)) { 4065 return -TARGET_EFAULT; 4066 } 4067 } else { 4068 if (target_to_host_timespec(pts, timeout)) { 4069 return -TARGET_EFAULT; 4070 } 4071 } 4072 } 4073 4074 if (nsops > TARGET_SEMOPM) { 4075 return -TARGET_E2BIG; 4076 } 4077 4078 sops = g_new(struct sembuf, nsops); 4079 4080 if (target_to_host_sembuf(sops, ptr, nsops)) { 4081 g_free(sops); 4082 return -TARGET_EFAULT; 4083 } 4084 4085 ret = -TARGET_ENOSYS; 4086 #ifdef __NR_semtimedop 4087 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts)); 4088 #endif 4089 #ifdef __NR_ipc 4090 if (ret == -TARGET_ENOSYS) { 4091 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, 4092 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))); 4093 } 4094 #endif 4095 g_free(sops); 4096 return ret; 4097 } 4098 #endif 4099 4100 struct target_msqid_ds 4101 { 4102 struct target_ipc_perm msg_perm; 4103 abi_ulong msg_stime; 4104 #if TARGET_ABI_BITS == 32 4105 abi_ulong __unused1; 4106 #endif 4107 abi_ulong msg_rtime; 4108 #if TARGET_ABI_BITS == 32 4109 abi_ulong __unused2; 4110 #endif 4111 abi_ulong msg_ctime; 4112 #if TARGET_ABI_BITS == 32 4113 abi_ulong __unused3; 4114 #endif 4115 abi_ulong __msg_cbytes; 4116 abi_ulong msg_qnum; 4117 abi_ulong msg_qbytes; 4118 abi_ulong msg_lspid; 4119 abi_ulong msg_lrpid; 4120 abi_ulong __unused4; 4121 abi_ulong __unused5; 4122 }; 4123 4124 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4125 abi_ulong target_addr) 4126 { 4127 struct target_msqid_ds *target_md; 4128 4129 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4130 return -TARGET_EFAULT; 4131 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4132 return -TARGET_EFAULT; 4133 host_md->msg_stime = tswapal(target_md->msg_stime); 4134 host_md->msg_rtime = tswapal(target_md->msg_rtime); 4135 host_md->msg_ctime = tswapal(target_md->msg_ctime); 4136 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 4137 host_md->msg_qnum = tswapal(target_md->msg_qnum); 4138 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 4139 host_md->msg_lspid = tswapal(target_md->msg_lspid); 4140 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 4141 unlock_user_struct(target_md, target_addr, 0); 4142 return 0; 4143 } 4144 4145 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 4146 struct msqid_ds *host_md) 4147 { 4148 struct target_msqid_ds *target_md; 4149 4150 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 4151 return -TARGET_EFAULT; 4152 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 4153 return -TARGET_EFAULT; 4154 target_md->msg_stime = tswapal(host_md->msg_stime); 4155 target_md->msg_rtime = tswapal(host_md->msg_rtime); 4156 target_md->msg_ctime = tswapal(host_md->msg_ctime); 4157 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 4158 target_md->msg_qnum = tswapal(host_md->msg_qnum); 4159 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 4160 target_md->msg_lspid = tswapal(host_md->msg_lspid); 
target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo)) {
            return -TARGET_EFAULT;
        }
        break;
    }

    return ret;
}

struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) {
        return -TARGET_EFAULT;
    }
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters,
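 * so the (msgp, msgtyp) pair is still packed into the kludge array,
 * just without the trailing dummy argument used by the generic variant.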
*/ 4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4280 ((long int[]){(long int)__msgp, __msgtyp}) 4281 #else 4282 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4283 ((long int[]){(long int)__msgp, __msgtyp}), 0 4284 #endif 4285 #endif 4286 4287 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 4288 ssize_t msgsz, abi_long msgtyp, 4289 int msgflg) 4290 { 4291 struct target_msgbuf *target_mb; 4292 char *target_mtext; 4293 struct msgbuf *host_mb; 4294 abi_long ret = 0; 4295 4296 if (msgsz < 0) { 4297 return -TARGET_EINVAL; 4298 } 4299 4300 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 4301 return -TARGET_EFAULT; 4302 4303 host_mb = g_try_malloc(msgsz + sizeof(long)); 4304 if (!host_mb) { 4305 ret = -TARGET_ENOMEM; 4306 goto end; 4307 } 4308 ret = -TARGET_ENOSYS; 4309 #ifdef __NR_msgrcv 4310 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 4311 #endif 4312 #ifdef __NR_ipc 4313 if (ret == -TARGET_ENOSYS) { 4314 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, 4315 msgflg, MSGRCV_ARGS(host_mb, msgtyp))); 4316 } 4317 #endif 4318 4319 if (ret > 0) { 4320 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 4321 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 4322 if (!target_mtext) { 4323 ret = -TARGET_EFAULT; 4324 goto end; 4325 } 4326 memcpy(target_mb->mtext, host_mb->mtext, ret); 4327 unlock_user(target_mtext, target_mtext_addr, ret); 4328 } 4329 4330 target_mb->mtype = tswapal(host_mb->mtype); 4331 4332 end: 4333 if (target_mb) 4334 unlock_user_struct(target_mb, msgp, 1); 4335 g_free(host_mb); 4336 return ret; 4337 } 4338 4339 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 4340 abi_ulong target_addr) 4341 { 4342 struct target_shmid_ds *target_sd; 4343 4344 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4345 return -TARGET_EFAULT; 4346 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 4347 return -TARGET_EFAULT; 4348 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4349 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4350 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4351 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4352 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4353 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4354 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4355 unlock_user_struct(target_sd, target_addr, 0); 4356 return 0; 4357 } 4358 4359 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4360 struct shmid_ds *host_sd) 4361 { 4362 struct target_shmid_ds *target_sd; 4363 4364 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4365 return -TARGET_EFAULT; 4366 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4367 return -TARGET_EFAULT; 4368 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4369 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4370 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4371 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4372 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4373 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4374 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4375 unlock_user_struct(target_sd, target_addr, 1); 4376 return 0; 4377 } 4378 4379 struct target_shminfo { 4380 abi_ulong shmmax; 4381 abi_ulong shmmin; 4382 abi_ulong shmmni; 4383 abi_ulong shmseg; 4384 abi_ulong shmall; 4385 }; 4386 4387 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4388 
struct shminfo *host_shminfo) 4389 { 4390 struct target_shminfo *target_shminfo; 4391 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4392 return -TARGET_EFAULT; 4393 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4394 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4395 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4396 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 4397 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4398 unlock_user_struct(target_shminfo, target_addr, 1); 4399 return 0; 4400 } 4401 4402 struct target_shm_info { 4403 int used_ids; 4404 abi_ulong shm_tot; 4405 abi_ulong shm_rss; 4406 abi_ulong shm_swp; 4407 abi_ulong swap_attempts; 4408 abi_ulong swap_successes; 4409 }; 4410 4411 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4412 struct shm_info *host_shm_info) 4413 { 4414 struct target_shm_info *target_shm_info; 4415 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4416 return -TARGET_EFAULT; 4417 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4418 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4419 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4420 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4421 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4422 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4423 unlock_user_struct(target_shm_info, target_addr, 1); 4424 return 0; 4425 } 4426 4427 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4428 { 4429 struct shmid_ds dsarg; 4430 struct shminfo shminfo; 4431 struct shm_info shm_info; 4432 abi_long ret = -TARGET_EINVAL; 4433 4434 cmd &= 0xff; 4435 4436 switch(cmd) { 4437 case IPC_STAT: 4438 case IPC_SET: 4439 case SHM_STAT: 4440 if (target_to_host_shmid_ds(&dsarg, buf)) 4441 return -TARGET_EFAULT; 4442 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4443 if (host_to_target_shmid_ds(buf, &dsarg)) 4444 return -TARGET_EFAULT; 4445 break; 4446 case IPC_INFO: 4447 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4448 if (host_to_target_shminfo(buf, &shminfo)) 4449 return -TARGET_EFAULT; 4450 break; 4451 case SHM_INFO: 4452 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4453 if (host_to_target_shm_info(buf, &shm_info)) 4454 return -TARGET_EFAULT; 4455 break; 4456 case IPC_RMID: 4457 case SHM_LOCK: 4458 case SHM_UNLOCK: 4459 ret = get_errno(shmctl(shmid, cmd, NULL)); 4460 break; 4461 } 4462 4463 return ret; 4464 } 4465 4466 #ifndef TARGET_FORCE_SHMLBA 4467 /* For most architectures, SHMLBA is the same as the page size; 4468 * some architectures have larger values, in which case they should 4469 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4470 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4471 * and defining its own value for SHMLBA. 4472 * 4473 * The kernel also permits SHMLBA to be set by the architecture to a 4474 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4475 * this means that addresses are rounded to the large size if 4476 * SHM_RND is set but addresses not aligned to that size are not rejected 4477 * as long as they are at least page-aligned. Since the only architecture 4478 * which uses this is ia64 this code doesn't provide for that oddity. 
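 * (A target needing, say, a larger SHMLBA would define TARGET_FORCE_SHMLBA
 * and return the larger constant from its own target_shmlba().)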
4479 */ 4480 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4481 { 4482 return TARGET_PAGE_SIZE; 4483 } 4484 #endif 4485 4486 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4487 int shmid, abi_ulong shmaddr, int shmflg) 4488 { 4489 CPUState *cpu = env_cpu(cpu_env); 4490 abi_long raddr; 4491 void *host_raddr; 4492 struct shmid_ds shm_info; 4493 int i,ret; 4494 abi_ulong shmlba; 4495 4496 /* shmat pointers are always untagged */ 4497 4498 /* find out the length of the shared memory segment */ 4499 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4500 if (is_error(ret)) { 4501 /* can't get length, bail out */ 4502 return ret; 4503 } 4504 4505 shmlba = target_shmlba(cpu_env); 4506 4507 if (shmaddr & (shmlba - 1)) { 4508 if (shmflg & SHM_RND) { 4509 shmaddr &= ~(shmlba - 1); 4510 } else { 4511 return -TARGET_EINVAL; 4512 } 4513 } 4514 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) { 4515 return -TARGET_EINVAL; 4516 } 4517 4518 mmap_lock(); 4519 4520 /* 4521 * We're mapping shared memory, so ensure we generate code for parallel 4522 * execution and flush old translations. This will work up to the level 4523 * supported by the host -- anything that requires EXCP_ATOMIC will not 4524 * be atomic with respect to an external process. 4525 */ 4526 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 4527 cpu->tcg_cflags |= CF_PARALLEL; 4528 tb_flush(cpu); 4529 } 4530 4531 if (shmaddr) 4532 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); 4533 else { 4534 abi_ulong mmap_start; 4535 4536 /* In order to use the host shmat, we need to honor host SHMLBA. */ 4537 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba)); 4538 4539 if (mmap_start == -1) { 4540 errno = ENOMEM; 4541 host_raddr = (void *)-1; 4542 } else 4543 host_raddr = shmat(shmid, g2h_untagged(mmap_start), 4544 shmflg | SHM_REMAP); 4545 } 4546 4547 if (host_raddr == (void *)-1) { 4548 mmap_unlock(); 4549 return get_errno((long)host_raddr); 4550 } 4551 raddr=h2g((unsigned long)host_raddr); 4552 4553 page_set_flags(raddr, raddr + shm_info.shm_segsz, 4554 PAGE_VALID | PAGE_RESET | PAGE_READ | 4555 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); 4556 4557 for (i = 0; i < N_SHM_REGIONS; i++) { 4558 if (!shm_regions[i].in_use) { 4559 shm_regions[i].in_use = true; 4560 shm_regions[i].start = raddr; 4561 shm_regions[i].size = shm_info.shm_segsz; 4562 break; 4563 } 4564 } 4565 4566 mmap_unlock(); 4567 return raddr; 4568 4569 } 4570 4571 static inline abi_long do_shmdt(abi_ulong shmaddr) 4572 { 4573 int i; 4574 abi_long rv; 4575 4576 /* shmdt pointers are always untagged */ 4577 4578 mmap_lock(); 4579 4580 for (i = 0; i < N_SHM_REGIONS; ++i) { 4581 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4582 shm_regions[i].in_use = false; 4583 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4584 break; 4585 } 4586 } 4587 rv = get_errno(shmdt(g2h_untagged(shmaddr))); 4588 4589 mmap_unlock(); 4590 4591 return rv; 4592 } 4593 4594 #ifdef TARGET_NR_ipc 4595 /* ??? This only works with linear mappings. */ 4596 /* do_ipc() must return target values and target errnos. 
*/ 4597 static abi_long do_ipc(CPUArchState *cpu_env, 4598 unsigned int call, abi_long first, 4599 abi_long second, abi_long third, 4600 abi_long ptr, abi_long fifth) 4601 { 4602 int version; 4603 abi_long ret = 0; 4604 4605 version = call >> 16; 4606 call &= 0xffff; 4607 4608 switch (call) { 4609 case IPCOP_semop: 4610 ret = do_semtimedop(first, ptr, second, 0, false); 4611 break; 4612 case IPCOP_semtimedop: 4613 /* 4614 * The s390 sys_ipc variant has only five parameters instead of six 4615 * (as for default variant) and the only difference is the handling of 4616 * SEMTIMEDOP where on s390 the third parameter is used as a pointer 4617 * to a struct timespec where the generic variant uses fifth parameter. 4618 */ 4619 #if defined(TARGET_S390X) 4620 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64); 4621 #else 4622 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64); 4623 #endif 4624 break; 4625 4626 case IPCOP_semget: 4627 ret = get_errno(semget(first, second, third)); 4628 break; 4629 4630 case IPCOP_semctl: { 4631 /* The semun argument to semctl is passed by value, so dereference the 4632 * ptr argument. */ 4633 abi_ulong atptr; 4634 get_user_ual(atptr, ptr); 4635 ret = do_semctl(first, second, third, atptr); 4636 break; 4637 } 4638 4639 case IPCOP_msgget: 4640 ret = get_errno(msgget(first, second)); 4641 break; 4642 4643 case IPCOP_msgsnd: 4644 ret = do_msgsnd(first, ptr, second, third); 4645 break; 4646 4647 case IPCOP_msgctl: 4648 ret = do_msgctl(first, second, ptr); 4649 break; 4650 4651 case IPCOP_msgrcv: 4652 switch (version) { 4653 case 0: 4654 { 4655 struct target_ipc_kludge { 4656 abi_long msgp; 4657 abi_long msgtyp; 4658 } *tmp; 4659 4660 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 4661 ret = -TARGET_EFAULT; 4662 break; 4663 } 4664 4665 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 4666 4667 unlock_user_struct(tmp, ptr, 0); 4668 break; 4669 } 4670 default: 4671 ret = do_msgrcv(first, ptr, second, fifth, third); 4672 } 4673 break; 4674 4675 case IPCOP_shmat: 4676 switch (version) { 4677 default: 4678 { 4679 abi_ulong raddr; 4680 raddr = do_shmat(cpu_env, first, ptr, second); 4681 if (is_error(raddr)) 4682 return get_errno(raddr); 4683 if (put_user_ual(raddr, third)) 4684 return -TARGET_EFAULT; 4685 break; 4686 } 4687 case 1: 4688 ret = -TARGET_EINVAL; 4689 break; 4690 } 4691 break; 4692 case IPCOP_shmdt: 4693 ret = do_shmdt(ptr); 4694 break; 4695 4696 case IPCOP_shmget: 4697 /* IPC_* flag values are the same on all linux platforms */ 4698 ret = get_errno(shmget(first, second, third)); 4699 break; 4700 4701 /* IPC_* and SHM_* command values are the same on all linux platforms */ 4702 case IPCOP_shmctl: 4703 ret = do_shmctl(first, second, ptr); 4704 break; 4705 default: 4706 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n", 4707 call, version); 4708 ret = -TARGET_ENOSYS; 4709 break; 4710 } 4711 return ret; 4712 } 4713 #endif 4714 4715 /* kernel structure types definitions */ 4716 4717 #define STRUCT(name, ...) STRUCT_ ## name, 4718 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 4719 enum { 4720 #include "syscall_types.h" 4721 STRUCT_MAX 4722 }; 4723 #undef STRUCT 4724 #undef STRUCT_SPECIAL 4725 4726 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 4727 #define STRUCT_SPECIAL(name) 4728 #include "syscall_types.h" 4729 #undef STRUCT 4730 #undef STRUCT_SPECIAL 4731 4732 #define MAX_STRUCT_SIZE 4096 4733 4734 #ifdef CONFIG_FIEMAP 4735 /* So fiemap access checks don't overflow on 32 bit systems. 4736 * This is very slightly smaller than the limit imposed by 4737 * the underlying kernel. 4738 */ 4739 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 4740 / sizeof(struct fiemap_extent)) 4741 4742 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 4743 int fd, int cmd, abi_long arg) 4744 { 4745 /* The parameter for this ioctl is a struct fiemap followed 4746 * by an array of struct fiemap_extent whose size is set 4747 * in fiemap->fm_extent_count. The array is filled in by the 4748 * ioctl. 4749 */ 4750 int target_size_in, target_size_out; 4751 struct fiemap *fm; 4752 const argtype *arg_type = ie->arg_type; 4753 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 4754 void *argptr, *p; 4755 abi_long ret; 4756 int i, extent_size = thunk_type_size(extent_arg_type, 0); 4757 uint32_t outbufsz; 4758 int free_fm = 0; 4759 4760 assert(arg_type[0] == TYPE_PTR); 4761 assert(ie->access == IOC_RW); 4762 arg_type++; 4763 target_size_in = thunk_type_size(arg_type, 0); 4764 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 4765 if (!argptr) { 4766 return -TARGET_EFAULT; 4767 } 4768 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4769 unlock_user(argptr, arg, 0); 4770 fm = (struct fiemap *)buf_temp; 4771 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 4772 return -TARGET_EINVAL; 4773 } 4774 4775 outbufsz = sizeof (*fm) + 4776 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 4777 4778 if (outbufsz > MAX_STRUCT_SIZE) { 4779 /* We can't fit all the extents into the fixed size buffer. 4780 * Allocate one that is large enough and use it instead. 
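         * (Each extent costs sizeof(struct fiemap_extent) bytes on top of
         * the fiemap header, so even a modest fm_extent_count exceeds the
         * 4k MAX_STRUCT_SIZE buffer.)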
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents,
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif

static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the requested ifreq records into the
             * fixed size buffer.  Allocate one that is large enough
             * and use it instead.
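             * (host_ifc_len scales with the guest-supplied ifc_len, so
             * the same size concern as in the fiemap handler applies.)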
4869 */ 4870 host_ifconf = malloc(outbufsz); 4871 if (!host_ifconf) { 4872 return -TARGET_ENOMEM; 4873 } 4874 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 4875 free_buf = 1; 4876 } 4877 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf); 4878 4879 host_ifconf->ifc_len = host_ifc_len; 4880 } else { 4881 host_ifc_buf = NULL; 4882 } 4883 host_ifconf->ifc_buf = host_ifc_buf; 4884 4885 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 4886 if (!is_error(ret)) { 4887 /* convert host ifc_len to target ifc_len */ 4888 4889 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 4890 target_ifc_len = nb_ifreq * target_ifreq_size; 4891 host_ifconf->ifc_len = target_ifc_len; 4892 4893 /* restore target ifc_buf */ 4894 4895 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 4896 4897 /* copy struct ifconf to target user */ 4898 4899 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 4900 if (!argptr) 4901 return -TARGET_EFAULT; 4902 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 4903 unlock_user(argptr, arg, target_size); 4904 4905 if (target_ifc_buf != 0) { 4906 /* copy ifreq[] to target user */ 4907 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 4908 for (i = 0; i < nb_ifreq ; i++) { 4909 thunk_convert(argptr + i * target_ifreq_size, 4910 host_ifc_buf + i * sizeof(struct ifreq), 4911 ifreq_arg_type, THUNK_TARGET); 4912 } 4913 unlock_user(argptr, target_ifc_buf, target_ifc_len); 4914 } 4915 } 4916 4917 if (free_buf) { 4918 free(host_ifconf); 4919 } 4920 4921 return ret; 4922 } 4923 4924 #if defined(CONFIG_USBFS) 4925 #if HOST_LONG_BITS > 64 4926 #error USBDEVFS thunks do not support >64 bit hosts yet. 4927 #endif 4928 struct live_urb { 4929 uint64_t target_urb_adr; 4930 uint64_t target_buf_adr; 4931 char *target_buf_ptr; 4932 struct usbdevfs_urb host_urb; 4933 }; 4934 4935 static GHashTable *usbdevfs_urb_hashtable(void) 4936 { 4937 static GHashTable *urb_hashtable; 4938 4939 if (!urb_hashtable) { 4940 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal); 4941 } 4942 return urb_hashtable; 4943 } 4944 4945 static void urb_hashtable_insert(struct live_urb *urb) 4946 { 4947 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 4948 g_hash_table_insert(urb_hashtable, urb, urb); 4949 } 4950 4951 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr) 4952 { 4953 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 4954 return g_hash_table_lookup(urb_hashtable, &target_urb_adr); 4955 } 4956 4957 static void urb_hashtable_remove(struct live_urb *urb) 4958 { 4959 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 4960 g_hash_table_remove(urb_hashtable, urb); 4961 } 4962 4963 static abi_long 4964 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp, 4965 int fd, int cmd, abi_long arg) 4966 { 4967 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) }; 4968 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 }; 4969 struct live_urb *lurb; 4970 void *argptr; 4971 uint64_t hurb; 4972 int target_size; 4973 uintptr_t target_urb_adr; 4974 abi_long ret; 4975 4976 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET); 4977 4978 memset(buf_temp, 0, sizeof(uint64_t)); 4979 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 4980 if (is_error(ret)) { 4981 return ret; 4982 } 4983 4984 memcpy(&hurb, buf_temp, sizeof(uint64_t)); 4985 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb)); 4986 if (!lurb->target_urb_adr) { 4987 return -TARGET_EFAULT; 4988 } 4989 
urb_hashtable_remove(lurb); 4990 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 4991 lurb->host_urb.buffer_length); 4992 lurb->target_buf_ptr = NULL; 4993 4994 /* restore the guest buffer pointer */ 4995 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr; 4996 4997 /* update the guest urb struct */ 4998 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0); 4999 if (!argptr) { 5000 g_free(lurb); 5001 return -TARGET_EFAULT; 5002 } 5003 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET); 5004 unlock_user(argptr, lurb->target_urb_adr, target_size); 5005 5006 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET); 5007 /* write back the urb handle */ 5008 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5009 if (!argptr) { 5010 g_free(lurb); 5011 return -TARGET_EFAULT; 5012 } 5013 5014 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */ 5015 target_urb_adr = lurb->target_urb_adr; 5016 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET); 5017 unlock_user(argptr, arg, target_size); 5018 5019 g_free(lurb); 5020 return ret; 5021 } 5022 5023 static abi_long 5024 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie, 5025 uint8_t *buf_temp __attribute__((unused)), 5026 int fd, int cmd, abi_long arg) 5027 { 5028 struct live_urb *lurb; 5029 5030 /* map target address back to host URB with metadata. */ 5031 lurb = urb_hashtable_lookup(arg); 5032 if (!lurb) { 5033 return -TARGET_EFAULT; 5034 } 5035 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5036 } 5037 5038 static abi_long 5039 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp, 5040 int fd, int cmd, abi_long arg) 5041 { 5042 const argtype *arg_type = ie->arg_type; 5043 int target_size; 5044 abi_long ret; 5045 void *argptr; 5046 int rw_dir; 5047 struct live_urb *lurb; 5048 5049 /* 5050 * each submitted URB needs to map to a unique ID for the 5051 * kernel, and that unique ID needs to be a pointer to 5052 * host memory. hence, we need to malloc for each URB. 5053 * isochronous transfers have a variable length struct. 5054 */ 5055 arg_type++; 5056 target_size = thunk_type_size(arg_type, THUNK_TARGET); 5057 5058 /* construct host copy of urb and metadata */ 5059 lurb = g_try_malloc0(sizeof(struct live_urb)); 5060 if (!lurb) { 5061 return -TARGET_ENOMEM; 5062 } 5063 5064 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5065 if (!argptr) { 5066 g_free(lurb); 5067 return -TARGET_EFAULT; 5068 } 5069 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST); 5070 unlock_user(argptr, arg, 0); 5071 5072 lurb->target_urb_adr = arg; 5073 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer; 5074 5075 /* buffer space used depends on endpoint type so lock the entire buffer */ 5076 /* control type urbs should check the buffer contents for true direction */ 5077 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? 
VERIFY_WRITE : VERIFY_READ; 5078 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr, 5079 lurb->host_urb.buffer_length, 1); 5080 if (lurb->target_buf_ptr == NULL) { 5081 g_free(lurb); 5082 return -TARGET_EFAULT; 5083 } 5084 5085 /* update buffer pointer in host copy */ 5086 lurb->host_urb.buffer = lurb->target_buf_ptr; 5087 5088 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5089 if (is_error(ret)) { 5090 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0); 5091 g_free(lurb); 5092 } else { 5093 urb_hashtable_insert(lurb); 5094 } 5095 5096 return ret; 5097 } 5098 #endif /* CONFIG_USBFS */ 5099 5100 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5101 int cmd, abi_long arg) 5102 { 5103 void *argptr; 5104 struct dm_ioctl *host_dm; 5105 abi_long guest_data; 5106 uint32_t guest_data_size; 5107 int target_size; 5108 const argtype *arg_type = ie->arg_type; 5109 abi_long ret; 5110 void *big_buf = NULL; 5111 char *host_data; 5112 5113 arg_type++; 5114 target_size = thunk_type_size(arg_type, 0); 5115 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5116 if (!argptr) { 5117 ret = -TARGET_EFAULT; 5118 goto out; 5119 } 5120 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5121 unlock_user(argptr, arg, 0); 5122 5123 /* buf_temp is too small, so fetch things into a bigger buffer */ 5124 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5125 memcpy(big_buf, buf_temp, target_size); 5126 buf_temp = big_buf; 5127 host_dm = big_buf; 5128 5129 guest_data = arg + host_dm->data_start; 5130 if ((guest_data - arg) < 0) { 5131 ret = -TARGET_EINVAL; 5132 goto out; 5133 } 5134 guest_data_size = host_dm->data_size - host_dm->data_start; 5135 host_data = (char*)host_dm + host_dm->data_start; 5136 5137 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5138 if (!argptr) { 5139 ret = -TARGET_EFAULT; 5140 goto out; 5141 } 5142 5143 switch (ie->host_cmd) { 5144 case DM_REMOVE_ALL: 5145 case DM_LIST_DEVICES: 5146 case DM_DEV_CREATE: 5147 case DM_DEV_REMOVE: 5148 case DM_DEV_SUSPEND: 5149 case DM_DEV_STATUS: 5150 case DM_DEV_WAIT: 5151 case DM_TABLE_STATUS: 5152 case DM_TABLE_CLEAR: 5153 case DM_TABLE_DEPS: 5154 case DM_LIST_VERSIONS: 5155 /* no input data */ 5156 break; 5157 case DM_DEV_RENAME: 5158 case DM_DEV_SET_GEOMETRY: 5159 /* data contains only strings */ 5160 memcpy(host_data, argptr, guest_data_size); 5161 break; 5162 case DM_TARGET_MSG: 5163 memcpy(host_data, argptr, guest_data_size); 5164 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5165 break; 5166 case DM_TABLE_LOAD: 5167 { 5168 void *gspec = argptr; 5169 void *cur_data = host_data; 5170 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5171 int spec_size = thunk_type_size(arg_type, 0); 5172 int i; 5173 5174 for (i = 0; i < host_dm->target_count; i++) { 5175 struct dm_target_spec *spec = cur_data; 5176 uint32_t next; 5177 int slen; 5178 5179 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5180 slen = strlen((char*)gspec + spec_size) + 1; 5181 next = spec->next; 5182 spec->next = sizeof(*spec) + slen; 5183 strcpy((char*)&spec[1], gspec + spec_size); 5184 gspec += next; 5185 cur_data += spec->next; 5186 } 5187 break; 5188 } 5189 default: 5190 ret = -TARGET_EINVAL; 5191 unlock_user(argptr, guest_data, 0); 5192 goto out; 5193 } 5194 unlock_user(argptr, guest_data, 0); 5195 5196 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5197 if (!is_error(ret)) { 5198 guest_data = arg + host_dm->data_start; 5199 guest_data_size = 
host_dm->data_size - host_dm->data_start; 5200 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5201 switch (ie->host_cmd) { 5202 case DM_REMOVE_ALL: 5203 case DM_DEV_CREATE: 5204 case DM_DEV_REMOVE: 5205 case DM_DEV_RENAME: 5206 case DM_DEV_SUSPEND: 5207 case DM_DEV_STATUS: 5208 case DM_TABLE_LOAD: 5209 case DM_TABLE_CLEAR: 5210 case DM_TARGET_MSG: 5211 case DM_DEV_SET_GEOMETRY: 5212 /* no return data */ 5213 break; 5214 case DM_LIST_DEVICES: 5215 { 5216 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5217 uint32_t remaining_data = guest_data_size; 5218 void *cur_data = argptr; 5219 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5220 int nl_size = 12; /* can't use thunk_size due to alignment */ 5221 5222 while (1) { 5223 uint32_t next = nl->next; 5224 if (next) { 5225 nl->next = nl_size + (strlen(nl->name) + 1); 5226 } 5227 if (remaining_data < nl->next) { 5228 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5229 break; 5230 } 5231 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5232 strcpy(cur_data + nl_size, nl->name); 5233 cur_data += nl->next; 5234 remaining_data -= nl->next; 5235 if (!next) { 5236 break; 5237 } 5238 nl = (void*)nl + next; 5239 } 5240 break; 5241 } 5242 case DM_DEV_WAIT: 5243 case DM_TABLE_STATUS: 5244 { 5245 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5246 void *cur_data = argptr; 5247 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5248 int spec_size = thunk_type_size(arg_type, 0); 5249 int i; 5250 5251 for (i = 0; i < host_dm->target_count; i++) { 5252 uint32_t next = spec->next; 5253 int slen = strlen((char*)&spec[1]) + 1; 5254 spec->next = (cur_data - argptr) + spec_size + slen; 5255 if (guest_data_size < spec->next) { 5256 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5257 break; 5258 } 5259 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5260 strcpy(cur_data + spec_size, (char*)&spec[1]); 5261 cur_data = argptr + spec->next; 5262 spec = (void*)host_dm + host_dm->data_start + next; 5263 } 5264 break; 5265 } 5266 case DM_TABLE_DEPS: 5267 { 5268 void *hdata = (void*)host_dm + host_dm->data_start; 5269 int count = *(uint32_t*)hdata; 5270 uint64_t *hdev = hdata + 8; 5271 uint64_t *gdev = argptr + 8; 5272 int i; 5273 5274 *(uint32_t*)argptr = tswap32(count); 5275 for (i = 0; i < count; i++) { 5276 *gdev = tswap64(*hdev); 5277 gdev++; 5278 hdev++; 5279 } 5280 break; 5281 } 5282 case DM_LIST_VERSIONS: 5283 { 5284 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5285 uint32_t remaining_data = guest_data_size; 5286 void *cur_data = argptr; 5287 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5288 int vers_size = thunk_type_size(arg_type, 0); 5289 5290 while (1) { 5291 uint32_t next = vers->next; 5292 if (next) { 5293 vers->next = vers_size + (strlen(vers->name) + 1); 5294 } 5295 if (remaining_data < vers->next) { 5296 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5297 break; 5298 } 5299 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5300 strcpy(cur_data + vers_size, vers->name); 5301 cur_data += vers->next; 5302 remaining_data -= vers->next; 5303 if (!next) { 5304 break; 5305 } 5306 vers = (void*)vers + next; 5307 } 5308 break; 5309 } 5310 default: 5311 unlock_user(argptr, guest_data, 0); 5312 ret = -TARGET_EINVAL; 5313 goto out; 5314 } 5315 unlock_user(argptr, guest_data, guest_data_size); 5316 5317 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5318 if (!argptr) { 5319 ret = -TARGET_EFAULT; 5320 goto out; 5321 } 5322 
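        /* Finally convert the dm_ioctl header itself back to the guest. */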
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5323 unlock_user(argptr, arg, target_size); 5324 } 5325 out: 5326 g_free(big_buf); 5327 return ret; 5328 } 5329 5330 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5331 int cmd, abi_long arg) 5332 { 5333 void *argptr; 5334 int target_size; 5335 const argtype *arg_type = ie->arg_type; 5336 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5337 abi_long ret; 5338 5339 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5340 struct blkpg_partition host_part; 5341 5342 /* Read and convert blkpg */ 5343 arg_type++; 5344 target_size = thunk_type_size(arg_type, 0); 5345 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5346 if (!argptr) { 5347 ret = -TARGET_EFAULT; 5348 goto out; 5349 } 5350 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5351 unlock_user(argptr, arg, 0); 5352 5353 switch (host_blkpg->op) { 5354 case BLKPG_ADD_PARTITION: 5355 case BLKPG_DEL_PARTITION: 5356 /* payload is struct blkpg_partition */ 5357 break; 5358 default: 5359 /* Unknown opcode */ 5360 ret = -TARGET_EINVAL; 5361 goto out; 5362 } 5363 5364 /* Read and convert blkpg->data */ 5365 arg = (abi_long)(uintptr_t)host_blkpg->data; 5366 target_size = thunk_type_size(part_arg_type, 0); 5367 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5368 if (!argptr) { 5369 ret = -TARGET_EFAULT; 5370 goto out; 5371 } 5372 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5373 unlock_user(argptr, arg, 0); 5374 5375 /* Swizzle the data pointer to our local copy and call! */ 5376 host_blkpg->data = &host_part; 5377 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5378 5379 out: 5380 return ret; 5381 } 5382 5383 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5384 int fd, int cmd, abi_long arg) 5385 { 5386 const argtype *arg_type = ie->arg_type; 5387 const StructEntry *se; 5388 const argtype *field_types; 5389 const int *dst_offsets, *src_offsets; 5390 int target_size; 5391 void *argptr; 5392 abi_ulong *target_rt_dev_ptr = NULL; 5393 unsigned long *host_rt_dev_ptr = NULL; 5394 abi_long ret; 5395 int i; 5396 5397 assert(ie->access == IOC_W); 5398 assert(*arg_type == TYPE_PTR); 5399 arg_type++; 5400 assert(*arg_type == TYPE_STRUCT); 5401 target_size = thunk_type_size(arg_type, 0); 5402 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5403 if (!argptr) { 5404 return -TARGET_EFAULT; 5405 } 5406 arg_type++; 5407 assert(*arg_type == (int)STRUCT_rtentry); 5408 se = struct_entries + *arg_type++; 5409 assert(se->convert[0] == NULL); 5410 /* convert struct here to be able to catch rt_dev string */ 5411 field_types = se->field_types; 5412 dst_offsets = se->field_offsets[THUNK_HOST]; 5413 src_offsets = se->field_offsets[THUNK_TARGET]; 5414 for (i = 0; i < se->nb_fields; i++) { 5415 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5416 assert(*field_types == TYPE_PTRVOID); 5417 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5418 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5419 if (*target_rt_dev_ptr != 0) { 5420 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5421 tswapal(*target_rt_dev_ptr)); 5422 if (!*host_rt_dev_ptr) { 5423 unlock_user(argptr, arg, 0); 5424 return -TARGET_EFAULT; 5425 } 5426 } else { 5427 *host_rt_dev_ptr = 0; 5428 } 5429 field_types++; 5430 continue; 5431 } 5432 field_types = thunk_convert(buf_temp + dst_offsets[i], 5433 argptr + src_offsets[i], 5434 field_types, THUNK_HOST); 5435 } 5436 unlock_user(argptr, arg, 
0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ?
host_ver->desc_len : 0); 5525 } 5526 5527 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver, 5528 struct target_drm_version *target_ver) 5529 { 5530 memset(host_ver, 0, sizeof(*host_ver)); 5531 5532 __get_user(host_ver->name_len, &target_ver->name_len); 5533 if (host_ver->name_len) { 5534 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name, 5535 target_ver->name_len, 0); 5536 if (!host_ver->name) { 5537 return -EFAULT; 5538 } 5539 } 5540 5541 __get_user(host_ver->date_len, &target_ver->date_len); 5542 if (host_ver->date_len) { 5543 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date, 5544 target_ver->date_len, 0); 5545 if (!host_ver->date) { 5546 goto err; 5547 } 5548 } 5549 5550 __get_user(host_ver->desc_len, &target_ver->desc_len); 5551 if (host_ver->desc_len) { 5552 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc, 5553 target_ver->desc_len, 0); 5554 if (!host_ver->desc) { 5555 goto err; 5556 } 5557 } 5558 5559 return 0; 5560 err: 5561 unlock_drm_version(host_ver, target_ver, false); 5562 return -EFAULT; 5563 } 5564 5565 static inline void host_to_target_drmversion( 5566 struct target_drm_version *target_ver, 5567 struct drm_version *host_ver) 5568 { 5569 __put_user(host_ver->version_major, &target_ver->version_major); 5570 __put_user(host_ver->version_minor, &target_ver->version_minor); 5571 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel); 5572 __put_user(host_ver->name_len, &target_ver->name_len); 5573 __put_user(host_ver->date_len, &target_ver->date_len); 5574 __put_user(host_ver->desc_len, &target_ver->desc_len); 5575 unlock_drm_version(host_ver, target_ver, true); 5576 } 5577 5578 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp, 5579 int fd, int cmd, abi_long arg) 5580 { 5581 struct drm_version *ver; 5582 struct target_drm_version *target_ver; 5583 abi_long ret; 5584 5585 switch (ie->host_cmd) { 5586 case DRM_IOCTL_VERSION: 5587 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) { 5588 return -TARGET_EFAULT; 5589 } 5590 ver = (struct drm_version *)buf_temp; 5591 ret = target_to_host_drmversion(ver, target_ver); 5592 if (!is_error(ret)) { 5593 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver)); 5594 if (is_error(ret)) { 5595 unlock_drm_version(ver, target_ver, false); 5596 } else { 5597 host_to_target_drmversion(target_ver, ver); 5598 } 5599 } 5600 unlock_user_struct(target_ver, arg, 0); 5601 return ret; 5602 } 5603 return -TARGET_ENOSYS; 5604 } 5605 5606 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie, 5607 struct drm_i915_getparam *gparam, 5608 int fd, abi_long arg) 5609 { 5610 abi_long ret; 5611 int value; 5612 struct target_drm_i915_getparam *target_gparam; 5613 5614 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) { 5615 return -TARGET_EFAULT; 5616 } 5617 5618 __get_user(gparam->param, &target_gparam->param); 5619 gparam->value = &value; 5620 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam)); 5621 put_user_s32(value, target_gparam->value); 5622 5623 unlock_user_struct(target_gparam, arg, 0); 5624 return ret; 5625 } 5626 5627 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp, 5628 int fd, int cmd, abi_long arg) 5629 { 5630 switch (ie->host_cmd) { 5631 case DRM_IOCTL_I915_GETPARAM: 5632 return do_ioctl_drm_i915_getparam(ie, 5633 (struct drm_i915_getparam *)buf_temp, 5634 fd, arg); 5635 default: 5636 return -TARGET_ENOSYS; 5637 } 5638 } 5639 5640 #endif 5641 5642 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, 
uint8_t *buf_temp, 5643 int fd, int cmd, abi_long arg) 5644 { 5645 struct tun_filter *filter = (struct tun_filter *)buf_temp; 5646 struct tun_filter *target_filter; 5647 char *target_addr; 5648 5649 assert(ie->access == IOC_W); 5650 5651 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1); 5652 if (!target_filter) { 5653 return -TARGET_EFAULT; 5654 } 5655 filter->flags = tswap16(target_filter->flags); 5656 filter->count = tswap16(target_filter->count); 5657 unlock_user(target_filter, arg, 0); 5658 5659 if (filter->count) { 5660 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN > 5661 MAX_STRUCT_SIZE) { 5662 return -TARGET_EFAULT; 5663 } 5664 5665 target_addr = lock_user(VERIFY_READ, 5666 arg + offsetof(struct tun_filter, addr), 5667 filter->count * ETH_ALEN, 1); 5668 if (!target_addr) { 5669 return -TARGET_EFAULT; 5670 } 5671 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN); 5672 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0); 5673 } 5674 5675 return get_errno(safe_ioctl(fd, ie->host_cmd, filter)); 5676 } 5677 5678 IOCTLEntry ioctl_entries[] = { 5679 #define IOCTL(cmd, access, ...) \ 5680 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5681 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5682 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5683 #define IOCTL_IGNORE(cmd) \ 5684 { TARGET_ ## cmd, 0, #cmd }, 5685 #include "ioctls.h" 5686 { 0, 0, }, 5687 }; 5688 5689 /* ??? Implement proper locking for ioctls. */ 5690 /* do_ioctl() Must return target values and target errnos. */ 5691 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5692 { 5693 const IOCTLEntry *ie; 5694 const argtype *arg_type; 5695 abi_long ret; 5696 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5697 int target_size; 5698 void *argptr; 5699 5700 ie = ioctl_entries; 5701 for(;;) { 5702 if (ie->target_cmd == 0) { 5703 qemu_log_mask( 5704 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5705 return -TARGET_ENOSYS; 5706 } 5707 if (ie->target_cmd == cmd) 5708 break; 5709 ie++; 5710 } 5711 arg_type = ie->arg_type; 5712 if (ie->do_ioctl) { 5713 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5714 } else if (!ie->host_cmd) { 5715 /* Some architectures define BSD ioctls in their headers 5716 that are not implemented in Linux. 
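           (For example, BSD leftovers such as TIOCSTART/TIOCSTOP appear
           in some architectures' termios headers with no kernel
           implementation behind them.)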
*/ 5717 return -TARGET_ENOSYS; 5718 } 5719 5720 switch(arg_type[0]) { 5721 case TYPE_NULL: 5722 /* no argument */ 5723 ret = get_errno(safe_ioctl(fd, ie->host_cmd)); 5724 break; 5725 case TYPE_PTRVOID: 5726 case TYPE_INT: 5727 case TYPE_LONG: 5728 case TYPE_ULONG: 5729 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg)); 5730 break; 5731 case TYPE_PTR: 5732 arg_type++; 5733 target_size = thunk_type_size(arg_type, 0); 5734 switch(ie->access) { 5735 case IOC_R: 5736 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5737 if (!is_error(ret)) { 5738 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5739 if (!argptr) 5740 return -TARGET_EFAULT; 5741 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5742 unlock_user(argptr, arg, target_size); 5743 } 5744 break; 5745 case IOC_W: 5746 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5747 if (!argptr) 5748 return -TARGET_EFAULT; 5749 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5750 unlock_user(argptr, arg, 0); 5751 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5752 break; 5753 default: 5754 case IOC_RW: 5755 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5756 if (!argptr) 5757 return -TARGET_EFAULT; 5758 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5759 unlock_user(argptr, arg, 0); 5760 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5761 if (!is_error(ret)) { 5762 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5763 if (!argptr) 5764 return -TARGET_EFAULT; 5765 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5766 unlock_user(argptr, arg, target_size); 5767 } 5768 break; 5769 } 5770 break; 5771 default: 5772 qemu_log_mask(LOG_UNIMP, 5773 "Unsupported ioctl type: cmd=0x%04lx type=%d\n", 5774 (long)cmd, arg_type[0]); 5775 ret = -TARGET_ENOSYS; 5776 break; 5777 } 5778 return ret; 5779 } 5780 5781 static const bitmask_transtbl iflag_tbl[] = { 5782 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 5783 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 5784 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 5785 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 5786 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 5787 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 5788 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 5789 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 5790 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 5791 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 5792 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 5793 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 5794 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 5795 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 5796 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8}, 5797 { 0, 0, 0, 0 } 5798 }; 5799 5800 static const bitmask_transtbl oflag_tbl[] = { 5801 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 5802 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 5803 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 5804 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 5805 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 5806 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 5807 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 5808 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 5809 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 5810 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 5811 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 5812 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 5813 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 5814 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 5815 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 5816 { TARGET_TABDLY, TARGET_TAB1, 
TABDLY, TAB1 }, 5817 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 5818 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 5819 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 5820 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 5821 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 5822 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 5823 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 5824 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 5825 { 0, 0, 0, 0 } 5826 }; 5827 5828 static const bitmask_transtbl cflag_tbl[] = { 5829 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 5830 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 5831 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 5832 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 5833 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 5834 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 5835 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 5836 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 5837 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 5838 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 5839 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 5840 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 5841 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 5842 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 5843 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 5844 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 5845 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 5846 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 5847 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 5848 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 5849 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 5850 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 5851 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 5852 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 5853 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 5854 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 5855 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 5856 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 5857 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 5858 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 5859 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 5860 { 0, 0, 0, 0 } 5861 }; 5862 5863 static const bitmask_transtbl lflag_tbl[] = { 5864 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 5865 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 5866 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 5867 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 5868 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 5869 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 5870 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 5871 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 5872 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 5873 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 5874 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 5875 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 5876 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 5877 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 5878 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 5879 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC}, 5880 { 0, 0, 0, 0 } 5881 }; 5882 5883 static void target_to_host_termios (void *dst, const void *src) 5884 { 5885 struct host_termios *host = dst; 5886 const struct target_termios *target = src; 5887 5888 host->c_iflag = 5889 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 5890 host->c_oflag = 5891 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 5892 host->c_cflag = 5893 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 5894 host->c_lflag = 5895 target_to_host_bitmask(tswap32(target->c_lflag), 
lflag_tbl); 5896 host->c_line = target->c_line; 5897 5898 memset(host->c_cc, 0, sizeof(host->c_cc)); 5899 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 5900 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 5901 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 5902 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 5903 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 5904 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 5905 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 5906 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 5907 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 5908 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 5909 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 5910 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 5911 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 5912 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 5913 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 5914 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 5915 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 5916 } 5917 5918 static void host_to_target_termios (void *dst, const void *src) 5919 { 5920 struct target_termios *target = dst; 5921 const struct host_termios *host = src; 5922 5923 target->c_iflag = 5924 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 5925 target->c_oflag = 5926 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 5927 target->c_cflag = 5928 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 5929 target->c_lflag = 5930 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 5931 target->c_line = host->c_line; 5932 5933 memset(target->c_cc, 0, sizeof(target->c_cc)); 5934 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 5935 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 5936 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 5937 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 5938 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 5939 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 5940 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 5941 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 5942 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 5943 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 5944 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 5945 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 5946 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 5947 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 5948 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 5949 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 5950 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 5951 } 5952 5953 static const StructEntry struct_termios_def = { 5954 .convert = { host_to_target_termios, target_to_host_termios }, 5955 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 5956 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 5957 .print = print_termios, 5958 }; 5959 5960 static const bitmask_transtbl mmap_flags_tbl[] = { 5961 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 5962 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 5963 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 5964 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, 5965 MAP_ANONYMOUS, MAP_ANONYMOUS }, 5966 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, 5967 MAP_GROWSDOWN, MAP_GROWSDOWN }, 5968 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, 5969 MAP_DENYWRITE, MAP_DENYWRITE }, 5970 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, 5971 MAP_EXECUTABLE, MAP_EXECUTABLE }, 5972 { TARGET_MAP_LOCKED, 
TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 5973 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, 5974 MAP_NORESERVE, MAP_NORESERVE }, 5975 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB }, 5976 /* MAP_STACK had been ignored by the kernel for quite some time. 5977 Recognize it for the target insofar as we do not want to pass 5978 it through to the host. */ 5979 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 }, 5980 { 0, 0, 0, 0 } 5981 }; 5982 5983 /* 5984 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64) 5985 * TARGET_I386 is defined if TARGET_X86_64 is defined 5986 */ 5987 #if defined(TARGET_I386) 5988 5989 /* NOTE: there is really one LDT for all the threads */ 5990 static uint8_t *ldt_table; 5991 5992 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 5993 { 5994 int size; 5995 void *p; 5996 5997 if (!ldt_table) 5998 return 0; 5999 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 6000 if (size > bytecount) 6001 size = bytecount; 6002 p = lock_user(VERIFY_WRITE, ptr, size, 0); 6003 if (!p) 6004 return -TARGET_EFAULT; 6005 /* ??? Should this be byteswapped? */ 6006 memcpy(p, ldt_table, size); 6007 unlock_user(p, ptr, size); 6008 return size; 6009 } 6010 6011 /* XXX: add locking support */ 6012 static abi_long write_ldt(CPUX86State *env, 6013 abi_ulong ptr, unsigned long bytecount, int oldmode) 6014 { 6015 struct target_modify_ldt_ldt_s ldt_info; 6016 struct target_modify_ldt_ldt_s *target_ldt_info; 6017 int seg_32bit, contents, read_exec_only, limit_in_pages; 6018 int seg_not_present, useable, lm; 6019 uint32_t *lp, entry_1, entry_2; 6020 6021 if (bytecount != sizeof(ldt_info)) 6022 return -TARGET_EINVAL; 6023 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 6024 return -TARGET_EFAULT; 6025 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6026 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6027 ldt_info.limit = tswap32(target_ldt_info->limit); 6028 ldt_info.flags = tswap32(target_ldt_info->flags); 6029 unlock_user_struct(target_ldt_info, ptr, 0); 6030 6031 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 6032 return -TARGET_EINVAL; 6033 seg_32bit = ldt_info.flags & 1; 6034 contents = (ldt_info.flags >> 1) & 3; 6035 read_exec_only = (ldt_info.flags >> 3) & 1; 6036 limit_in_pages = (ldt_info.flags >> 4) & 1; 6037 seg_not_present = (ldt_info.flags >> 5) & 1; 6038 useable = (ldt_info.flags >> 6) & 1; 6039 #ifdef TARGET_ABI32 6040 lm = 0; 6041 #else 6042 lm = (ldt_info.flags >> 7) & 1; 6043 #endif 6044 if (contents == 3) { 6045 if (oldmode) 6046 return -TARGET_EINVAL; 6047 if (seg_not_present == 0) 6048 return -TARGET_EINVAL; 6049 } 6050 /* allocate the LDT */ 6051 if (!ldt_table) { 6052 env->ldt.base = target_mmap(0, 6053 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 6054 PROT_READ|PROT_WRITE, 6055 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 6056 if (env->ldt.base == -1) 6057 return -TARGET_ENOMEM; 6058 memset(g2h_untagged(env->ldt.base), 0, 6059 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 6060 env->ldt.limit = 0xffff; 6061 ldt_table = g2h_untagged(env->ldt.base); 6062 } 6063 6064 /* NOTE: same code as Linux kernel */ 6065 /* Allow LDTs to be cleared by the user.
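A request with base_addr == 0 and limit == 0 whose flags describe an
"empty" descriptor (contents 0, read_exec_only 1, seg_not_present 1,
everything else 0) installs the all-zero descriptor pair built below,
matching the LDT_empty() convention used by the kernel's own
modify_ldt code.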
*/ 6066 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6067 if (oldmode || 6068 (contents == 0 && 6069 read_exec_only == 1 && 6070 seg_32bit == 0 && 6071 limit_in_pages == 0 && 6072 seg_not_present == 1 && 6073 useable == 0 )) { 6074 entry_1 = 0; 6075 entry_2 = 0; 6076 goto install; 6077 } 6078 } 6079 6080 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6081 (ldt_info.limit & 0x0ffff); 6082 entry_2 = (ldt_info.base_addr & 0xff000000) | 6083 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6084 (ldt_info.limit & 0xf0000) | 6085 ((read_exec_only ^ 1) << 9) | 6086 (contents << 10) | 6087 ((seg_not_present ^ 1) << 15) | 6088 (seg_32bit << 22) | 6089 (limit_in_pages << 23) | 6090 (lm << 21) | 6091 0x7000; 6092 if (!oldmode) 6093 entry_2 |= (useable << 20); 6094 6095 /* Install the new entry ... */ 6096 install: 6097 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 6098 lp[0] = tswap32(entry_1); 6099 lp[1] = tswap32(entry_2); 6100 return 0; 6101 } 6102 6103 /* specific and weird i386 syscalls */ 6104 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 6105 unsigned long bytecount) 6106 { 6107 abi_long ret; 6108 6109 switch (func) { 6110 case 0: 6111 ret = read_ldt(ptr, bytecount); 6112 break; 6113 case 1: 6114 ret = write_ldt(env, ptr, bytecount, 1); 6115 break; 6116 case 0x11: 6117 ret = write_ldt(env, ptr, bytecount, 0); 6118 break; 6119 default: 6120 ret = -TARGET_ENOSYS; 6121 break; 6122 } 6123 return ret; 6124 } 6125 6126 #if defined(TARGET_ABI32) 6127 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 6128 { 6129 uint64_t *gdt_table = g2h_untagged(env->gdt.base); 6130 struct target_modify_ldt_ldt_s ldt_info; 6131 struct target_modify_ldt_ldt_s *target_ldt_info; 6132 int seg_32bit, contents, read_exec_only, limit_in_pages; 6133 int seg_not_present, useable, lm; 6134 uint32_t *lp, entry_1, entry_2; 6135 int i; 6136 6137 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6138 if (!target_ldt_info) 6139 return -TARGET_EFAULT; 6140 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6141 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6142 ldt_info.limit = tswap32(target_ldt_info->limit); 6143 ldt_info.flags = tswap32(target_ldt_info->flags); 6144 if (ldt_info.entry_number == -1) { 6145 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 6146 if (gdt_table[i] == 0) { 6147 ldt_info.entry_number = i; 6148 target_ldt_info->entry_number = tswap32(i); 6149 break; 6150 } 6151 } 6152 } 6153 unlock_user_struct(target_ldt_info, ptr, 1); 6154 6155 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 6156 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 6157 return -TARGET_EINVAL; 6158 seg_32bit = ldt_info.flags & 1; 6159 contents = (ldt_info.flags >> 1) & 3; 6160 read_exec_only = (ldt_info.flags >> 3) & 1; 6161 limit_in_pages = (ldt_info.flags >> 4) & 1; 6162 seg_not_present = (ldt_info.flags >> 5) & 1; 6163 useable = (ldt_info.flags >> 6) & 1; 6164 #ifdef TARGET_ABI32 6165 lm = 0; 6166 #else 6167 lm = (ldt_info.flags >> 7) & 1; 6168 #endif 6169 6170 if (contents == 3) { 6171 if (seg_not_present == 0) 6172 return -TARGET_EINVAL; 6173 } 6174 6175 /* NOTE: same code as Linux kernel */ 6176 /* Allow LDTs to be cleared by the user. 
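The same empty-descriptor convention as in write_ldt() applies here,
even though this writes GDT TLS slots rather than the LDT proper:
clearing a slot leaves gdt_table[i] zero again, so the
entry_number == -1 allocation loop above can hand the slot out to a
later set_thread_area() call.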
*/ 6177 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6178 if ((contents == 0 && 6179 read_exec_only == 1 && 6180 seg_32bit == 0 && 6181 limit_in_pages == 0 && 6182 seg_not_present == 1 && 6183 useable == 0 )) { 6184 entry_1 = 0; 6185 entry_2 = 0; 6186 goto install; 6187 } 6188 } 6189 6190 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6191 (ldt_info.limit & 0x0ffff); 6192 entry_2 = (ldt_info.base_addr & 0xff000000) | 6193 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6194 (ldt_info.limit & 0xf0000) | 6195 ((read_exec_only ^ 1) << 9) | 6196 (contents << 10) | 6197 ((seg_not_present ^ 1) << 15) | 6198 (seg_32bit << 22) | 6199 (limit_in_pages << 23) | 6200 (useable << 20) | 6201 (lm << 21) | 6202 0x7000; 6203 6204 /* Install the new entry ... */ 6205 install: 6206 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 6207 lp[0] = tswap32(entry_1); 6208 lp[1] = tswap32(entry_2); 6209 return 0; 6210 } 6211 6212 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 6213 { 6214 struct target_modify_ldt_ldt_s *target_ldt_info; 6215 uint64_t *gdt_table = g2h_untagged(env->gdt.base); 6216 uint32_t base_addr, limit, flags; 6217 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 6218 int seg_not_present, useable, lm; 6219 uint32_t *lp, entry_1, entry_2; 6220 6221 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6222 if (!target_ldt_info) 6223 return -TARGET_EFAULT; 6224 idx = tswap32(target_ldt_info->entry_number); 6225 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 6226 idx > TARGET_GDT_ENTRY_TLS_MAX) { 6227 unlock_user_struct(target_ldt_info, ptr, 1); 6228 return -TARGET_EINVAL; 6229 } 6230 lp = (uint32_t *)(gdt_table + idx); 6231 entry_1 = tswap32(lp[0]); 6232 entry_2 = tswap32(lp[1]); 6233 6234 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 6235 contents = (entry_2 >> 10) & 3; 6236 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 6237 seg_32bit = (entry_2 >> 22) & 1; 6238 limit_in_pages = (entry_2 >> 23) & 1; 6239 useable = (entry_2 >> 20) & 1; 6240 #ifdef TARGET_ABI32 6241 lm = 0; 6242 #else 6243 lm = (entry_2 >> 21) & 1; 6244 #endif 6245 flags = (seg_32bit << 0) | (contents << 1) | 6246 (read_exec_only << 3) | (limit_in_pages << 4) | 6247 (seg_not_present << 5) | (useable << 6) | (lm << 7); 6248 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 6249 base_addr = (entry_1 >> 16) | 6250 (entry_2 & 0xff000000) | 6251 ((entry_2 & 0xff) << 16); 6252 target_ldt_info->base_addr = tswapal(base_addr); 6253 target_ldt_info->limit = tswap32(limit); 6254 target_ldt_info->flags = tswap32(flags); 6255 unlock_user_struct(target_ldt_info, ptr, 1); 6256 return 0; 6257 } 6258 6259 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6260 { 6261 return -TARGET_ENOSYS; 6262 } 6263 #else 6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6265 { 6266 abi_long ret = 0; 6267 abi_ulong val; 6268 int idx; 6269 6270 switch(code) { 6271 case TARGET_ARCH_SET_GS: 6272 case TARGET_ARCH_SET_FS: 6273 if (code == TARGET_ARCH_SET_GS) 6274 idx = R_GS; 6275 else 6276 idx = R_FS; 6277 cpu_x86_load_seg(env, idx, 0); 6278 env->segs[idx].base = addr; 6279 break; 6280 case TARGET_ARCH_GET_GS: 6281 case TARGET_ARCH_GET_FS: 6282 if (code == TARGET_ARCH_GET_GS) 6283 idx = R_GS; 6284 else 6285 idx = R_FS; 6286 val = env->segs[idx].base; 6287 if (put_user(val, addr, abi_ulong)) 6288 ret = -TARGET_EFAULT; 6289 break; 6290 default: 6291 ret = -TARGET_EINVAL; 6292 break; 6293 } 6294 return ret; 6295 } 6296 #endif /* defined(TARGET_ABI32 */ 6297 6298 #endif /* defined(TARGET_I386) 
*/ 6299 6300 #define NEW_STACK_SIZE 0x40000 6301 6302 6303 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 6304 typedef struct { 6305 CPUArchState *env; 6306 pthread_mutex_t mutex; 6307 pthread_cond_t cond; 6308 pthread_t thread; 6309 uint32_t tid; 6310 abi_ulong child_tidptr; 6311 abi_ulong parent_tidptr; 6312 sigset_t sigmask; 6313 } new_thread_info; 6314 6315 static void *clone_func(void *arg) 6316 { 6317 new_thread_info *info = arg; 6318 CPUArchState *env; 6319 CPUState *cpu; 6320 TaskState *ts; 6321 6322 rcu_register_thread(); 6323 tcg_register_thread(); 6324 env = info->env; 6325 cpu = env_cpu(env); 6326 thread_cpu = cpu; 6327 ts = (TaskState *)cpu->opaque; 6328 info->tid = sys_gettid(); 6329 task_settid(ts); 6330 if (info->child_tidptr) 6331 put_user_u32(info->tid, info->child_tidptr); 6332 if (info->parent_tidptr) 6333 put_user_u32(info->tid, info->parent_tidptr); 6334 qemu_guest_random_seed_thread_part2(cpu->random_seed); 6335 /* Enable signals. */ 6336 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 6337 /* Signal to the parent that we're ready. */ 6338 pthread_mutex_lock(&info->mutex); 6339 pthread_cond_broadcast(&info->cond); 6340 pthread_mutex_unlock(&info->mutex); 6341 /* Wait until the parent has finished initializing the tls state. */ 6342 pthread_mutex_lock(&clone_lock); 6343 pthread_mutex_unlock(&clone_lock); 6344 cpu_loop(env); 6345 /* never exits */ 6346 return NULL; 6347 } 6348 6349 /* do_fork() Must return host values and target errnos (unlike most 6350 do_*() functions). */ 6351 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 6352 abi_ulong parent_tidptr, target_ulong newtls, 6353 abi_ulong child_tidptr) 6354 { 6355 CPUState *cpu = env_cpu(env); 6356 int ret; 6357 TaskState *ts; 6358 CPUState *new_cpu; 6359 CPUArchState *new_env; 6360 sigset_t sigmask; 6361 6362 flags &= ~CLONE_IGNORED_FLAGS; 6363 6364 /* Emulate vfork() with fork() */ 6365 if (flags & CLONE_VFORK) 6366 flags &= ~(CLONE_VFORK | CLONE_VM); 6367 6368 if (flags & CLONE_VM) { 6369 TaskState *parent_ts = (TaskState *)cpu->opaque; 6370 new_thread_info info; 6371 pthread_attr_t attr; 6372 6373 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) || 6374 (flags & CLONE_INVALID_THREAD_FLAGS)) { 6375 return -TARGET_EINVAL; 6376 } 6377 6378 ts = g_new0(TaskState, 1); 6379 init_task_state(ts); 6380 6381 /* Grab a mutex so that thread setup appears atomic. */ 6382 pthread_mutex_lock(&clone_lock); 6383 6384 /* 6385 * If this is our first additional thread, we need to ensure we 6386 * generate code for parallel execution and flush old translations. 6387 * Do this now so that the copy gets CF_PARALLEL too. 6388 */ 6389 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 6390 cpu->tcg_cflags |= CF_PARALLEL; 6391 tb_flush(cpu); 6392 } 6393 6394 /* we create a new CPU instance. */ 6395 new_env = cpu_copy(env); 6396 /* Init regs that differ from the parent. 
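Broadly, cpu_clone_regs_child() points the child at the new stack
(when newsp is non-zero) and arranges for it to see a zero syscall
return value, while cpu_clone_regs_parent() performs whatever
per-target fixup the parent side needs.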
*/ 6397 cpu_clone_regs_child(new_env, newsp, flags); 6398 cpu_clone_regs_parent(env, flags); 6399 new_cpu = env_cpu(new_env); 6400 new_cpu->opaque = ts; 6401 ts->bprm = parent_ts->bprm; 6402 ts->info = parent_ts->info; 6403 ts->signal_mask = parent_ts->signal_mask; 6404 6405 if (flags & CLONE_CHILD_CLEARTID) { 6406 ts->child_tidptr = child_tidptr; 6407 } 6408 6409 if (flags & CLONE_SETTLS) { 6410 cpu_set_tls (new_env, newtls); 6411 } 6412 6413 memset(&info, 0, sizeof(info)); 6414 pthread_mutex_init(&info.mutex, NULL); 6415 pthread_mutex_lock(&info.mutex); 6416 pthread_cond_init(&info.cond, NULL); 6417 info.env = new_env; 6418 if (flags & CLONE_CHILD_SETTID) { 6419 info.child_tidptr = child_tidptr; 6420 } 6421 if (flags & CLONE_PARENT_SETTID) { 6422 info.parent_tidptr = parent_tidptr; 6423 } 6424 6425 ret = pthread_attr_init(&attr); 6426 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 6427 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 6428 /* It is not safe to deliver signals until the child has finished 6429 initializing, so temporarily block all signals. */ 6430 sigfillset(&sigmask); 6431 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 6432 cpu->random_seed = qemu_guest_random_seed_thread_part1(); 6433 6434 ret = pthread_create(&info.thread, &attr, clone_func, &info); 6435 /* TODO: Free new CPU state if thread creation failed. */ 6436 6437 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 6438 pthread_attr_destroy(&attr); 6439 if (ret == 0) { 6440 /* Wait for the child to initialize. */ 6441 pthread_cond_wait(&info.cond, &info.mutex); 6442 ret = info.tid; 6443 } else { 6444 ret = -1; 6445 } 6446 pthread_mutex_unlock(&info.mutex); 6447 pthread_cond_destroy(&info.cond); 6448 pthread_mutex_destroy(&info.mutex); 6449 pthread_mutex_unlock(&clone_lock); 6450 } else { 6451 /* if no CLONE_VM, we consider it is a fork */ 6452 if (flags & CLONE_INVALID_FORK_FLAGS) { 6453 return -TARGET_EINVAL; 6454 } 6455 6456 /* We can't support custom termination signals */ 6457 if ((flags & CSIGNAL) != TARGET_SIGCHLD) { 6458 return -TARGET_EINVAL; 6459 } 6460 6461 if (block_signals()) { 6462 return -QEMU_ERESTARTSYS; 6463 } 6464 6465 fork_start(); 6466 ret = fork(); 6467 if (ret == 0) { 6468 /* Child Process. */ 6469 cpu_clone_regs_child(env, newsp, flags); 6470 fork_end(1); 6471 /* There is a race condition here. The parent process could 6472 theoretically read the TID in the child process before the child 6473 tid is set. This would require using either ptrace 6474 (not implemented) or having *_tidptr to point at a shared memory 6475 mapping. We can't repeat the spinlock hack used above because 6476 the child process gets its own copy of the lock. */ 6477 if (flags & CLONE_CHILD_SETTID) 6478 put_user_u32(sys_gettid(), child_tidptr); 6479 if (flags & CLONE_PARENT_SETTID) 6480 put_user_u32(sys_gettid(), parent_tidptr); 6481 ts = (TaskState *)cpu->opaque; 6482 if (flags & CLONE_SETTLS) 6483 cpu_set_tls (env, newtls); 6484 if (flags & CLONE_CHILD_CLEARTID) 6485 ts->child_tidptr = child_tidptr; 6486 } else { 6487 cpu_clone_regs_parent(env, flags); 6488 fork_end(0); 6489 } 6490 } 6491 return ret; 6492 } 6493 6494 /* warning : doesn't handle linux specific flags... 
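Commands whose host and target values are known to agree pass straight
through; the plain record-locking commands are canonicalised to their
64-bit variants, which do_fcntl() below pairs with struct flock64;
anything unrecognised yields -TARGET_EINVAL.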
*/ 6495 static int target_to_host_fcntl_cmd(int cmd) 6496 { 6497 int ret; 6498 6499 switch(cmd) { 6500 case TARGET_F_DUPFD: 6501 case TARGET_F_GETFD: 6502 case TARGET_F_SETFD: 6503 case TARGET_F_GETFL: 6504 case TARGET_F_SETFL: 6505 case TARGET_F_OFD_GETLK: 6506 case TARGET_F_OFD_SETLK: 6507 case TARGET_F_OFD_SETLKW: 6508 ret = cmd; 6509 break; 6510 case TARGET_F_GETLK: 6511 ret = F_GETLK64; 6512 break; 6513 case TARGET_F_SETLK: 6514 ret = F_SETLK64; 6515 break; 6516 case TARGET_F_SETLKW: 6517 ret = F_SETLKW64; 6518 break; 6519 case TARGET_F_GETOWN: 6520 ret = F_GETOWN; 6521 break; 6522 case TARGET_F_SETOWN: 6523 ret = F_SETOWN; 6524 break; 6525 case TARGET_F_GETSIG: 6526 ret = F_GETSIG; 6527 break; 6528 case TARGET_F_SETSIG: 6529 ret = F_SETSIG; 6530 break; 6531 #if TARGET_ABI_BITS == 32 6532 case TARGET_F_GETLK64: 6533 ret = F_GETLK64; 6534 break; 6535 case TARGET_F_SETLK64: 6536 ret = F_SETLK64; 6537 break; 6538 case TARGET_F_SETLKW64: 6539 ret = F_SETLKW64; 6540 break; 6541 #endif 6542 case TARGET_F_SETLEASE: 6543 ret = F_SETLEASE; 6544 break; 6545 case TARGET_F_GETLEASE: 6546 ret = F_GETLEASE; 6547 break; 6548 #ifdef F_DUPFD_CLOEXEC 6549 case TARGET_F_DUPFD_CLOEXEC: 6550 ret = F_DUPFD_CLOEXEC; 6551 break; 6552 #endif 6553 case TARGET_F_NOTIFY: 6554 ret = F_NOTIFY; 6555 break; 6556 #ifdef F_GETOWN_EX 6557 case TARGET_F_GETOWN_EX: 6558 ret = F_GETOWN_EX; 6559 break; 6560 #endif 6561 #ifdef F_SETOWN_EX 6562 case TARGET_F_SETOWN_EX: 6563 ret = F_SETOWN_EX; 6564 break; 6565 #endif 6566 #ifdef F_SETPIPE_SZ 6567 case TARGET_F_SETPIPE_SZ: 6568 ret = F_SETPIPE_SZ; 6569 break; 6570 case TARGET_F_GETPIPE_SZ: 6571 ret = F_GETPIPE_SZ; 6572 break; 6573 #endif 6574 #ifdef F_ADD_SEALS 6575 case TARGET_F_ADD_SEALS: 6576 ret = F_ADD_SEALS; 6577 break; 6578 case TARGET_F_GET_SEALS: 6579 ret = F_GET_SEALS; 6580 break; 6581 #endif 6582 default: 6583 ret = -TARGET_EINVAL; 6584 break; 6585 } 6586 6587 #if defined(__powerpc64__) 6588 /* On PPC64, the glibc headers have the F_*LK* commands defined as 12, 13 6589 * and 14, values which are not supported by the kernel. The glibc fcntl 6590 * wrapper adjusts them to 5, 6 and 7 before making the syscall(). Since we 6591 * make the syscall directly, adjust them to what the kernel supports.
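 * For example, a guest F_GETLK is first mapped to F_GETLK64 above, which
 * the glibc headers here define as 12; the adjustment below turns that
 * into the value 5 the kernel expects before the raw syscall is issued.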
6592 */ 6593 if (ret >= F_GETLK64 && ret <= F_SETLKW64) { 6594 ret -= F_GETLK64 - 5; 6595 } 6596 #endif 6597 6598 return ret; 6599 } 6600 6601 #define FLOCK_TRANSTBL \ 6602 switch (type) { \ 6603 TRANSTBL_CONVERT(F_RDLCK); \ 6604 TRANSTBL_CONVERT(F_WRLCK); \ 6605 TRANSTBL_CONVERT(F_UNLCK); \ 6606 } 6607 6608 static int target_to_host_flock(int type) 6609 { 6610 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a 6611 FLOCK_TRANSTBL 6612 #undef TRANSTBL_CONVERT 6613 return -TARGET_EINVAL; 6614 } 6615 6616 static int host_to_target_flock(int type) 6617 { 6618 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a 6619 FLOCK_TRANSTBL 6620 #undef TRANSTBL_CONVERT 6621 /* if we don't know how to convert the value coming 6622 * from the host we copy to the target field as-is 6623 */ 6624 return type; 6625 } 6626 6627 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6628 abi_ulong target_flock_addr) 6629 { 6630 struct target_flock *target_fl; 6631 int l_type; 6632 6633 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6634 return -TARGET_EFAULT; 6635 } 6636 6637 __get_user(l_type, &target_fl->l_type); 6638 l_type = target_to_host_flock(l_type); 6639 if (l_type < 0) { 6640 return l_type; 6641 } 6642 fl->l_type = l_type; 6643 __get_user(fl->l_whence, &target_fl->l_whence); 6644 __get_user(fl->l_start, &target_fl->l_start); 6645 __get_user(fl->l_len, &target_fl->l_len); 6646 __get_user(fl->l_pid, &target_fl->l_pid); 6647 unlock_user_struct(target_fl, target_flock_addr, 0); 6648 return 0; 6649 } 6650 6651 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6652 const struct flock64 *fl) 6653 { 6654 struct target_flock *target_fl; 6655 short l_type; 6656 6657 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6658 return -TARGET_EFAULT; 6659 } 6660 6661 l_type = host_to_target_flock(fl->l_type); 6662 __put_user(l_type, &target_fl->l_type); 6663 __put_user(fl->l_whence, &target_fl->l_whence); 6664 __put_user(fl->l_start, &target_fl->l_start); 6665 __put_user(fl->l_len, &target_fl->l_len); 6666 __put_user(fl->l_pid, &target_fl->l_pid); 6667 unlock_user_struct(target_fl, target_flock_addr, 1); 6668 return 0; 6669 } 6670 6671 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6672 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6673 6674 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6675 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6676 abi_ulong target_flock_addr) 6677 { 6678 struct target_oabi_flock64 *target_fl; 6679 int l_type; 6680 6681 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6682 return -TARGET_EFAULT; 6683 } 6684 6685 __get_user(l_type, &target_fl->l_type); 6686 l_type = target_to_host_flock(l_type); 6687 if (l_type < 0) { 6688 return l_type; 6689 } 6690 fl->l_type = l_type; 6691 __get_user(fl->l_whence, &target_fl->l_whence); 6692 __get_user(fl->l_start, &target_fl->l_start); 6693 __get_user(fl->l_len, &target_fl->l_len); 6694 __get_user(fl->l_pid, &target_fl->l_pid); 6695 unlock_user_struct(target_fl, target_flock_addr, 0); 6696 return 0; 6697 } 6698 6699 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 6700 const struct flock64 *fl) 6701 { 6702 struct target_oabi_flock64 *target_fl; 6703 short l_type; 6704 6705 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6706 return -TARGET_EFAULT; 6707 } 6708 6709 l_type = host_to_target_flock(fl->l_type); 6710 
__put_user(l_type, &target_fl->l_type); 6711 __put_user(fl->l_whence, &target_fl->l_whence); 6712 __put_user(fl->l_start, &target_fl->l_start); 6713 __put_user(fl->l_len, &target_fl->l_len); 6714 __put_user(fl->l_pid, &target_fl->l_pid); 6715 unlock_user_struct(target_fl, target_flock_addr, 1); 6716 return 0; 6717 } 6718 #endif 6719 6720 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6721 abi_ulong target_flock_addr) 6722 { 6723 struct target_flock64 *target_fl; 6724 int l_type; 6725 6726 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6727 return -TARGET_EFAULT; 6728 } 6729 6730 __get_user(l_type, &target_fl->l_type); 6731 l_type = target_to_host_flock(l_type); 6732 if (l_type < 0) { 6733 return l_type; 6734 } 6735 fl->l_type = l_type; 6736 __get_user(fl->l_whence, &target_fl->l_whence); 6737 __get_user(fl->l_start, &target_fl->l_start); 6738 __get_user(fl->l_len, &target_fl->l_len); 6739 __get_user(fl->l_pid, &target_fl->l_pid); 6740 unlock_user_struct(target_fl, target_flock_addr, 0); 6741 return 0; 6742 } 6743 6744 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 6745 const struct flock64 *fl) 6746 { 6747 struct target_flock64 *target_fl; 6748 short l_type; 6749 6750 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6751 return -TARGET_EFAULT; 6752 } 6753 6754 l_type = host_to_target_flock(fl->l_type); 6755 __put_user(l_type, &target_fl->l_type); 6756 __put_user(fl->l_whence, &target_fl->l_whence); 6757 __put_user(fl->l_start, &target_fl->l_start); 6758 __put_user(fl->l_len, &target_fl->l_len); 6759 __put_user(fl->l_pid, &target_fl->l_pid); 6760 unlock_user_struct(target_fl, target_flock_addr, 1); 6761 return 0; 6762 } 6763 6764 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 6765 { 6766 struct flock64 fl64; 6767 #ifdef F_GETOWN_EX 6768 struct f_owner_ex fox; 6769 struct target_f_owner_ex *target_fox; 6770 #endif 6771 abi_long ret; 6772 int host_cmd = target_to_host_fcntl_cmd(cmd); 6773 6774 if (host_cmd == -TARGET_EINVAL) 6775 return host_cmd; 6776 6777 switch(cmd) { 6778 case TARGET_F_GETLK: 6779 ret = copy_from_user_flock(&fl64, arg); 6780 if (ret) { 6781 return ret; 6782 } 6783 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6784 if (ret == 0) { 6785 ret = copy_to_user_flock(arg, &fl64); 6786 } 6787 break; 6788 6789 case TARGET_F_SETLK: 6790 case TARGET_F_SETLKW: 6791 ret = copy_from_user_flock(&fl64, arg); 6792 if (ret) { 6793 return ret; 6794 } 6795 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6796 break; 6797 6798 case TARGET_F_GETLK64: 6799 case TARGET_F_OFD_GETLK: 6800 ret = copy_from_user_flock64(&fl64, arg); 6801 if (ret) { 6802 return ret; 6803 } 6804 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6805 if (ret == 0) { 6806 ret = copy_to_user_flock64(arg, &fl64); 6807 } 6808 break; 6809 case TARGET_F_SETLK64: 6810 case TARGET_F_SETLKW64: 6811 case TARGET_F_OFD_SETLK: 6812 case TARGET_F_OFD_SETLKW: 6813 ret = copy_from_user_flock64(&fl64, arg); 6814 if (ret) { 6815 return ret; 6816 } 6817 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6818 break; 6819 6820 case TARGET_F_GETFL: 6821 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6822 if (ret >= 0) { 6823 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 6824 } 6825 break; 6826 6827 case TARGET_F_SETFL: 6828 ret = get_errno(safe_fcntl(fd, host_cmd, 6829 target_to_host_bitmask(arg, 6830 fcntl_flags_tbl))); 6831 break; 6832 6833 #ifdef F_GETOWN_EX 6834 case TARGET_F_GETOWN_EX: 6835 ret = get_errno(safe_fcntl(fd, 
host_cmd, &fox)); 6836 if (ret >= 0) { 6837 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 6838 return -TARGET_EFAULT; 6839 target_fox->type = tswap32(fox.type); 6840 target_fox->pid = tswap32(fox.pid); 6841 unlock_user_struct(target_fox, arg, 1); 6842 } 6843 break; 6844 #endif 6845 6846 #ifdef F_SETOWN_EX 6847 case TARGET_F_SETOWN_EX: 6848 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 6849 return -TARGET_EFAULT; 6850 fox.type = tswap32(target_fox->type); 6851 fox.pid = tswap32(target_fox->pid); 6852 unlock_user_struct(target_fox, arg, 0); 6853 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 6854 break; 6855 #endif 6856 6857 case TARGET_F_SETSIG: 6858 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg))); 6859 break; 6860 6861 case TARGET_F_GETSIG: 6862 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg))); 6863 break; 6864 6865 case TARGET_F_SETOWN: 6866 case TARGET_F_GETOWN: 6867 case TARGET_F_SETLEASE: 6868 case TARGET_F_GETLEASE: 6869 case TARGET_F_SETPIPE_SZ: 6870 case TARGET_F_GETPIPE_SZ: 6871 case TARGET_F_ADD_SEALS: 6872 case TARGET_F_GET_SEALS: 6873 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6874 break; 6875 6876 default: 6877 ret = get_errno(safe_fcntl(fd, cmd, arg)); 6878 break; 6879 } 6880 return ret; 6881 } 6882 6883 #ifdef USE_UID16 6884 6885 static inline int high2lowuid(int uid) 6886 { 6887 if (uid > 65535) 6888 return 65534; 6889 else 6890 return uid; 6891 } 6892 6893 static inline int high2lowgid(int gid) 6894 { 6895 if (gid > 65535) 6896 return 65534; 6897 else 6898 return gid; 6899 } 6900 6901 static inline int low2highuid(int uid) 6902 { 6903 if ((int16_t)uid == -1) 6904 return -1; 6905 else 6906 return uid; 6907 } 6908 6909 static inline int low2highgid(int gid) 6910 { 6911 if ((int16_t)gid == -1) 6912 return -1; 6913 else 6914 return gid; 6915 } 6916 static inline int tswapid(int id) 6917 { 6918 return tswap16(id); 6919 } 6920 6921 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 6922 6923 #else /* !USE_UID16 */ 6924 static inline int high2lowuid(int uid) 6925 { 6926 return uid; 6927 } 6928 static inline int high2lowgid(int gid) 6929 { 6930 return gid; 6931 } 6932 static inline int low2highuid(int uid) 6933 { 6934 return uid; 6935 } 6936 static inline int low2highgid(int gid) 6937 { 6938 return gid; 6939 } 6940 static inline int tswapid(int id) 6941 { 6942 return tswap32(id); 6943 } 6944 6945 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 6946 6947 #endif /* USE_UID16 */ 6948 6949 /* We must do direct syscalls for setting UID/GID, because we want to 6950 * implement the Linux system call semantics of "change only for this thread", 6951 * not the libc/POSIX semantics of "change for all threads in process". 6952 * (See http://ewontfix.com/17/ for more details.) 6953 * We use the 32-bit version of the syscalls if present; if it is not 6954 * then either the host architecture supports 32-bit UIDs natively with 6955 * the standard syscall, or the 16-bit UID is the best we can do. 
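 * Hence sys_setuid() below is a raw wrapper around __NR_setuid32 where the
 * host provides it (and plain __NR_setuid otherwise): it changes the
 * credentials of the calling thread only, which is exactly what the
 * guest's own raw setuid syscall would have done.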
6956 */ 6957 #ifdef __NR_setuid32 6958 #define __NR_sys_setuid __NR_setuid32 6959 #else 6960 #define __NR_sys_setuid __NR_setuid 6961 #endif 6962 #ifdef __NR_setgid32 6963 #define __NR_sys_setgid __NR_setgid32 6964 #else 6965 #define __NR_sys_setgid __NR_setgid 6966 #endif 6967 #ifdef __NR_setresuid32 6968 #define __NR_sys_setresuid __NR_setresuid32 6969 #else 6970 #define __NR_sys_setresuid __NR_setresuid 6971 #endif 6972 #ifdef __NR_setresgid32 6973 #define __NR_sys_setresgid __NR_setresgid32 6974 #else 6975 #define __NR_sys_setresgid __NR_setresgid 6976 #endif 6977 6978 _syscall1(int, sys_setuid, uid_t, uid) 6979 _syscall1(int, sys_setgid, gid_t, gid) 6980 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 6981 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 6982 6983 void syscall_init(void) 6984 { 6985 IOCTLEntry *ie; 6986 const argtype *arg_type; 6987 int size; 6988 6989 thunk_init(STRUCT_MAX); 6990 6991 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 6992 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 6993 #include "syscall_types.h" 6994 #undef STRUCT 6995 #undef STRUCT_SPECIAL 6996 6997 /* we patch the ioctl size if necessary. We rely on the fact that 6998 no ioctl has all the bits at '1' in the size field */ 6999 ie = ioctl_entries; 7000 while (ie->target_cmd != 0) { 7001 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 7002 TARGET_IOC_SIZEMASK) { 7003 arg_type = ie->arg_type; 7004 if (arg_type[0] != TYPE_PTR) { 7005 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 7006 ie->target_cmd); 7007 exit(1); 7008 } 7009 arg_type++; 7010 size = thunk_type_size(arg_type, 0); 7011 ie->target_cmd = (ie->target_cmd & 7012 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 7013 (size << TARGET_IOC_SIZESHIFT); 7014 } 7015 7016 /* automatic consistency check if same arch */ 7017 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 7018 (defined(__x86_64__) && defined(TARGET_X86_64)) 7019 if (unlikely(ie->target_cmd != ie->host_cmd)) { 7020 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 7021 ie->name, ie->target_cmd, ie->host_cmd); 7022 } 7023 #endif 7024 ie++; 7025 } 7026 } 7027 7028 #ifdef TARGET_NR_truncate64 7029 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 7030 abi_long arg2, 7031 abi_long arg3, 7032 abi_long arg4) 7033 { 7034 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) { 7035 arg2 = arg3; 7036 arg3 = arg4; 7037 } 7038 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 7039 } 7040 #endif 7041 7042 #ifdef TARGET_NR_ftruncate64 7043 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 7044 abi_long arg2, 7045 abi_long arg3, 7046 abi_long arg4) 7047 { 7048 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 7049 arg2 = arg3; 7050 arg3 = arg4; 7051 } 7052 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 7053 } 7054 #endif 7055 7056 #if defined(TARGET_NR_timer_settime) || \ 7057 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)) 7058 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its, 7059 abi_ulong target_addr) 7060 { 7061 if (target_to_host_timespec(&host_its->it_interval, target_addr + 7062 offsetof(struct target_itimerspec, 7063 it_interval)) || 7064 target_to_host_timespec(&host_its->it_value, target_addr + 7065 offsetof(struct target_itimerspec, 7066 
it_value))) { 7067 return -TARGET_EFAULT; 7068 } 7069 7070 return 0; 7071 } 7072 #endif 7073 7074 #if defined(TARGET_NR_timer_settime64) || \ 7075 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) 7076 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its, 7077 abi_ulong target_addr) 7078 { 7079 if (target_to_host_timespec64(&host_its->it_interval, target_addr + 7080 offsetof(struct target__kernel_itimerspec, 7081 it_interval)) || 7082 target_to_host_timespec64(&host_its->it_value, target_addr + 7083 offsetof(struct target__kernel_itimerspec, 7084 it_value))) { 7085 return -TARGET_EFAULT; 7086 } 7087 7088 return 0; 7089 } 7090 #endif 7091 7092 #if ((defined(TARGET_NR_timerfd_gettime) || \ 7093 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \ 7094 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime) 7095 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 7096 struct itimerspec *host_its) 7097 { 7098 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7099 it_interval), 7100 &host_its->it_interval) || 7101 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7102 it_value), 7103 &host_its->it_value)) { 7104 return -TARGET_EFAULT; 7105 } 7106 return 0; 7107 } 7108 #endif 7109 7110 #if ((defined(TARGET_NR_timerfd_gettime64) || \ 7111 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \ 7112 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64) 7113 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr, 7114 struct itimerspec *host_its) 7115 { 7116 if (host_to_target_timespec64(target_addr + 7117 offsetof(struct target__kernel_itimerspec, 7118 it_interval), 7119 &host_its->it_interval) || 7120 host_to_target_timespec64(target_addr + 7121 offsetof(struct target__kernel_itimerspec, 7122 it_value), 7123 &host_its->it_value)) { 7124 return -TARGET_EFAULT; 7125 } 7126 return 0; 7127 } 7128 #endif 7129 7130 #if defined(TARGET_NR_adjtimex) || \ 7131 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)) 7132 static inline abi_long target_to_host_timex(struct timex *host_tx, 7133 abi_long target_addr) 7134 { 7135 struct target_timex *target_tx; 7136 7137 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7138 return -TARGET_EFAULT; 7139 } 7140 7141 __get_user(host_tx->modes, &target_tx->modes); 7142 __get_user(host_tx->offset, &target_tx->offset); 7143 __get_user(host_tx->freq, &target_tx->freq); 7144 __get_user(host_tx->maxerror, &target_tx->maxerror); 7145 __get_user(host_tx->esterror, &target_tx->esterror); 7146 __get_user(host_tx->status, &target_tx->status); 7147 __get_user(host_tx->constant, &target_tx->constant); 7148 __get_user(host_tx->precision, &target_tx->precision); 7149 __get_user(host_tx->tolerance, &target_tx->tolerance); 7150 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7151 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7152 __get_user(host_tx->tick, &target_tx->tick); 7153 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7154 __get_user(host_tx->jitter, &target_tx->jitter); 7155 __get_user(host_tx->shift, &target_tx->shift); 7156 __get_user(host_tx->stabil, &target_tx->stabil); 7157 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7158 __get_user(host_tx->calcnt, &target_tx->calcnt); 7159 __get_user(host_tx->errcnt, &target_tx->errcnt); 7160 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7161 __get_user(host_tx->tai, 
&target_tx->tai); 7162 7163 unlock_user_struct(target_tx, target_addr, 0); 7164 return 0; 7165 } 7166 7167 static inline abi_long host_to_target_timex(abi_long target_addr, 7168 struct timex *host_tx) 7169 { 7170 struct target_timex *target_tx; 7171 7172 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7173 return -TARGET_EFAULT; 7174 } 7175 7176 __put_user(host_tx->modes, &target_tx->modes); 7177 __put_user(host_tx->offset, &target_tx->offset); 7178 __put_user(host_tx->freq, &target_tx->freq); 7179 __put_user(host_tx->maxerror, &target_tx->maxerror); 7180 __put_user(host_tx->esterror, &target_tx->esterror); 7181 __put_user(host_tx->status, &target_tx->status); 7182 __put_user(host_tx->constant, &target_tx->constant); 7183 __put_user(host_tx->precision, &target_tx->precision); 7184 __put_user(host_tx->tolerance, &target_tx->tolerance); 7185 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7186 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7187 __put_user(host_tx->tick, &target_tx->tick); 7188 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7189 __put_user(host_tx->jitter, &target_tx->jitter); 7190 __put_user(host_tx->shift, &target_tx->shift); 7191 __put_user(host_tx->stabil, &target_tx->stabil); 7192 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7193 __put_user(host_tx->calcnt, &target_tx->calcnt); 7194 __put_user(host_tx->errcnt, &target_tx->errcnt); 7195 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7196 __put_user(host_tx->tai, &target_tx->tai); 7197 7198 unlock_user_struct(target_tx, target_addr, 1); 7199 return 0; 7200 } 7201 #endif 7202 7203 7204 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 7205 static inline abi_long target_to_host_timex64(struct timex *host_tx, 7206 abi_long target_addr) 7207 { 7208 struct target__kernel_timex *target_tx; 7209 7210 if (copy_from_user_timeval64(&host_tx->time, target_addr + 7211 offsetof(struct target__kernel_timex, 7212 time))) { 7213 return -TARGET_EFAULT; 7214 } 7215 7216 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7217 return -TARGET_EFAULT; 7218 } 7219 7220 __get_user(host_tx->modes, &target_tx->modes); 7221 __get_user(host_tx->offset, &target_tx->offset); 7222 __get_user(host_tx->freq, &target_tx->freq); 7223 __get_user(host_tx->maxerror, &target_tx->maxerror); 7224 __get_user(host_tx->esterror, &target_tx->esterror); 7225 __get_user(host_tx->status, &target_tx->status); 7226 __get_user(host_tx->constant, &target_tx->constant); 7227 __get_user(host_tx->precision, &target_tx->precision); 7228 __get_user(host_tx->tolerance, &target_tx->tolerance); 7229 __get_user(host_tx->tick, &target_tx->tick); 7230 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7231 __get_user(host_tx->jitter, &target_tx->jitter); 7232 __get_user(host_tx->shift, &target_tx->shift); 7233 __get_user(host_tx->stabil, &target_tx->stabil); 7234 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7235 __get_user(host_tx->calcnt, &target_tx->calcnt); 7236 __get_user(host_tx->errcnt, &target_tx->errcnt); 7237 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7238 __get_user(host_tx->tai, &target_tx->tai); 7239 7240 unlock_user_struct(target_tx, target_addr, 0); 7241 return 0; 7242 } 7243 7244 static inline abi_long host_to_target_timex64(abi_long target_addr, 7245 struct timex *host_tx) 7246 { 7247 struct target__kernel_timex *target_tx; 7248 7249 if (copy_to_user_timeval64(target_addr + 7250 offsetof(struct target__kernel_timex, time), 7251 &host_tx->time)) { 7252 return 
-TARGET_EFAULT; 7253 } 7254 7255 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7256 return -TARGET_EFAULT; 7257 } 7258 7259 __put_user(host_tx->modes, &target_tx->modes); 7260 __put_user(host_tx->offset, &target_tx->offset); 7261 __put_user(host_tx->freq, &target_tx->freq); 7262 __put_user(host_tx->maxerror, &target_tx->maxerror); 7263 __put_user(host_tx->esterror, &target_tx->esterror); 7264 __put_user(host_tx->status, &target_tx->status); 7265 __put_user(host_tx->constant, &target_tx->constant); 7266 __put_user(host_tx->precision, &target_tx->precision); 7267 __put_user(host_tx->tolerance, &target_tx->tolerance); 7268 __put_user(host_tx->tick, &target_tx->tick); 7269 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7270 __put_user(host_tx->jitter, &target_tx->jitter); 7271 __put_user(host_tx->shift, &target_tx->shift); 7272 __put_user(host_tx->stabil, &target_tx->stabil); 7273 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7274 __put_user(host_tx->calcnt, &target_tx->calcnt); 7275 __put_user(host_tx->errcnt, &target_tx->errcnt); 7276 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7277 __put_user(host_tx->tai, &target_tx->tai); 7278 7279 unlock_user_struct(target_tx, target_addr, 1); 7280 return 0; 7281 } 7282 #endif 7283 7284 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID 7285 #define sigev_notify_thread_id _sigev_un._tid 7286 #endif 7287 7288 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 7289 abi_ulong target_addr) 7290 { 7291 struct target_sigevent *target_sevp; 7292 7293 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 7294 return -TARGET_EFAULT; 7295 } 7296 7297 /* This union is awkward on 64 bit systems because it has a 32 bit 7298 * integer and a pointer in it; we follow the conversion approach 7299 * used for handling sigval types in signal.c so the guest should get 7300 * the correct value back even if we did a 64 bit byteswap and it's 7301 * using the 32 bit integer. 
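 * (The sigev_notify_thread_id member copied below is only meaningful when
 * sigev_notify is SIGEV_THREAD_ID; for the other notification styles the
 * kernel ignores the field, so copying it unconditionally is harmless.)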
7302 */ 7303 host_sevp->sigev_value.sival_ptr = 7304 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 7305 host_sevp->sigev_signo = 7306 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 7307 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 7308 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid); 7309 7310 unlock_user_struct(target_sevp, target_addr, 1); 7311 return 0; 7312 } 7313 7314 #if defined(TARGET_NR_mlockall) 7315 static inline int target_to_host_mlockall_arg(int arg) 7316 { 7317 int result = 0; 7318 7319 if (arg & TARGET_MCL_CURRENT) { 7320 result |= MCL_CURRENT; 7321 } 7322 if (arg & TARGET_MCL_FUTURE) { 7323 result |= MCL_FUTURE; 7324 } 7325 #ifdef MCL_ONFAULT 7326 if (arg & TARGET_MCL_ONFAULT) { 7327 result |= MCL_ONFAULT; 7328 } 7329 #endif 7330 7331 return result; 7332 } 7333 #endif 7334 7335 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \ 7336 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \ 7337 defined(TARGET_NR_newfstatat)) 7338 static inline abi_long host_to_target_stat64(void *cpu_env, 7339 abi_ulong target_addr, 7340 struct stat *host_st) 7341 { 7342 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 7343 if (((CPUARMState *)cpu_env)->eabi) { 7344 struct target_eabi_stat64 *target_st; 7345 7346 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7347 return -TARGET_EFAULT; 7348 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 7349 __put_user(host_st->st_dev, &target_st->st_dev); 7350 __put_user(host_st->st_ino, &target_st->st_ino); 7351 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7352 __put_user(host_st->st_ino, &target_st->__st_ino); 7353 #endif 7354 __put_user(host_st->st_mode, &target_st->st_mode); 7355 __put_user(host_st->st_nlink, &target_st->st_nlink); 7356 __put_user(host_st->st_uid, &target_st->st_uid); 7357 __put_user(host_st->st_gid, &target_st->st_gid); 7358 __put_user(host_st->st_rdev, &target_st->st_rdev); 7359 __put_user(host_st->st_size, &target_st->st_size); 7360 __put_user(host_st->st_blksize, &target_st->st_blksize); 7361 __put_user(host_st->st_blocks, &target_st->st_blocks); 7362 __put_user(host_st->st_atime, &target_st->target_st_atime); 7363 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7364 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7365 #ifdef HAVE_STRUCT_STAT_ST_ATIM 7366 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); 7367 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); 7368 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); 7369 #endif 7370 unlock_user_struct(target_st, target_addr, 1); 7371 } else 7372 #endif 7373 { 7374 #if defined(TARGET_HAS_STRUCT_STAT64) 7375 struct target_stat64 *target_st; 7376 #else 7377 struct target_stat *target_st; 7378 #endif 7379 7380 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7381 return -TARGET_EFAULT; 7382 memset(target_st, 0, sizeof(*target_st)); 7383 __put_user(host_st->st_dev, &target_st->st_dev); 7384 __put_user(host_st->st_ino, &target_st->st_ino); 7385 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7386 __put_user(host_st->st_ino, &target_st->__st_ino); 7387 #endif 7388 __put_user(host_st->st_mode, &target_st->st_mode); 7389 __put_user(host_st->st_nlink, &target_st->st_nlink); 7390 __put_user(host_st->st_uid, &target_st->st_uid); 7391 __put_user(host_st->st_gid, &target_st->st_gid); 7392 __put_user(host_st->st_rdev, &target_st->st_rdev); 7393 /* XXX: better use of kernel struct */ 7394 
__put_user(host_st->st_size, &target_st->st_size); 7395 __put_user(host_st->st_blksize, &target_st->st_blksize); 7396 __put_user(host_st->st_blocks, &target_st->st_blocks); 7397 __put_user(host_st->st_atime, &target_st->target_st_atime); 7398 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7399 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7400 #ifdef HAVE_STRUCT_STAT_ST_ATIM 7401 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); 7402 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); 7403 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); 7404 #endif 7405 unlock_user_struct(target_st, target_addr, 1); 7406 } 7407 7408 return 0; 7409 } 7410 #endif 7411 7412 #if defined(TARGET_NR_statx) && defined(__NR_statx) 7413 static inline abi_long host_to_target_statx(struct target_statx *host_stx, 7414 abi_ulong target_addr) 7415 { 7416 struct target_statx *target_stx; 7417 7418 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) { 7419 return -TARGET_EFAULT; 7420 } 7421 memset(target_stx, 0, sizeof(*target_stx)); 7422 7423 __put_user(host_stx->stx_mask, &target_stx->stx_mask); 7424 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize); 7425 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes); 7426 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink); 7427 __put_user(host_stx->stx_uid, &target_stx->stx_uid); 7428 __put_user(host_stx->stx_gid, &target_stx->stx_gid); 7429 __put_user(host_stx->stx_mode, &target_stx->stx_mode); 7430 __put_user(host_stx->stx_ino, &target_stx->stx_ino); 7431 __put_user(host_stx->stx_size, &target_stx->stx_size); 7432 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks); 7433 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask); 7434 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec); 7435 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec); 7436 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec); 7437 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec); 7438 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec); 7439 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec); 7440 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec); 7441 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec); 7442 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major); 7443 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor); 7444 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major); 7445 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor); 7446 7447 unlock_user_struct(target_stx, target_addr, 1); 7448 7449 return 0; 7450 } 7451 #endif 7452 7453 static int do_sys_futex(int *uaddr, int op, int val, 7454 const struct timespec *timeout, int *uaddr2, 7455 int val3) 7456 { 7457 #if HOST_LONG_BITS == 64 7458 #if defined(__NR_futex) 7459 /* always a 64-bit time_t, it doesn't define _time64 version */ 7460 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7461 7462 #endif 7463 #else /* HOST_LONG_BITS == 64 */ 7464 #if defined(__NR_futex_time64) 7465 if (sizeof(timeout->tv_sec) == 8) { 7466 /* _time64 function on 32bit arch */ 7467 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3); 7468 } 7469 #endif 7470 #if defined(__NR_futex) 7471 /* old function on 32bit arch */ 7472 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7473 #endif 
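    /* If none of the variants above was compiled in, fall through to the
     * assertion below: a host that defines neither __NR_futex nor
     * __NR_futex_time64 cannot reach this point at run time.
     */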
7474 #endif /* HOST_LONG_BITS == 64 */ 7475 g_assert_not_reached(); 7476 } 7477 7478 static int do_safe_futex(int *uaddr, int op, int val, 7479 const struct timespec *timeout, int *uaddr2, 7480 int val3) 7481 { 7482 #if HOST_LONG_BITS == 64 7483 #if defined(__NR_futex) 7484 /* always a 64-bit time_t, it doesn't define _time64 version */ 7485 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7486 #endif 7487 #else /* HOST_LONG_BITS == 64 */ 7488 #if defined(__NR_futex_time64) 7489 if (sizeof(timeout->tv_sec) == 8) { 7490 /* _time64 function on 32bit arch */ 7491 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2, 7492 val3)); 7493 } 7494 #endif 7495 #if defined(__NR_futex) 7496 /* old function on 32bit arch */ 7497 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7498 #endif 7499 #endif /* HOST_LONG_BITS == 64 */ 7500 return -TARGET_ENOSYS; 7501 } 7502 7503 /* ??? Using host futex calls even when target atomic operations 7504 are not really atomic probably breaks things. However, implementing 7505 futexes locally would make futexes shared between multiple processes 7506 tricky. Then again, local futexes are probably useless here anyway, 7507 because guest atomic operations won't work either. */ 7508 #if defined(TARGET_NR_futex) 7509 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val, 7510 target_ulong timeout, target_ulong uaddr2, int val3) 7511 { 7512 struct timespec ts, *pts; 7513 int base_op; 7514 7515 /* ??? We assume FUTEX_* constants are the same on both host 7516 and target. */ 7517 #ifdef FUTEX_CMD_MASK 7518 base_op = op & FUTEX_CMD_MASK; 7519 #else 7520 base_op = op; 7521 #endif 7522 switch (base_op) { 7523 case FUTEX_WAIT: 7524 case FUTEX_WAIT_BITSET: 7525 if (timeout) { 7526 pts = &ts; 7527 if (target_to_host_timespec(pts, timeout)) { return -TARGET_EFAULT; } 7528 } else { 7529 pts = NULL; 7530 } 7531 return do_safe_futex(g2h(cpu, uaddr), 7532 op, tswap32(val), pts, NULL, val3); 7533 case FUTEX_WAKE: 7534 return do_safe_futex(g2h(cpu, uaddr), 7535 op, val, NULL, NULL, 0); 7536 case FUTEX_FD: 7537 return do_safe_futex(g2h(cpu, uaddr), 7538 op, val, NULL, NULL, 0); 7539 case FUTEX_REQUEUE: 7540 case FUTEX_CMP_REQUEUE: 7541 case FUTEX_WAKE_OP: 7542 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7543 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7544 But the prototype takes a `struct timespec *'; insert casts 7545 to satisfy the compiler. We do not need to tswap TIMEOUT 7546 since it's not compared to guest memory. */ 7547 pts = (struct timespec *)(uintptr_t) timeout; 7548 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), 7549 (base_op == FUTEX_CMP_REQUEUE 7550 ? tswap32(val3) : val3)); 7551 default: 7552 return -TARGET_ENOSYS; 7553 } 7554 } 7555 #endif 7556 7557 #if defined(TARGET_NR_futex_time64) 7558 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op, 7559 int val, target_ulong timeout, 7560 target_ulong uaddr2, int val3) 7561 { 7562 struct timespec ts, *pts; 7563 int base_op; 7564 7565 /* ??? We assume FUTEX_* constants are the same on both host 7566 and target.
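In practice this assumption holds: the FUTEX_* operation codes come
from the generic <linux/futex.h> UAPI header and do not vary per
architecture.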
*/ 7567 #ifdef FUTEX_CMD_MASK 7568 base_op = op & FUTEX_CMD_MASK; 7569 #else 7570 base_op = op; 7571 #endif 7572 switch (base_op) { 7573 case FUTEX_WAIT: 7574 case FUTEX_WAIT_BITSET: 7575 if (timeout) { 7576 pts = &ts; 7577 if (target_to_host_timespec64(pts, timeout)) { 7578 return -TARGET_EFAULT; 7579 } 7580 } else { 7581 pts = NULL; 7582 } 7583 return do_safe_futex(g2h(cpu, uaddr), op, 7584 tswap32(val), pts, NULL, val3); 7585 case FUTEX_WAKE: 7586 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); 7587 case FUTEX_FD: 7588 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); 7589 case FUTEX_REQUEUE: 7590 case FUTEX_CMP_REQUEUE: 7591 case FUTEX_WAKE_OP: 7592 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7593 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7594 But the prototype takes a `struct timespec *'; insert casts 7595 to satisfy the compiler. We do not need to tswap TIMEOUT 7596 since it's not compared to guest memory. */ 7597 pts = (struct timespec *)(uintptr_t) timeout; 7598 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), 7599 (base_op == FUTEX_CMP_REQUEUE 7600 ? tswap32(val3) : val3)); 7601 default: 7602 return -TARGET_ENOSYS; 7603 } 7604 } 7605 #endif 7606 7607 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7608 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7609 abi_long handle, abi_long mount_id, 7610 abi_long flags) 7611 { 7612 struct file_handle *target_fh; 7613 struct file_handle *fh; 7614 int mid = 0; 7615 abi_long ret; 7616 char *name; 7617 unsigned int size, total_size; 7618 7619 if (get_user_s32(size, handle)) { 7620 return -TARGET_EFAULT; 7621 } 7622 7623 name = lock_user_string(pathname); 7624 if (!name) { 7625 return -TARGET_EFAULT; 7626 } 7627 7628 total_size = sizeof(struct file_handle) + size; 7629 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7630 if (!target_fh) { 7631 unlock_user(name, pathname, 0); 7632 return -TARGET_EFAULT; 7633 } 7634 7635 fh = g_malloc0(total_size); 7636 fh->handle_bytes = size; 7637 7638 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7639 unlock_user(name, pathname, 0); 7640 7641 /* man name_to_handle_at(2): 7642 * Other than the use of the handle_bytes field, the caller should treat 7643 * the file_handle structure as an opaque data type 7644 */ 7645 7646 memcpy(target_fh, fh, total_size); 7647 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7648 target_fh->handle_type = tswap32(fh->handle_type); 7649 g_free(fh); 7650 unlock_user(target_fh, handle, total_size); 7651 7652 if (put_user_s32(mid, mount_id)) { 7653 return -TARGET_EFAULT; 7654 } 7655 7656 return ret; 7657 7658 } 7659 #endif 7660 7661 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7662 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7663 abi_long flags) 7664 { 7665 struct file_handle *target_fh; 7666 struct file_handle *fh; 7667 unsigned int size, total_size; 7668 abi_long ret; 7669 7670 if (get_user_s32(size, handle)) { 7671 return -TARGET_EFAULT; 7672 } 7673 7674 total_size = sizeof(struct file_handle) + size; 7675 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7676 if (!target_fh) { 7677 return -TARGET_EFAULT; 7678 } 7679 7680 fh = g_memdup(target_fh, total_size); 7681 fh->handle_bytes = size; 7682 fh->handle_type = tswap32(target_fh->handle_type); 7683 7684 ret = get_errno(open_by_handle_at(mount_fd, fh, 7685 target_to_host_bitmask(flags, 
fcntl_flags_tbl))); 7686 7687 g_free(fh); 7688 7689 unlock_user(target_fh, handle, total_size); 7690 7691 return ret; 7692 } 7693 #endif 7694 7695 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7696 7697 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7698 { 7699 int host_flags; 7700 target_sigset_t *target_mask; 7701 sigset_t host_mask; 7702 abi_long ret; 7703 7704 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) { 7705 return -TARGET_EINVAL; 7706 } 7707 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7708 return -TARGET_EFAULT; 7709 } 7710 7711 target_to_host_sigset(&host_mask, target_mask); 7712 7713 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7714 7715 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7716 if (ret >= 0) { 7717 fd_trans_register(ret, &target_signalfd_trans); 7718 } 7719 7720 unlock_user_struct(target_mask, mask, 0); 7721 7722 return ret; 7723 } 7724 #endif 7725 7726 /* Map host to target signal numbers for the wait family of syscalls. 7727 Assume all other status bits are the same. */ 7728 int host_to_target_waitstatus(int status) 7729 { 7730 if (WIFSIGNALED(status)) { 7731 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7732 } 7733 if (WIFSTOPPED(status)) { 7734 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7735 | (status & 0xff); 7736 } 7737 return status; 7738 } 7739 7740 static int open_self_cmdline(void *cpu_env, int fd) 7741 { 7742 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7743 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7744 int i; 7745 7746 for (i = 0; i < bprm->argc; i++) { 7747 size_t len = strlen(bprm->argv[i]) + 1; 7748 7749 if (write(fd, bprm->argv[i], len) != len) { 7750 return -1; 7751 } 7752 } 7753 7754 return 0; 7755 } 7756 7757 static int open_self_maps(void *cpu_env, int fd) 7758 { 7759 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7760 TaskState *ts = cpu->opaque; 7761 GSList *map_info = read_self_maps(); 7762 GSList *s; 7763 int count; 7764 7765 for (s = map_info; s; s = g_slist_next(s)) { 7766 MapInfo *e = (MapInfo *) s->data; 7767 7768 if (h2g_valid(e->start)) { 7769 unsigned long min = e->start; 7770 unsigned long max = e->end; 7771 int flags = page_get_flags(h2g(min)); 7772 const char *path; 7773 7774 max = h2g_valid(max - 1) ? 7775 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1; 7776 7777 if (page_check_range(h2g(min), max - min, flags) == -1) { 7778 continue; 7779 } 7780 7781 if (h2g(min) == ts->info->stack_limit) { 7782 path = "[stack]"; 7783 } else { 7784 path = e->path; 7785 } 7786 7787 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr 7788 " %c%c%c%c %08" PRIx64 " %s %"PRId64, 7789 h2g(min), h2g(max - 1) + 1, 7790 (flags & PAGE_READ) ? 'r' : '-', 7791 (flags & PAGE_WRITE_ORG) ? 'w' : '-', 7792 (flags & PAGE_EXEC) ? 'x' : '-', 7793 e->is_priv ? 'p' : '-', 7794 (uint64_t) e->offset, e->dev, e->inode); 7795 if (path) { 7796 dprintf(fd, "%*s%s\n", 73 - count, "", path); 7797 } else { 7798 dprintf(fd, "\n"); 7799 } 7800 } 7801 } 7802 7803 free_self_maps(map_info); 7804 7805 #ifdef TARGET_VSYSCALL_PAGE 7806 /* 7807 * We only support execution from the vsyscall page. 7808 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3. 
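 * The entry emitted below therefore advertises the page as
 * execute-only ("--xp"), just as a host kernel built with that
 * option reports in its own /proc/self/maps.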
7809 */ 7810 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx 7811 " --xp 00000000 00:00 0", 7812 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE); 7813 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]"); 7814 #endif 7815 7816 return 0; 7817 } 7818 7819 static int open_self_stat(void *cpu_env, int fd) 7820 { 7821 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7822 TaskState *ts = cpu->opaque; 7823 g_autoptr(GString) buf = g_string_new(NULL); 7824 int i; 7825 7826 for (i = 0; i < 44; i++) { 7827 if (i == 0) { 7828 /* pid */ 7829 g_string_printf(buf, FMT_pid " ", getpid()); 7830 } else if (i == 1) { 7831 /* app name */ 7832 gchar *bin = g_strrstr(ts->bprm->argv[0], "/"); 7833 bin = bin ? bin + 1 : ts->bprm->argv[0]; 7834 g_string_printf(buf, "(%.15s) ", bin); 7835 } else if (i == 3) { 7836 /* ppid */ 7837 g_string_printf(buf, FMT_pid " ", getppid()); 7838 } else if (i == 27) { 7839 /* stack bottom */ 7840 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack); 7841 } else { 7842 /* for the rest, there is MasterCard */ 7843 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' '); 7844 } 7845 7846 if (write(fd, buf->str, buf->len) != buf->len) { 7847 return -1; 7848 } 7849 } 7850 7851 return 0; 7852 } 7853 7854 static int open_self_auxv(void *cpu_env, int fd) 7855 { 7856 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7857 TaskState *ts = cpu->opaque; 7858 abi_ulong auxv = ts->info->saved_auxv; 7859 abi_ulong len = ts->info->auxv_len; 7860 char *ptr; 7861 7862 /* 7863 * Auxiliary vector is stored in target process stack. 7864 * read in whole auxv vector and copy it to file 7865 */ 7866 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7867 if (ptr != NULL) { 7868 while (len > 0) { 7869 ssize_t r; 7870 r = write(fd, ptr, len); 7871 if (r <= 0) { 7872 break; 7873 } 7874 len -= r; 7875 ptr += r; 7876 } 7877 lseek(fd, 0, SEEK_SET); 7878 unlock_user(ptr, auxv, len); 7879 } 7880 7881 return 0; 7882 } 7883 7884 static int is_proc_myself(const char *filename, const char *entry) 7885 { 7886 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 7887 filename += strlen("/proc/"); 7888 if (!strncmp(filename, "self/", strlen("self/"))) { 7889 filename += strlen("self/"); 7890 } else if (*filename >= '1' && *filename <= '9') { 7891 char myself[80]; 7892 snprintf(myself, sizeof(myself), "%d/", getpid()); 7893 if (!strncmp(filename, myself, strlen(myself))) { 7894 filename += strlen(myself); 7895 } else { 7896 return 0; 7897 } 7898 } else { 7899 return 0; 7900 } 7901 if (!strcmp(filename, entry)) { 7902 return 1; 7903 } 7904 } 7905 return 0; 7906 } 7907 7908 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \ 7909 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) 7910 static int is_proc(const char *filename, const char *entry) 7911 { 7912 return strcmp(filename, entry) == 0; 7913 } 7914 #endif 7915 7916 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7917 static int open_net_route(void *cpu_env, int fd) 7918 { 7919 FILE *fp; 7920 char *line = NULL; 7921 size_t len = 0; 7922 ssize_t read; 7923 7924 fp = fopen("/proc/net/route", "r"); 7925 if (fp == NULL) { 7926 return -1; 7927 } 7928 7929 /* read header */ 7930 7931 read = getline(&line, &len, fp); 7932 dprintf(fd, "%s", line); 7933 7934 /* read routes */ 7935 7936 while ((read = getline(&line, &len, fp)) != -1) { 7937 char iface[16]; 7938 uint32_t dest, gw, mask; 7939 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 7940 int fields; 7941 7942 fields = 
sscanf(line, 7943 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7944 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 7945 &mask, &mtu, &window, &irtt); 7946 if (fields != 11) { 7947 continue; 7948 } 7949 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7950 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 7951 metric, tswap32(mask), mtu, window, irtt); 7952 } 7953 7954 free(line); 7955 fclose(fp); 7956 7957 return 0; 7958 } 7959 #endif 7960 7961 #if defined(TARGET_SPARC) 7962 static int open_cpuinfo(void *cpu_env, int fd) 7963 { 7964 dprintf(fd, "type\t\t: sun4u\n"); 7965 return 0; 7966 } 7967 #endif 7968 7969 #if defined(TARGET_HPPA) 7970 static int open_cpuinfo(void *cpu_env, int fd) 7971 { 7972 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n"); 7973 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n"); 7974 dprintf(fd, "capabilities\t: os32\n"); 7975 dprintf(fd, "model\t\t: 9000/778/B160L\n"); 7976 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n"); 7977 return 0; 7978 } 7979 #endif 7980 7981 #if defined(TARGET_M68K) 7982 static int open_hardware(void *cpu_env, int fd) 7983 { 7984 dprintf(fd, "Model:\t\tqemu-m68k\n"); 7985 return 0; 7986 } 7987 #endif 7988 7989 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 7990 { 7991 struct fake_open { 7992 const char *filename; 7993 int (*fill)(void *cpu_env, int fd); 7994 int (*cmp)(const char *s1, const char *s2); 7995 }; 7996 const struct fake_open *fake_open; 7997 static const struct fake_open fakes[] = { 7998 { "maps", open_self_maps, is_proc_myself }, 7999 { "stat", open_self_stat, is_proc_myself }, 8000 { "auxv", open_self_auxv, is_proc_myself }, 8001 { "cmdline", open_self_cmdline, is_proc_myself }, 8002 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 8003 { "/proc/net/route", open_net_route, is_proc }, 8004 #endif 8005 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) 8006 { "/proc/cpuinfo", open_cpuinfo, is_proc }, 8007 #endif 8008 #if defined(TARGET_M68K) 8009 { "/proc/hardware", open_hardware, is_proc }, 8010 #endif 8011 { NULL, NULL, NULL } 8012 }; 8013 8014 if (is_proc_myself(pathname, "exe")) { 8015 int execfd = qemu_getauxval(AT_EXECFD); 8016 return execfd ? 
execfd : safe_openat(dirfd, exec_path, flags, mode); 8017 } 8018 8019 for (fake_open = fakes; fake_open->filename; fake_open++) { 8020 if (fake_open->cmp(pathname, fake_open->filename)) { 8021 break; 8022 } 8023 } 8024 8025 if (fake_open->filename) { 8026 const char *tmpdir; 8027 char filename[PATH_MAX]; 8028 int fd, r; 8029 8030 /* create temporary file to map stat to */ 8031 tmpdir = getenv("TMPDIR"); 8032 if (!tmpdir) 8033 tmpdir = "/tmp"; 8034 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 8035 fd = mkstemp(filename); 8036 if (fd < 0) { 8037 return fd; 8038 } 8039 unlink(filename); 8040 8041 if ((r = fake_open->fill(cpu_env, fd))) { 8042 int e = errno; 8043 close(fd); 8044 errno = e; 8045 return r; 8046 } 8047 lseek(fd, 0, SEEK_SET); 8048 8049 return fd; 8050 } 8051 8052 return safe_openat(dirfd, path(pathname), flags, mode); 8053 } 8054 8055 #define TIMER_MAGIC 0x0caf0000 8056 #define TIMER_MAGIC_MASK 0xffff0000 8057 8058 /* Convert QEMU provided timer ID back to internal 16bit index format */ 8059 static target_timer_t get_timer_id(abi_long arg) 8060 { 8061 target_timer_t timerid = arg; 8062 8063 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 8064 return -TARGET_EINVAL; 8065 } 8066 8067 timerid &= 0xffff; 8068 8069 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 8070 return -TARGET_EINVAL; 8071 } 8072 8073 return timerid; 8074 } 8075 8076 static int target_to_host_cpu_mask(unsigned long *host_mask, 8077 size_t host_size, 8078 abi_ulong target_addr, 8079 size_t target_size) 8080 { 8081 unsigned target_bits = sizeof(abi_ulong) * 8; 8082 unsigned host_bits = sizeof(*host_mask) * 8; 8083 abi_ulong *target_mask; 8084 unsigned i, j; 8085 8086 assert(host_size >= target_size); 8087 8088 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1); 8089 if (!target_mask) { 8090 return -TARGET_EFAULT; 8091 } 8092 memset(host_mask, 0, host_size); 8093 8094 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8095 unsigned bit = i * target_bits; 8096 abi_ulong val; 8097 8098 __get_user(val, &target_mask[i]); 8099 for (j = 0; j < target_bits; j++, bit++) { 8100 if (val & (1UL << j)) { 8101 host_mask[bit / host_bits] |= 1UL << (bit % host_bits); 8102 } 8103 } 8104 } 8105 8106 unlock_user(target_mask, target_addr, 0); 8107 return 0; 8108 } 8109 8110 static int host_to_target_cpu_mask(const unsigned long *host_mask, 8111 size_t host_size, 8112 abi_ulong target_addr, 8113 size_t target_size) 8114 { 8115 unsigned target_bits = sizeof(abi_ulong) * 8; 8116 unsigned host_bits = sizeof(*host_mask) * 8; 8117 abi_ulong *target_mask; 8118 unsigned i, j; 8119 8120 assert(host_size >= target_size); 8121 8122 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0); 8123 if (!target_mask) { 8124 return -TARGET_EFAULT; 8125 } 8126 8127 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8128 unsigned bit = i * target_bits; 8129 abi_ulong val = 0; 8130 8131 for (j = 0; j < target_bits; j++, bit++) { 8132 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 8133 val |= 1UL << j; 8134 } 8135 } 8136 __put_user(val, &target_mask[i]); 8137 } 8138 8139 unlock_user(target_mask, target_addr, target_size); 8140 return 0; 8141 } 8142 8143 #ifdef TARGET_NR_getdents 8144 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count) 8145 { 8146 g_autofree void *hdirp = NULL; 8147 void *tdirp; 8148 int hlen, hoff, toff; 8149 int hreclen, treclen; 8150 off64_t prev_diroff = 0; 8151 8152 hdirp = g_try_malloc(count); 8153 if (!hdirp) { 8154 return -TARGET_ENOMEM; 
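    /*
     * Note: count is guest-supplied, which is why g_try_malloc() is
     * used above; unlike g_malloc(), it returns NULL instead of
     * aborting, so an oversized request fails cleanly with ENOMEM.
     */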
8155 } 8156 8157 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 8158 hlen = sys_getdents(dirfd, hdirp, count); 8159 #else 8160 hlen = sys_getdents64(dirfd, hdirp, count); 8161 #endif 8162 8163 hlen = get_errno(hlen); 8164 if (is_error(hlen)) { 8165 return hlen; 8166 } 8167 8168 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0); 8169 if (!tdirp) { 8170 return -TARGET_EFAULT; 8171 } 8172 8173 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) { 8174 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 8175 struct linux_dirent *hde = hdirp + hoff; 8176 #else 8177 struct linux_dirent64 *hde = hdirp + hoff; 8178 #endif 8179 struct target_dirent *tde = tdirp + toff; 8180 int namelen; 8181 uint8_t type; 8182 8183 namelen = strlen(hde->d_name); 8184 hreclen = hde->d_reclen; 8185 treclen = offsetof(struct target_dirent, d_name) + namelen + 2; 8186 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent)); 8187 8188 if (toff + treclen > count) { 8189 /* 8190 * If the host struct is smaller than the target struct, or 8191 * requires less alignment and thus packs into less space, 8192 * then the host can return more entries than we can pass 8193 * on to the guest. 8194 */ 8195 if (toff == 0) { 8196 toff = -TARGET_EINVAL; /* result buffer is too small */ 8197 break; 8198 } 8199 /* 8200 * Return what we have, resetting the file pointer to the 8201 * location of the first record not returned. 8202 */ 8203 lseek64(dirfd, prev_diroff, SEEK_SET); 8204 break; 8205 } 8206 8207 prev_diroff = hde->d_off; 8208 tde->d_ino = tswapal(hde->d_ino); 8209 tde->d_off = tswapal(hde->d_off); 8210 tde->d_reclen = tswap16(treclen); 8211 memcpy(tde->d_name, hde->d_name, namelen + 1); 8212 8213 /* 8214 * The getdents type is in what was formerly a padding byte at the 8215 * end of the structure. 8216 */ 8217 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 8218 type = *((uint8_t *)hde + hreclen - 1); 8219 #else 8220 type = hde->d_type; 8221 #endif 8222 *((uint8_t *)tde + treclen - 1) = type; 8223 } 8224 8225 unlock_user(tdirp, arg2, toff); 8226 return toff; 8227 } 8228 #endif /* TARGET_NR_getdents */ 8229 8230 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 8231 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) 8232 { 8233 g_autofree void *hdirp = NULL; 8234 void *tdirp; 8235 int hlen, hoff, toff; 8236 int hreclen, treclen; 8237 off64_t prev_diroff = 0; 8238 8239 hdirp = g_try_malloc(count); 8240 if (!hdirp) { 8241 return -TARGET_ENOMEM; 8242 } 8243 8244 hlen = get_errno(sys_getdents64(dirfd, hdirp, count)); 8245 if (is_error(hlen)) { 8246 return hlen; 8247 } 8248 8249 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0); 8250 if (!tdirp) { 8251 return -TARGET_EFAULT; 8252 } 8253 8254 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) { 8255 struct linux_dirent64 *hde = hdirp + hoff; 8256 struct target_dirent64 *tde = tdirp + toff; 8257 int namelen; 8258 8259 namelen = strlen(hde->d_name) + 1; 8260 hreclen = hde->d_reclen; 8261 treclen = offsetof(struct target_dirent64, d_name) + namelen; 8262 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64)); 8263 8264 if (toff + treclen > count) { 8265 /* 8266 * If the host struct is smaller than the target struct, or 8267 * requires less alignment and thus packs into less space, 8268 * then the host can return more entries than we can pass 8269 * on to the guest. 
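 * Returning a short byte count in that case is fine: getdents64()
 * does not promise to fill the buffer, so the guest will simply
 * issue another call that picks up at the offset we push back to
 * below.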
8270                  */
8271                 if (toff == 0) {
8272                     toff = -TARGET_EINVAL; /* result buffer is too small */
8273                     break;
8274                 }
8275                 /*
8276                  * Return what we have, resetting the file pointer to the
8277                  * location of the first record not returned.
8278                  */
8279                 lseek64(dirfd, prev_diroff, SEEK_SET);
8280                 break;
8281             }
8282
8283             prev_diroff = hde->d_off;
8284             tde->d_ino = tswap64(hde->d_ino);
8285             tde->d_off = tswap64(hde->d_off);
8286             tde->d_reclen = tswap16(treclen);
8287             tde->d_type = hde->d_type;
8288             memcpy(tde->d_name, hde->d_name, namelen);
8289         }
8290
8291         unlock_user(tdirp, arg2, toff);
8292         return toff;
8293 }
8294 #endif /* TARGET_NR_getdents64 */
8295
8296 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8297 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8298 #endif
8299
8300 /* This is an internal helper for do_syscall so that it is easier
8301  * to have a single return point, allowing actions such as logging
8302  * of syscall results to be performed.
8303  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8304  */
8305 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8306                             abi_long arg2, abi_long arg3, abi_long arg4,
8307                             abi_long arg5, abi_long arg6, abi_long arg7,
8308                             abi_long arg8)
8309 {
8310     CPUState *cpu = env_cpu(cpu_env);
8311     abi_long ret;
8312 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8313     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8314     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8315     || defined(TARGET_NR_statx)
8316     struct stat st;
8317 #endif
8318 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8319     || defined(TARGET_NR_fstatfs)
8320     struct statfs stfs;
8321 #endif
8322     void *p;
8323
8324     switch(num) {
8325     case TARGET_NR_exit:
8326         /* In old applications this may be used to implement _exit(2).
8327            However, in threaded applications it is used for thread
8328            termination, and exit_group(2) is used for application
                termination.
8329            Do thread termination if we have more than one thread. */
8330
8331         if (block_signals()) {
8332             return -QEMU_ERESTARTSYS;
8333         }
8334
8335         pthread_mutex_lock(&clone_lock);
8336
8337         if (CPU_NEXT(first_cpu)) {
8338             TaskState *ts = cpu->opaque;
8339
8340             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8341             object_unref(OBJECT(cpu));
8342             /*
8343              * At this point the CPU should be unrealized and removed
8344              * from cpu lists. We can clean up the rest of the thread
8345              * data without the lock held.
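 * The child_tidptr store below implements CLONE_CHILD_CLEARTID
 * semantics: clear the TID word in the still-shared address space
 * and FUTEX_WAKE any waiters, which is what releases a thread
 * blocked in pthread_join().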
8346 */ 8347 8348 pthread_mutex_unlock(&clone_lock); 8349 8350 if (ts->child_tidptr) { 8351 put_user_u32(0, ts->child_tidptr); 8352 do_sys_futex(g2h(cpu, ts->child_tidptr), 8353 FUTEX_WAKE, INT_MAX, NULL, NULL, 0); 8354 } 8355 thread_cpu = NULL; 8356 g_free(ts); 8357 rcu_unregister_thread(); 8358 pthread_exit(NULL); 8359 } 8360 8361 pthread_mutex_unlock(&clone_lock); 8362 preexit_cleanup(cpu_env, arg1); 8363 _exit(arg1); 8364 return 0; /* avoid warning */ 8365 case TARGET_NR_read: 8366 if (arg2 == 0 && arg3 == 0) { 8367 return get_errno(safe_read(arg1, 0, 0)); 8368 } else { 8369 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8370 return -TARGET_EFAULT; 8371 ret = get_errno(safe_read(arg1, p, arg3)); 8372 if (ret >= 0 && 8373 fd_trans_host_to_target_data(arg1)) { 8374 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8375 } 8376 unlock_user(p, arg2, ret); 8377 } 8378 return ret; 8379 case TARGET_NR_write: 8380 if (arg2 == 0 && arg3 == 0) { 8381 return get_errno(safe_write(arg1, 0, 0)); 8382 } 8383 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8384 return -TARGET_EFAULT; 8385 if (fd_trans_target_to_host_data(arg1)) { 8386 void *copy = g_malloc(arg3); 8387 memcpy(copy, p, arg3); 8388 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8389 if (ret >= 0) { 8390 ret = get_errno(safe_write(arg1, copy, ret)); 8391 } 8392 g_free(copy); 8393 } else { 8394 ret = get_errno(safe_write(arg1, p, arg3)); 8395 } 8396 unlock_user(p, arg2, 0); 8397 return ret; 8398 8399 #ifdef TARGET_NR_open 8400 case TARGET_NR_open: 8401 if (!(p = lock_user_string(arg1))) 8402 return -TARGET_EFAULT; 8403 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8404 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8405 arg3)); 8406 fd_trans_unregister(ret); 8407 unlock_user(p, arg1, 0); 8408 return ret; 8409 #endif 8410 case TARGET_NR_openat: 8411 if (!(p = lock_user_string(arg2))) 8412 return -TARGET_EFAULT; 8413 ret = get_errno(do_openat(cpu_env, arg1, p, 8414 target_to_host_bitmask(arg3, fcntl_flags_tbl), 8415 arg4)); 8416 fd_trans_unregister(ret); 8417 unlock_user(p, arg2, 0); 8418 return ret; 8419 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8420 case TARGET_NR_name_to_handle_at: 8421 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8422 return ret; 8423 #endif 8424 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8425 case TARGET_NR_open_by_handle_at: 8426 ret = do_open_by_handle_at(arg1, arg2, arg3); 8427 fd_trans_unregister(ret); 8428 return ret; 8429 #endif 8430 case TARGET_NR_close: 8431 fd_trans_unregister(arg1); 8432 return get_errno(close(arg1)); 8433 8434 case TARGET_NR_brk: 8435 return do_brk(arg1); 8436 #ifdef TARGET_NR_fork 8437 case TARGET_NR_fork: 8438 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8439 #endif 8440 #ifdef TARGET_NR_waitpid 8441 case TARGET_NR_waitpid: 8442 { 8443 int status; 8444 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8445 if (!is_error(ret) && arg2 && ret 8446 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8447 return -TARGET_EFAULT; 8448 } 8449 return ret; 8450 #endif 8451 #ifdef TARGET_NR_waitid 8452 case TARGET_NR_waitid: 8453 { 8454 siginfo_t info; 8455 info.si_pid = 0; 8456 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8457 if (!is_error(ret) && arg3 && info.si_pid != 0) { 8458 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8459 return -TARGET_EFAULT; 8460 host_to_target_siginfo(p, &info); 8461 unlock_user(p, arg3, 
sizeof(target_siginfo_t)); 8462 } 8463 } 8464 return ret; 8465 #endif 8466 #ifdef TARGET_NR_creat /* not on alpha */ 8467 case TARGET_NR_creat: 8468 if (!(p = lock_user_string(arg1))) 8469 return -TARGET_EFAULT; 8470 ret = get_errno(creat(p, arg2)); 8471 fd_trans_unregister(ret); 8472 unlock_user(p, arg1, 0); 8473 return ret; 8474 #endif 8475 #ifdef TARGET_NR_link 8476 case TARGET_NR_link: 8477 { 8478 void * p2; 8479 p = lock_user_string(arg1); 8480 p2 = lock_user_string(arg2); 8481 if (!p || !p2) 8482 ret = -TARGET_EFAULT; 8483 else 8484 ret = get_errno(link(p, p2)); 8485 unlock_user(p2, arg2, 0); 8486 unlock_user(p, arg1, 0); 8487 } 8488 return ret; 8489 #endif 8490 #if defined(TARGET_NR_linkat) 8491 case TARGET_NR_linkat: 8492 { 8493 void * p2 = NULL; 8494 if (!arg2 || !arg4) 8495 return -TARGET_EFAULT; 8496 p = lock_user_string(arg2); 8497 p2 = lock_user_string(arg4); 8498 if (!p || !p2) 8499 ret = -TARGET_EFAULT; 8500 else 8501 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8502 unlock_user(p, arg2, 0); 8503 unlock_user(p2, arg4, 0); 8504 } 8505 return ret; 8506 #endif 8507 #ifdef TARGET_NR_unlink 8508 case TARGET_NR_unlink: 8509 if (!(p = lock_user_string(arg1))) 8510 return -TARGET_EFAULT; 8511 ret = get_errno(unlink(p)); 8512 unlock_user(p, arg1, 0); 8513 return ret; 8514 #endif 8515 #if defined(TARGET_NR_unlinkat) 8516 case TARGET_NR_unlinkat: 8517 if (!(p = lock_user_string(arg2))) 8518 return -TARGET_EFAULT; 8519 ret = get_errno(unlinkat(arg1, p, arg3)); 8520 unlock_user(p, arg2, 0); 8521 return ret; 8522 #endif 8523 case TARGET_NR_execve: 8524 { 8525 char **argp, **envp; 8526 int argc, envc; 8527 abi_ulong gp; 8528 abi_ulong guest_argp; 8529 abi_ulong guest_envp; 8530 abi_ulong addr; 8531 char **q; 8532 8533 argc = 0; 8534 guest_argp = arg2; 8535 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8536 if (get_user_ual(addr, gp)) 8537 return -TARGET_EFAULT; 8538 if (!addr) 8539 break; 8540 argc++; 8541 } 8542 envc = 0; 8543 guest_envp = arg3; 8544 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8545 if (get_user_ual(addr, gp)) 8546 return -TARGET_EFAULT; 8547 if (!addr) 8548 break; 8549 envc++; 8550 } 8551 8552 argp = g_new0(char *, argc + 1); 8553 envp = g_new0(char *, envc + 1); 8554 8555 for (gp = guest_argp, q = argp; gp; 8556 gp += sizeof(abi_ulong), q++) { 8557 if (get_user_ual(addr, gp)) 8558 goto execve_efault; 8559 if (!addr) 8560 break; 8561 if (!(*q = lock_user_string(addr))) 8562 goto execve_efault; 8563 } 8564 *q = NULL; 8565 8566 for (gp = guest_envp, q = envp; gp; 8567 gp += sizeof(abi_ulong), q++) { 8568 if (get_user_ual(addr, gp)) 8569 goto execve_efault; 8570 if (!addr) 8571 break; 8572 if (!(*q = lock_user_string(addr))) 8573 goto execve_efault; 8574 } 8575 *q = NULL; 8576 8577 if (!(p = lock_user_string(arg1))) 8578 goto execve_efault; 8579 /* Although execve() is not an interruptible syscall it is 8580 * a special case where we must use the safe_syscall wrapper: 8581 * if we allow a signal to happen before we make the host 8582 * syscall then we will 'lose' it, because at the point of 8583 * execve the process leaves QEMU's control. So we use the 8584 * safe syscall wrapper to ensure that we either take the 8585 * signal as a guest signal, or else it does not happen 8586 * before the execve completes and makes it the other 8587 * program's problem. 
8588 */ 8589 ret = get_errno(safe_execve(p, argp, envp)); 8590 unlock_user(p, arg1, 0); 8591 8592 goto execve_end; 8593 8594 execve_efault: 8595 ret = -TARGET_EFAULT; 8596 8597 execve_end: 8598 for (gp = guest_argp, q = argp; *q; 8599 gp += sizeof(abi_ulong), q++) { 8600 if (get_user_ual(addr, gp) 8601 || !addr) 8602 break; 8603 unlock_user(*q, addr, 0); 8604 } 8605 for (gp = guest_envp, q = envp; *q; 8606 gp += sizeof(abi_ulong), q++) { 8607 if (get_user_ual(addr, gp) 8608 || !addr) 8609 break; 8610 unlock_user(*q, addr, 0); 8611 } 8612 8613 g_free(argp); 8614 g_free(envp); 8615 } 8616 return ret; 8617 case TARGET_NR_chdir: 8618 if (!(p = lock_user_string(arg1))) 8619 return -TARGET_EFAULT; 8620 ret = get_errno(chdir(p)); 8621 unlock_user(p, arg1, 0); 8622 return ret; 8623 #ifdef TARGET_NR_time 8624 case TARGET_NR_time: 8625 { 8626 time_t host_time; 8627 ret = get_errno(time(&host_time)); 8628 if (!is_error(ret) 8629 && arg1 8630 && put_user_sal(host_time, arg1)) 8631 return -TARGET_EFAULT; 8632 } 8633 return ret; 8634 #endif 8635 #ifdef TARGET_NR_mknod 8636 case TARGET_NR_mknod: 8637 if (!(p = lock_user_string(arg1))) 8638 return -TARGET_EFAULT; 8639 ret = get_errno(mknod(p, arg2, arg3)); 8640 unlock_user(p, arg1, 0); 8641 return ret; 8642 #endif 8643 #if defined(TARGET_NR_mknodat) 8644 case TARGET_NR_mknodat: 8645 if (!(p = lock_user_string(arg2))) 8646 return -TARGET_EFAULT; 8647 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8648 unlock_user(p, arg2, 0); 8649 return ret; 8650 #endif 8651 #ifdef TARGET_NR_chmod 8652 case TARGET_NR_chmod: 8653 if (!(p = lock_user_string(arg1))) 8654 return -TARGET_EFAULT; 8655 ret = get_errno(chmod(p, arg2)); 8656 unlock_user(p, arg1, 0); 8657 return ret; 8658 #endif 8659 #ifdef TARGET_NR_lseek 8660 case TARGET_NR_lseek: 8661 return get_errno(lseek(arg1, arg2, arg3)); 8662 #endif 8663 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8664 /* Alpha specific */ 8665 case TARGET_NR_getxpid: 8666 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8667 return get_errno(getpid()); 8668 #endif 8669 #ifdef TARGET_NR_getpid 8670 case TARGET_NR_getpid: 8671 return get_errno(getpid()); 8672 #endif 8673 case TARGET_NR_mount: 8674 { 8675 /* need to look at the data field */ 8676 void *p2, *p3; 8677 8678 if (arg1) { 8679 p = lock_user_string(arg1); 8680 if (!p) { 8681 return -TARGET_EFAULT; 8682 } 8683 } else { 8684 p = NULL; 8685 } 8686 8687 p2 = lock_user_string(arg2); 8688 if (!p2) { 8689 if (arg1) { 8690 unlock_user(p, arg1, 0); 8691 } 8692 return -TARGET_EFAULT; 8693 } 8694 8695 if (arg3) { 8696 p3 = lock_user_string(arg3); 8697 if (!p3) { 8698 if (arg1) { 8699 unlock_user(p, arg1, 0); 8700 } 8701 unlock_user(p2, arg2, 0); 8702 return -TARGET_EFAULT; 8703 } 8704 } else { 8705 p3 = NULL; 8706 } 8707 8708 /* FIXME - arg5 should be locked, but it isn't clear how to 8709 * do that since it's not guaranteed to be a NULL-terminated 8710 * string. 
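 * Instead the guest pointer is passed through g2h() below, so a
 * thoroughly bad address surfaces as EFAULT from the host mount()
 * call rather than as a cleanly synthesized -TARGET_EFAULT.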
8711 */ 8712 if (!arg5) { 8713 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8714 } else { 8715 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5)); 8716 } 8717 ret = get_errno(ret); 8718 8719 if (arg1) { 8720 unlock_user(p, arg1, 0); 8721 } 8722 unlock_user(p2, arg2, 0); 8723 if (arg3) { 8724 unlock_user(p3, arg3, 0); 8725 } 8726 } 8727 return ret; 8728 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 8729 #if defined(TARGET_NR_umount) 8730 case TARGET_NR_umount: 8731 #endif 8732 #if defined(TARGET_NR_oldumount) 8733 case TARGET_NR_oldumount: 8734 #endif 8735 if (!(p = lock_user_string(arg1))) 8736 return -TARGET_EFAULT; 8737 ret = get_errno(umount(p)); 8738 unlock_user(p, arg1, 0); 8739 return ret; 8740 #endif 8741 #ifdef TARGET_NR_stime /* not on alpha */ 8742 case TARGET_NR_stime: 8743 { 8744 struct timespec ts; 8745 ts.tv_nsec = 0; 8746 if (get_user_sal(ts.tv_sec, arg1)) { 8747 return -TARGET_EFAULT; 8748 } 8749 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 8750 } 8751 #endif 8752 #ifdef TARGET_NR_alarm /* not on alpha */ 8753 case TARGET_NR_alarm: 8754 return alarm(arg1); 8755 #endif 8756 #ifdef TARGET_NR_pause /* not on alpha */ 8757 case TARGET_NR_pause: 8758 if (!block_signals()) { 8759 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8760 } 8761 return -TARGET_EINTR; 8762 #endif 8763 #ifdef TARGET_NR_utime 8764 case TARGET_NR_utime: 8765 { 8766 struct utimbuf tbuf, *host_tbuf; 8767 struct target_utimbuf *target_tbuf; 8768 if (arg2) { 8769 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8770 return -TARGET_EFAULT; 8771 tbuf.actime = tswapal(target_tbuf->actime); 8772 tbuf.modtime = tswapal(target_tbuf->modtime); 8773 unlock_user_struct(target_tbuf, arg2, 0); 8774 host_tbuf = &tbuf; 8775 } else { 8776 host_tbuf = NULL; 8777 } 8778 if (!(p = lock_user_string(arg1))) 8779 return -TARGET_EFAULT; 8780 ret = get_errno(utime(p, host_tbuf)); 8781 unlock_user(p, arg1, 0); 8782 } 8783 return ret; 8784 #endif 8785 #ifdef TARGET_NR_utimes 8786 case TARGET_NR_utimes: 8787 { 8788 struct timeval *tvp, tv[2]; 8789 if (arg2) { 8790 if (copy_from_user_timeval(&tv[0], arg2) 8791 || copy_from_user_timeval(&tv[1], 8792 arg2 + sizeof(struct target_timeval))) 8793 return -TARGET_EFAULT; 8794 tvp = tv; 8795 } else { 8796 tvp = NULL; 8797 } 8798 if (!(p = lock_user_string(arg1))) 8799 return -TARGET_EFAULT; 8800 ret = get_errno(utimes(p, tvp)); 8801 unlock_user(p, arg1, 0); 8802 } 8803 return ret; 8804 #endif 8805 #if defined(TARGET_NR_futimesat) 8806 case TARGET_NR_futimesat: 8807 { 8808 struct timeval *tvp, tv[2]; 8809 if (arg3) { 8810 if (copy_from_user_timeval(&tv[0], arg3) 8811 || copy_from_user_timeval(&tv[1], 8812 arg3 + sizeof(struct target_timeval))) 8813 return -TARGET_EFAULT; 8814 tvp = tv; 8815 } else { 8816 tvp = NULL; 8817 } 8818 if (!(p = lock_user_string(arg2))) { 8819 return -TARGET_EFAULT; 8820 } 8821 ret = get_errno(futimesat(arg1, path(p), tvp)); 8822 unlock_user(p, arg2, 0); 8823 } 8824 return ret; 8825 #endif 8826 #ifdef TARGET_NR_access 8827 case TARGET_NR_access: 8828 if (!(p = lock_user_string(arg1))) { 8829 return -TARGET_EFAULT; 8830 } 8831 ret = get_errno(access(path(p), arg2)); 8832 unlock_user(p, arg1, 0); 8833 return ret; 8834 #endif 8835 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8836 case TARGET_NR_faccessat: 8837 if (!(p = lock_user_string(arg2))) { 8838 return -TARGET_EFAULT; 8839 } 8840 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8841 unlock_user(p, arg2, 0); 8842 return ret; 8843 #endif 8844 #ifdef 
TARGET_NR_nice /* not on alpha */ 8845 case TARGET_NR_nice: 8846 return get_errno(nice(arg1)); 8847 #endif 8848 case TARGET_NR_sync: 8849 sync(); 8850 return 0; 8851 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8852 case TARGET_NR_syncfs: 8853 return get_errno(syncfs(arg1)); 8854 #endif 8855 case TARGET_NR_kill: 8856 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8857 #ifdef TARGET_NR_rename 8858 case TARGET_NR_rename: 8859 { 8860 void *p2; 8861 p = lock_user_string(arg1); 8862 p2 = lock_user_string(arg2); 8863 if (!p || !p2) 8864 ret = -TARGET_EFAULT; 8865 else 8866 ret = get_errno(rename(p, p2)); 8867 unlock_user(p2, arg2, 0); 8868 unlock_user(p, arg1, 0); 8869 } 8870 return ret; 8871 #endif 8872 #if defined(TARGET_NR_renameat) 8873 case TARGET_NR_renameat: 8874 { 8875 void *p2; 8876 p = lock_user_string(arg2); 8877 p2 = lock_user_string(arg4); 8878 if (!p || !p2) 8879 ret = -TARGET_EFAULT; 8880 else 8881 ret = get_errno(renameat(arg1, p, arg3, p2)); 8882 unlock_user(p2, arg4, 0); 8883 unlock_user(p, arg2, 0); 8884 } 8885 return ret; 8886 #endif 8887 #if defined(TARGET_NR_renameat2) 8888 case TARGET_NR_renameat2: 8889 { 8890 void *p2; 8891 p = lock_user_string(arg2); 8892 p2 = lock_user_string(arg4); 8893 if (!p || !p2) { 8894 ret = -TARGET_EFAULT; 8895 } else { 8896 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 8897 } 8898 unlock_user(p2, arg4, 0); 8899 unlock_user(p, arg2, 0); 8900 } 8901 return ret; 8902 #endif 8903 #ifdef TARGET_NR_mkdir 8904 case TARGET_NR_mkdir: 8905 if (!(p = lock_user_string(arg1))) 8906 return -TARGET_EFAULT; 8907 ret = get_errno(mkdir(p, arg2)); 8908 unlock_user(p, arg1, 0); 8909 return ret; 8910 #endif 8911 #if defined(TARGET_NR_mkdirat) 8912 case TARGET_NR_mkdirat: 8913 if (!(p = lock_user_string(arg2))) 8914 return -TARGET_EFAULT; 8915 ret = get_errno(mkdirat(arg1, p, arg3)); 8916 unlock_user(p, arg2, 0); 8917 return ret; 8918 #endif 8919 #ifdef TARGET_NR_rmdir 8920 case TARGET_NR_rmdir: 8921 if (!(p = lock_user_string(arg1))) 8922 return -TARGET_EFAULT; 8923 ret = get_errno(rmdir(p)); 8924 unlock_user(p, arg1, 0); 8925 return ret; 8926 #endif 8927 case TARGET_NR_dup: 8928 ret = get_errno(dup(arg1)); 8929 if (ret >= 0) { 8930 fd_trans_dup(arg1, ret); 8931 } 8932 return ret; 8933 #ifdef TARGET_NR_pipe 8934 case TARGET_NR_pipe: 8935 return do_pipe(cpu_env, arg1, 0, 0); 8936 #endif 8937 #ifdef TARGET_NR_pipe2 8938 case TARGET_NR_pipe2: 8939 return do_pipe(cpu_env, arg1, 8940 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8941 #endif 8942 case TARGET_NR_times: 8943 { 8944 struct target_tms *tmsp; 8945 struct tms tms; 8946 ret = get_errno(times(&tms)); 8947 if (arg1) { 8948 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 8949 if (!tmsp) 8950 return -TARGET_EFAULT; 8951 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8952 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8953 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8954 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8955 } 8956 if (!is_error(ret)) 8957 ret = host_to_target_clock_t(ret); 8958 } 8959 return ret; 8960 case TARGET_NR_acct: 8961 if (arg1 == 0) { 8962 ret = get_errno(acct(NULL)); 8963 } else { 8964 if (!(p = lock_user_string(arg1))) { 8965 return -TARGET_EFAULT; 8966 } 8967 ret = get_errno(acct(path(p))); 8968 unlock_user(p, arg1, 0); 8969 } 8970 return ret; 8971 #ifdef TARGET_NR_umount2 8972 case TARGET_NR_umount2: 8973 if (!(p = lock_user_string(arg1))) 8974 
return -TARGET_EFAULT; 8975 ret = get_errno(umount2(p, arg2)); 8976 unlock_user(p, arg1, 0); 8977 return ret; 8978 #endif 8979 case TARGET_NR_ioctl: 8980 return do_ioctl(arg1, arg2, arg3); 8981 #ifdef TARGET_NR_fcntl 8982 case TARGET_NR_fcntl: 8983 return do_fcntl(arg1, arg2, arg3); 8984 #endif 8985 case TARGET_NR_setpgid: 8986 return get_errno(setpgid(arg1, arg2)); 8987 case TARGET_NR_umask: 8988 return get_errno(umask(arg1)); 8989 case TARGET_NR_chroot: 8990 if (!(p = lock_user_string(arg1))) 8991 return -TARGET_EFAULT; 8992 ret = get_errno(chroot(p)); 8993 unlock_user(p, arg1, 0); 8994 return ret; 8995 #ifdef TARGET_NR_dup2 8996 case TARGET_NR_dup2: 8997 ret = get_errno(dup2(arg1, arg2)); 8998 if (ret >= 0) { 8999 fd_trans_dup(arg1, arg2); 9000 } 9001 return ret; 9002 #endif 9003 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 9004 case TARGET_NR_dup3: 9005 { 9006 int host_flags; 9007 9008 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 9009 return -EINVAL; 9010 } 9011 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 9012 ret = get_errno(dup3(arg1, arg2, host_flags)); 9013 if (ret >= 0) { 9014 fd_trans_dup(arg1, arg2); 9015 } 9016 return ret; 9017 } 9018 #endif 9019 #ifdef TARGET_NR_getppid /* not on alpha */ 9020 case TARGET_NR_getppid: 9021 return get_errno(getppid()); 9022 #endif 9023 #ifdef TARGET_NR_getpgrp 9024 case TARGET_NR_getpgrp: 9025 return get_errno(getpgrp()); 9026 #endif 9027 case TARGET_NR_setsid: 9028 return get_errno(setsid()); 9029 #ifdef TARGET_NR_sigaction 9030 case TARGET_NR_sigaction: 9031 { 9032 #if defined(TARGET_MIPS) 9033 struct target_sigaction act, oact, *pact, *old_act; 9034 9035 if (arg2) { 9036 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9037 return -TARGET_EFAULT; 9038 act._sa_handler = old_act->_sa_handler; 9039 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 9040 act.sa_flags = old_act->sa_flags; 9041 unlock_user_struct(old_act, arg2, 0); 9042 pact = &act; 9043 } else { 9044 pact = NULL; 9045 } 9046 9047 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9048 9049 if (!is_error(ret) && arg3) { 9050 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9051 return -TARGET_EFAULT; 9052 old_act->_sa_handler = oact._sa_handler; 9053 old_act->sa_flags = oact.sa_flags; 9054 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 9055 old_act->sa_mask.sig[1] = 0; 9056 old_act->sa_mask.sig[2] = 0; 9057 old_act->sa_mask.sig[3] = 0; 9058 unlock_user_struct(old_act, arg3, 1); 9059 } 9060 #else 9061 struct target_old_sigaction *old_act; 9062 struct target_sigaction act, oact, *pact; 9063 if (arg2) { 9064 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9065 return -TARGET_EFAULT; 9066 act._sa_handler = old_act->_sa_handler; 9067 target_siginitset(&act.sa_mask, old_act->sa_mask); 9068 act.sa_flags = old_act->sa_flags; 9069 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9070 act.sa_restorer = old_act->sa_restorer; 9071 #endif 9072 unlock_user_struct(old_act, arg2, 0); 9073 pact = &act; 9074 } else { 9075 pact = NULL; 9076 } 9077 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9078 if (!is_error(ret) && arg3) { 9079 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9080 return -TARGET_EFAULT; 9081 old_act->_sa_handler = oact._sa_handler; 9082 old_act->sa_mask = oact.sa_mask.sig[0]; 9083 old_act->sa_flags = oact.sa_flags; 9084 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9085 old_act->sa_restorer = oact.sa_restorer; 9086 #endif 9087 unlock_user_struct(old_act, arg3, 1); 9088 } 9089 #endif 9090 } 9091 return ret; 9092 #endif 9093 case 
TARGET_NR_rt_sigaction: 9094 { 9095 /* 9096 * For Alpha and SPARC this is a 5 argument syscall, with 9097 * a 'restorer' parameter which must be copied into the 9098 * sa_restorer field of the sigaction struct. 9099 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 9100 * and arg5 is the sigsetsize. 9101 */ 9102 #if defined(TARGET_ALPHA) 9103 target_ulong sigsetsize = arg4; 9104 target_ulong restorer = arg5; 9105 #elif defined(TARGET_SPARC) 9106 target_ulong restorer = arg4; 9107 target_ulong sigsetsize = arg5; 9108 #else 9109 target_ulong sigsetsize = arg4; 9110 target_ulong restorer = 0; 9111 #endif 9112 struct target_sigaction *act = NULL; 9113 struct target_sigaction *oact = NULL; 9114 9115 if (sigsetsize != sizeof(target_sigset_t)) { 9116 return -TARGET_EINVAL; 9117 } 9118 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9119 return -TARGET_EFAULT; 9120 } 9121 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9122 ret = -TARGET_EFAULT; 9123 } else { 9124 ret = get_errno(do_sigaction(arg1, act, oact, restorer)); 9125 if (oact) { 9126 unlock_user_struct(oact, arg3, 1); 9127 } 9128 } 9129 if (act) { 9130 unlock_user_struct(act, arg2, 0); 9131 } 9132 } 9133 return ret; 9134 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9135 case TARGET_NR_sgetmask: 9136 { 9137 sigset_t cur_set; 9138 abi_ulong target_set; 9139 ret = do_sigprocmask(0, NULL, &cur_set); 9140 if (!ret) { 9141 host_to_target_old_sigset(&target_set, &cur_set); 9142 ret = target_set; 9143 } 9144 } 9145 return ret; 9146 #endif 9147 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9148 case TARGET_NR_ssetmask: 9149 { 9150 sigset_t set, oset; 9151 abi_ulong target_set = arg1; 9152 target_to_host_old_sigset(&set, &target_set); 9153 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9154 if (!ret) { 9155 host_to_target_old_sigset(&target_set, &oset); 9156 ret = target_set; 9157 } 9158 } 9159 return ret; 9160 #endif 9161 #ifdef TARGET_NR_sigprocmask 9162 case TARGET_NR_sigprocmask: 9163 { 9164 #if defined(TARGET_ALPHA) 9165 sigset_t set, oldset; 9166 abi_ulong mask; 9167 int how; 9168 9169 switch (arg1) { 9170 case TARGET_SIG_BLOCK: 9171 how = SIG_BLOCK; 9172 break; 9173 case TARGET_SIG_UNBLOCK: 9174 how = SIG_UNBLOCK; 9175 break; 9176 case TARGET_SIG_SETMASK: 9177 how = SIG_SETMASK; 9178 break; 9179 default: 9180 return -TARGET_EINVAL; 9181 } 9182 mask = arg2; 9183 target_to_host_old_sigset(&set, &mask); 9184 9185 ret = do_sigprocmask(how, &set, &oldset); 9186 if (!is_error(ret)) { 9187 host_to_target_old_sigset(&mask, &oldset); 9188 ret = mask; 9189 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 9190 } 9191 #else 9192 sigset_t set, oldset, *set_ptr; 9193 int how; 9194 9195 if (arg2) { 9196 switch (arg1) { 9197 case TARGET_SIG_BLOCK: 9198 how = SIG_BLOCK; 9199 break; 9200 case TARGET_SIG_UNBLOCK: 9201 how = SIG_UNBLOCK; 9202 break; 9203 case TARGET_SIG_SETMASK: 9204 how = SIG_SETMASK; 9205 break; 9206 default: 9207 return -TARGET_EINVAL; 9208 } 9209 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9210 return -TARGET_EFAULT; 9211 target_to_host_old_sigset(&set, p); 9212 unlock_user(p, arg2, 0); 9213 set_ptr = &set; 9214 } else { 9215 how = 0; 9216 set_ptr = NULL; 9217 } 9218 ret = do_sigprocmask(how, set_ptr, &oldset); 9219 if (!is_error(ret) && arg3) { 9220 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9221 return -TARGET_EFAULT; 9222 host_to_target_old_sigset(p, &oldset); 9223 unlock_user(p, arg3, sizeof(target_sigset_t)); 9224 } 9225 #endif 9226 } 
9227 return ret; 9228 #endif 9229 case TARGET_NR_rt_sigprocmask: 9230 { 9231 int how = arg1; 9232 sigset_t set, oldset, *set_ptr; 9233 9234 if (arg4 != sizeof(target_sigset_t)) { 9235 return -TARGET_EINVAL; 9236 } 9237 9238 if (arg2) { 9239 switch(how) { 9240 case TARGET_SIG_BLOCK: 9241 how = SIG_BLOCK; 9242 break; 9243 case TARGET_SIG_UNBLOCK: 9244 how = SIG_UNBLOCK; 9245 break; 9246 case TARGET_SIG_SETMASK: 9247 how = SIG_SETMASK; 9248 break; 9249 default: 9250 return -TARGET_EINVAL; 9251 } 9252 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9253 return -TARGET_EFAULT; 9254 target_to_host_sigset(&set, p); 9255 unlock_user(p, arg2, 0); 9256 set_ptr = &set; 9257 } else { 9258 how = 0; 9259 set_ptr = NULL; 9260 } 9261 ret = do_sigprocmask(how, set_ptr, &oldset); 9262 if (!is_error(ret) && arg3) { 9263 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9264 return -TARGET_EFAULT; 9265 host_to_target_sigset(p, &oldset); 9266 unlock_user(p, arg3, sizeof(target_sigset_t)); 9267 } 9268 } 9269 return ret; 9270 #ifdef TARGET_NR_sigpending 9271 case TARGET_NR_sigpending: 9272 { 9273 sigset_t set; 9274 ret = get_errno(sigpending(&set)); 9275 if (!is_error(ret)) { 9276 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9277 return -TARGET_EFAULT; 9278 host_to_target_old_sigset(p, &set); 9279 unlock_user(p, arg1, sizeof(target_sigset_t)); 9280 } 9281 } 9282 return ret; 9283 #endif 9284 case TARGET_NR_rt_sigpending: 9285 { 9286 sigset_t set; 9287 9288 /* Yes, this check is >, not != like most. We follow the kernel's 9289 * logic and it does it like this because it implements 9290 * NR_sigpending through the same code path, and in that case 9291 * the old_sigset_t is smaller in size. 9292 */ 9293 if (arg2 > sizeof(target_sigset_t)) { 9294 return -TARGET_EINVAL; 9295 } 9296 9297 ret = get_errno(sigpending(&set)); 9298 if (!is_error(ret)) { 9299 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9300 return -TARGET_EFAULT; 9301 host_to_target_sigset(p, &set); 9302 unlock_user(p, arg1, sizeof(target_sigset_t)); 9303 } 9304 } 9305 return ret; 9306 #ifdef TARGET_NR_sigsuspend 9307 case TARGET_NR_sigsuspend: 9308 { 9309 TaskState *ts = cpu->opaque; 9310 #if defined(TARGET_ALPHA) 9311 abi_ulong mask = arg1; 9312 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 9313 #else 9314 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9315 return -TARGET_EFAULT; 9316 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 9317 unlock_user(p, arg1, 0); 9318 #endif 9319 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9320 SIGSET_T_SIZE)); 9321 if (ret != -QEMU_ERESTARTSYS) { 9322 ts->in_sigsuspend = 1; 9323 } 9324 } 9325 return ret; 9326 #endif 9327 case TARGET_NR_rt_sigsuspend: 9328 { 9329 TaskState *ts = cpu->opaque; 9330 9331 if (arg2 != sizeof(target_sigset_t)) { 9332 return -TARGET_EINVAL; 9333 } 9334 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9335 return -TARGET_EFAULT; 9336 target_to_host_sigset(&ts->sigsuspend_mask, p); 9337 unlock_user(p, arg1, 0); 9338 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9339 SIGSET_T_SIZE)); 9340 if (ret != -QEMU_ERESTARTSYS) { 9341 ts->in_sigsuspend = 1; 9342 } 9343 } 9344 return ret; 9345 #ifdef TARGET_NR_rt_sigtimedwait 9346 case TARGET_NR_rt_sigtimedwait: 9347 { 9348 sigset_t set; 9349 struct timespec uts, *puts; 9350 siginfo_t uinfo; 9351 9352 if (arg4 != sizeof(target_sigset_t)) { 9353 return -TARGET_EINVAL; 9354 } 9355 9356 if (!(p = 
lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9357 return -TARGET_EFAULT; 9358 target_to_host_sigset(&set, p); 9359 unlock_user(p, arg1, 0); 9360 if (arg3) { 9361 puts = &uts; 9362 if (target_to_host_timespec(puts, arg3)) { 9363 return -TARGET_EFAULT; 9364 } 9365 } else { 9366 puts = NULL; 9367 } 9368 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9369 SIGSET_T_SIZE)); 9370 if (!is_error(ret)) { 9371 if (arg2) { 9372 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9373 0); 9374 if (!p) { 9375 return -TARGET_EFAULT; 9376 } 9377 host_to_target_siginfo(p, &uinfo); 9378 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9379 } 9380 ret = host_to_target_signal(ret); 9381 } 9382 } 9383 return ret; 9384 #endif 9385 #ifdef TARGET_NR_rt_sigtimedwait_time64 9386 case TARGET_NR_rt_sigtimedwait_time64: 9387 { 9388 sigset_t set; 9389 struct timespec uts, *puts; 9390 siginfo_t uinfo; 9391 9392 if (arg4 != sizeof(target_sigset_t)) { 9393 return -TARGET_EINVAL; 9394 } 9395 9396 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 9397 if (!p) { 9398 return -TARGET_EFAULT; 9399 } 9400 target_to_host_sigset(&set, p); 9401 unlock_user(p, arg1, 0); 9402 if (arg3) { 9403 puts = &uts; 9404 if (target_to_host_timespec64(puts, arg3)) { 9405 return -TARGET_EFAULT; 9406 } 9407 } else { 9408 puts = NULL; 9409 } 9410 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9411 SIGSET_T_SIZE)); 9412 if (!is_error(ret)) { 9413 if (arg2) { 9414 p = lock_user(VERIFY_WRITE, arg2, 9415 sizeof(target_siginfo_t), 0); 9416 if (!p) { 9417 return -TARGET_EFAULT; 9418 } 9419 host_to_target_siginfo(p, &uinfo); 9420 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9421 } 9422 ret = host_to_target_signal(ret); 9423 } 9424 } 9425 return ret; 9426 #endif 9427 case TARGET_NR_rt_sigqueueinfo: 9428 { 9429 siginfo_t uinfo; 9430 9431 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9432 if (!p) { 9433 return -TARGET_EFAULT; 9434 } 9435 target_to_host_siginfo(&uinfo, p); 9436 unlock_user(p, arg3, 0); 9437 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9438 } 9439 return ret; 9440 case TARGET_NR_rt_tgsigqueueinfo: 9441 { 9442 siginfo_t uinfo; 9443 9444 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9445 if (!p) { 9446 return -TARGET_EFAULT; 9447 } 9448 target_to_host_siginfo(&uinfo, p); 9449 unlock_user(p, arg4, 0); 9450 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9451 } 9452 return ret; 9453 #ifdef TARGET_NR_sigreturn 9454 case TARGET_NR_sigreturn: 9455 if (block_signals()) { 9456 return -QEMU_ERESTARTSYS; 9457 } 9458 return do_sigreturn(cpu_env); 9459 #endif 9460 case TARGET_NR_rt_sigreturn: 9461 if (block_signals()) { 9462 return -QEMU_ERESTARTSYS; 9463 } 9464 return do_rt_sigreturn(cpu_env); 9465 case TARGET_NR_sethostname: 9466 if (!(p = lock_user_string(arg1))) 9467 return -TARGET_EFAULT; 9468 ret = get_errno(sethostname(p, arg2)); 9469 unlock_user(p, arg1, 0); 9470 return ret; 9471 #ifdef TARGET_NR_setrlimit 9472 case TARGET_NR_setrlimit: 9473 { 9474 int resource = target_to_host_resource(arg1); 9475 struct target_rlimit *target_rlim; 9476 struct rlimit rlim; 9477 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9478 return -TARGET_EFAULT; 9479 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9480 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9481 unlock_user_struct(target_rlim, arg2, 0); 9482 /* 9483 * If we just passed through resource limit settings for memory then 9484 * they would also apply 
to QEMU's own allocations, and QEMU will 9485 * crash or hang or die if its allocations fail. Ideally we would 9486 * track the guest allocations in QEMU and apply the limits ourselves. 9487 * For now, just tell the guest the call succeeded but don't actually 9488 * limit anything. 9489 */ 9490 if (resource != RLIMIT_AS && 9491 resource != RLIMIT_DATA && 9492 resource != RLIMIT_STACK) { 9493 return get_errno(setrlimit(resource, &rlim)); 9494 } else { 9495 return 0; 9496 } 9497 } 9498 #endif 9499 #ifdef TARGET_NR_getrlimit 9500 case TARGET_NR_getrlimit: 9501 { 9502 int resource = target_to_host_resource(arg1); 9503 struct target_rlimit *target_rlim; 9504 struct rlimit rlim; 9505 9506 ret = get_errno(getrlimit(resource, &rlim)); 9507 if (!is_error(ret)) { 9508 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9509 return -TARGET_EFAULT; 9510 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9511 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9512 unlock_user_struct(target_rlim, arg2, 1); 9513 } 9514 } 9515 return ret; 9516 #endif 9517 case TARGET_NR_getrusage: 9518 { 9519 struct rusage rusage; 9520 ret = get_errno(getrusage(arg1, &rusage)); 9521 if (!is_error(ret)) { 9522 ret = host_to_target_rusage(arg2, &rusage); 9523 } 9524 } 9525 return ret; 9526 #if defined(TARGET_NR_gettimeofday) 9527 case TARGET_NR_gettimeofday: 9528 { 9529 struct timeval tv; 9530 struct timezone tz; 9531 9532 ret = get_errno(gettimeofday(&tv, &tz)); 9533 if (!is_error(ret)) { 9534 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 9535 return -TARGET_EFAULT; 9536 } 9537 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 9538 return -TARGET_EFAULT; 9539 } 9540 } 9541 } 9542 return ret; 9543 #endif 9544 #if defined(TARGET_NR_settimeofday) 9545 case TARGET_NR_settimeofday: 9546 { 9547 struct timeval tv, *ptv = NULL; 9548 struct timezone tz, *ptz = NULL; 9549 9550 if (arg1) { 9551 if (copy_from_user_timeval(&tv, arg1)) { 9552 return -TARGET_EFAULT; 9553 } 9554 ptv = &tv; 9555 } 9556 9557 if (arg2) { 9558 if (copy_from_user_timezone(&tz, arg2)) { 9559 return -TARGET_EFAULT; 9560 } 9561 ptz = &tz; 9562 } 9563 9564 return get_errno(settimeofday(ptv, ptz)); 9565 } 9566 #endif 9567 #if defined(TARGET_NR_select) 9568 case TARGET_NR_select: 9569 #if defined(TARGET_WANT_NI_OLD_SELECT) 9570 /* some architectures used to have old_select here 9571 * but now ENOSYS it. 
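 * (That is, the syscall number is still allocated, but the kernel
 * answers it with -ENOSYS, which is what we mirror below.)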
9572 */ 9573 ret = -TARGET_ENOSYS; 9574 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9575 ret = do_old_select(arg1); 9576 #else 9577 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9578 #endif 9579 return ret; 9580 #endif 9581 #ifdef TARGET_NR_pselect6 9582 case TARGET_NR_pselect6: 9583 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 9584 #endif 9585 #ifdef TARGET_NR_pselect6_time64 9586 case TARGET_NR_pselect6_time64: 9587 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 9588 #endif 9589 #ifdef TARGET_NR_symlink 9590 case TARGET_NR_symlink: 9591 { 9592 void *p2; 9593 p = lock_user_string(arg1); 9594 p2 = lock_user_string(arg2); 9595 if (!p || !p2) 9596 ret = -TARGET_EFAULT; 9597 else 9598 ret = get_errno(symlink(p, p2)); 9599 unlock_user(p2, arg2, 0); 9600 unlock_user(p, arg1, 0); 9601 } 9602 return ret; 9603 #endif 9604 #if defined(TARGET_NR_symlinkat) 9605 case TARGET_NR_symlinkat: 9606 { 9607 void *p2; 9608 p = lock_user_string(arg1); 9609 p2 = lock_user_string(arg3); 9610 if (!p || !p2) 9611 ret = -TARGET_EFAULT; 9612 else 9613 ret = get_errno(symlinkat(p, arg2, p2)); 9614 unlock_user(p2, arg3, 0); 9615 unlock_user(p, arg1, 0); 9616 } 9617 return ret; 9618 #endif 9619 #ifdef TARGET_NR_readlink 9620 case TARGET_NR_readlink: 9621 { 9622 void *p2; 9623 p = lock_user_string(arg1); 9624 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9625 if (!p || !p2) { 9626 ret = -TARGET_EFAULT; 9627 } else if (!arg3) { 9628 /* Short circuit this for the magic exe check. */ 9629 ret = -TARGET_EINVAL; 9630 } else if (is_proc_myself((const char *)p, "exe")) { 9631 char real[PATH_MAX], *temp; 9632 temp = realpath(exec_path, real); 9633 /* Return value is # of bytes that we wrote to the buffer. */ 9634 if (temp == NULL) { 9635 ret = get_errno(-1); 9636 } else { 9637 /* Don't worry about sign mismatch as earlier mapping 9638 * logic would have thrown a bad address error. */ 9639 ret = MIN(strlen(real), arg3); 9640 /* We cannot NUL terminate the string. */ 9641 memcpy(p2, real, ret); 9642 } 9643 } else { 9644 ret = get_errno(readlink(path(p), p2, arg3)); 9645 } 9646 unlock_user(p2, arg2, ret); 9647 unlock_user(p, arg1, 0); 9648 } 9649 return ret; 9650 #endif 9651 #if defined(TARGET_NR_readlinkat) 9652 case TARGET_NR_readlinkat: 9653 { 9654 void *p2; 9655 p = lock_user_string(arg2); 9656 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9657 if (!p || !p2) { 9658 ret = -TARGET_EFAULT; 9659 } else if (is_proc_myself((const char *)p, "exe")) { 9660 char real[PATH_MAX], *temp; 9661 temp = realpath(exec_path, real); 9662 ret = temp == NULL ? 
get_errno(-1) : MIN(strlen(real), arg4);
                if (temp != NULL) {
                    /* As for readlink above: copy at most arg4 bytes and
                       do not NUL terminate; realpath() failure leaves the
                       buffer untouched. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.
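       (They hand g2h(cpu, addr) straight to the host syscall, which is
       only valid while guest memory sits at a linear offset inside the
       host address space.)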
*/ 9758 #ifdef TARGET_NR_msync 9759 case TARGET_NR_msync: 9760 return get_errno(msync(g2h(cpu, arg1), arg2, arg3)); 9761 #endif 9762 #ifdef TARGET_NR_mlock 9763 case TARGET_NR_mlock: 9764 return get_errno(mlock(g2h(cpu, arg1), arg2)); 9765 #endif 9766 #ifdef TARGET_NR_munlock 9767 case TARGET_NR_munlock: 9768 return get_errno(munlock(g2h(cpu, arg1), arg2)); 9769 #endif 9770 #ifdef TARGET_NR_mlockall 9771 case TARGET_NR_mlockall: 9772 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9773 #endif 9774 #ifdef TARGET_NR_munlockall 9775 case TARGET_NR_munlockall: 9776 return get_errno(munlockall()); 9777 #endif 9778 #ifdef TARGET_NR_truncate 9779 case TARGET_NR_truncate: 9780 if (!(p = lock_user_string(arg1))) 9781 return -TARGET_EFAULT; 9782 ret = get_errno(truncate(p, arg2)); 9783 unlock_user(p, arg1, 0); 9784 return ret; 9785 #endif 9786 #ifdef TARGET_NR_ftruncate 9787 case TARGET_NR_ftruncate: 9788 return get_errno(ftruncate(arg1, arg2)); 9789 #endif 9790 case TARGET_NR_fchmod: 9791 return get_errno(fchmod(arg1, arg2)); 9792 #if defined(TARGET_NR_fchmodat) 9793 case TARGET_NR_fchmodat: 9794 if (!(p = lock_user_string(arg2))) 9795 return -TARGET_EFAULT; 9796 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9797 unlock_user(p, arg2, 0); 9798 return ret; 9799 #endif 9800 case TARGET_NR_getpriority: 9801 /* Note that negative values are valid for getpriority, so we must 9802 differentiate based on errno settings. */ 9803 errno = 0; 9804 ret = getpriority(arg1, arg2); 9805 if (ret == -1 && errno != 0) { 9806 return -host_to_target_errno(errno); 9807 } 9808 #ifdef TARGET_ALPHA 9809 /* Return value is the unbiased priority. Signal no error. */ 9810 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9811 #else 9812 /* Return value is a biased priority to avoid negative numbers. 
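           (The host getpriority() returns the raw nice value in [-20, 19];
           the kernel ABI instead returns 20 - nice, i.e. [1, 40], so apply
           the same bias before handing the value back to the guest.)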
*/ 9813 ret = 20 - ret; 9814 #endif 9815 return ret; 9816 case TARGET_NR_setpriority: 9817 return get_errno(setpriority(arg1, arg2, arg3)); 9818 #ifdef TARGET_NR_statfs 9819 case TARGET_NR_statfs: 9820 if (!(p = lock_user_string(arg1))) { 9821 return -TARGET_EFAULT; 9822 } 9823 ret = get_errno(statfs(path(p), &stfs)); 9824 unlock_user(p, arg1, 0); 9825 convert_statfs: 9826 if (!is_error(ret)) { 9827 struct target_statfs *target_stfs; 9828 9829 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9830 return -TARGET_EFAULT; 9831 __put_user(stfs.f_type, &target_stfs->f_type); 9832 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9833 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9834 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9835 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9836 __put_user(stfs.f_files, &target_stfs->f_files); 9837 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9838 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9839 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9840 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9841 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9842 #ifdef _STATFS_F_FLAGS 9843 __put_user(stfs.f_flags, &target_stfs->f_flags); 9844 #else 9845 __put_user(0, &target_stfs->f_flags); 9846 #endif 9847 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9848 unlock_user_struct(target_stfs, arg2, 1); 9849 } 9850 return ret; 9851 #endif 9852 #ifdef TARGET_NR_fstatfs 9853 case TARGET_NR_fstatfs: 9854 ret = get_errno(fstatfs(arg1, &stfs)); 9855 goto convert_statfs; 9856 #endif 9857 #ifdef TARGET_NR_statfs64 9858 case TARGET_NR_statfs64: 9859 if (!(p = lock_user_string(arg1))) { 9860 return -TARGET_EFAULT; 9861 } 9862 ret = get_errno(statfs(path(p), &stfs)); 9863 unlock_user(p, arg1, 0); 9864 convert_statfs64: 9865 if (!is_error(ret)) { 9866 struct target_statfs64 *target_stfs; 9867 9868 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9869 return -TARGET_EFAULT; 9870 __put_user(stfs.f_type, &target_stfs->f_type); 9871 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9872 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9873 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9874 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9875 __put_user(stfs.f_files, &target_stfs->f_files); 9876 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9877 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9878 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9879 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9880 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9881 #ifdef _STATFS_F_FLAGS 9882 __put_user(stfs.f_flags, &target_stfs->f_flags); 9883 #else 9884 __put_user(0, &target_stfs->f_flags); 9885 #endif 9886 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9887 unlock_user_struct(target_stfs, arg3, 1); 9888 } 9889 return ret; 9890 case TARGET_NR_fstatfs64: 9891 ret = get_errno(fstatfs(arg1, &stfs)); 9892 goto convert_statfs64; 9893 #endif 9894 #ifdef TARGET_NR_socketcall 9895 case TARGET_NR_socketcall: 9896 return do_socketcall(arg1, arg2); 9897 #endif 9898 #ifdef TARGET_NR_accept 9899 case TARGET_NR_accept: 9900 return do_accept4(arg1, arg2, arg3, 0); 9901 #endif 9902 #ifdef TARGET_NR_accept4 9903 case TARGET_NR_accept4: 9904 return do_accept4(arg1, arg2, arg3, arg4); 9905 #endif 9906 #ifdef TARGET_NR_bind 9907 case TARGET_NR_bind: 9908 return do_bind(arg1, arg2, arg3); 9909 #endif 9910 #ifdef TARGET_NR_connect 9911 case TARGET_NR_connect: 
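        /* do_connect() copies in and converts the target sockaddr before
           calling the host connect(). */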
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /* syslog(type, bufp, len): the length is arg3; arg2 is the
               buffer pointer, so it must not be used for the range checks
               below. */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
return ret; 10021 default: 10022 return -TARGET_EINVAL; 10023 } 10024 } 10025 break; 10026 #endif 10027 case TARGET_NR_setitimer: 10028 { 10029 struct itimerval value, ovalue, *pvalue; 10030 10031 if (arg2) { 10032 pvalue = &value; 10033 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 10034 || copy_from_user_timeval(&pvalue->it_value, 10035 arg2 + sizeof(struct target_timeval))) 10036 return -TARGET_EFAULT; 10037 } else { 10038 pvalue = NULL; 10039 } 10040 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 10041 if (!is_error(ret) && arg3) { 10042 if (copy_to_user_timeval(arg3, 10043 &ovalue.it_interval) 10044 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 10045 &ovalue.it_value)) 10046 return -TARGET_EFAULT; 10047 } 10048 } 10049 return ret; 10050 case TARGET_NR_getitimer: 10051 { 10052 struct itimerval value; 10053 10054 ret = get_errno(getitimer(arg1, &value)); 10055 if (!is_error(ret) && arg2) { 10056 if (copy_to_user_timeval(arg2, 10057 &value.it_interval) 10058 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 10059 &value.it_value)) 10060 return -TARGET_EFAULT; 10061 } 10062 } 10063 return ret; 10064 #ifdef TARGET_NR_stat 10065 case TARGET_NR_stat: 10066 if (!(p = lock_user_string(arg1))) { 10067 return -TARGET_EFAULT; 10068 } 10069 ret = get_errno(stat(path(p), &st)); 10070 unlock_user(p, arg1, 0); 10071 goto do_stat; 10072 #endif 10073 #ifdef TARGET_NR_lstat 10074 case TARGET_NR_lstat: 10075 if (!(p = lock_user_string(arg1))) { 10076 return -TARGET_EFAULT; 10077 } 10078 ret = get_errno(lstat(path(p), &st)); 10079 unlock_user(p, arg1, 0); 10080 goto do_stat; 10081 #endif 10082 #ifdef TARGET_NR_fstat 10083 case TARGET_NR_fstat: 10084 { 10085 ret = get_errno(fstat(arg1, &st)); 10086 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 10087 do_stat: 10088 #endif 10089 if (!is_error(ret)) { 10090 struct target_stat *target_st; 10091 10092 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 10093 return -TARGET_EFAULT; 10094 memset(target_st, 0, sizeof(*target_st)); 10095 __put_user(st.st_dev, &target_st->st_dev); 10096 __put_user(st.st_ino, &target_st->st_ino); 10097 __put_user(st.st_mode, &target_st->st_mode); 10098 __put_user(st.st_uid, &target_st->st_uid); 10099 __put_user(st.st_gid, &target_st->st_gid); 10100 __put_user(st.st_nlink, &target_st->st_nlink); 10101 __put_user(st.st_rdev, &target_st->st_rdev); 10102 __put_user(st.st_size, &target_st->st_size); 10103 __put_user(st.st_blksize, &target_st->st_blksize); 10104 __put_user(st.st_blocks, &target_st->st_blocks); 10105 __put_user(st.st_atime, &target_st->target_st_atime); 10106 __put_user(st.st_mtime, &target_st->target_st_mtime); 10107 __put_user(st.st_ctime, &target_st->target_st_ctime); 10108 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) 10109 __put_user(st.st_atim.tv_nsec, 10110 &target_st->target_st_atime_nsec); 10111 __put_user(st.st_mtim.tv_nsec, 10112 &target_st->target_st_mtime_nsec); 10113 __put_user(st.st_ctim.tv_nsec, 10114 &target_st->target_st_ctime_nsec); 10115 #endif 10116 unlock_user_struct(target_st, arg2, 1); 10117 } 10118 } 10119 return ret; 10120 #endif 10121 case TARGET_NR_vhangup: 10122 return get_errno(vhangup()); 10123 #ifdef TARGET_NR_syscall 10124 case TARGET_NR_syscall: 10125 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 10126 arg6, arg7, arg8, 0); 10127 #endif 10128 #if defined(TARGET_NR_wait4) 10129 case TARGET_NR_wait4: 10130 { 10131 int status; 10132 abi_long status_ptr = arg2; 10133 struct rusage rusage, 
*rusage_ptr; 10134 abi_ulong target_rusage = arg4; 10135 abi_long rusage_err; 10136 if (target_rusage) 10137 rusage_ptr = &rusage; 10138 else 10139 rusage_ptr = NULL; 10140 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10141 if (!is_error(ret)) { 10142 if (status_ptr && ret) { 10143 status = host_to_target_waitstatus(status); 10144 if (put_user_s32(status, status_ptr)) 10145 return -TARGET_EFAULT; 10146 } 10147 if (target_rusage) { 10148 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10149 if (rusage_err) { 10150 ret = rusage_err; 10151 } 10152 } 10153 } 10154 } 10155 return ret; 10156 #endif 10157 #ifdef TARGET_NR_swapoff 10158 case TARGET_NR_swapoff: 10159 if (!(p = lock_user_string(arg1))) 10160 return -TARGET_EFAULT; 10161 ret = get_errno(swapoff(p)); 10162 unlock_user(p, arg1, 0); 10163 return ret; 10164 #endif 10165 case TARGET_NR_sysinfo: 10166 { 10167 struct target_sysinfo *target_value; 10168 struct sysinfo value; 10169 ret = get_errno(sysinfo(&value)); 10170 if (!is_error(ret) && arg1) 10171 { 10172 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 10173 return -TARGET_EFAULT; 10174 __put_user(value.uptime, &target_value->uptime); 10175 __put_user(value.loads[0], &target_value->loads[0]); 10176 __put_user(value.loads[1], &target_value->loads[1]); 10177 __put_user(value.loads[2], &target_value->loads[2]); 10178 __put_user(value.totalram, &target_value->totalram); 10179 __put_user(value.freeram, &target_value->freeram); 10180 __put_user(value.sharedram, &target_value->sharedram); 10181 __put_user(value.bufferram, &target_value->bufferram); 10182 __put_user(value.totalswap, &target_value->totalswap); 10183 __put_user(value.freeswap, &target_value->freeswap); 10184 __put_user(value.procs, &target_value->procs); 10185 __put_user(value.totalhigh, &target_value->totalhigh); 10186 __put_user(value.freehigh, &target_value->freehigh); 10187 __put_user(value.mem_unit, &target_value->mem_unit); 10188 unlock_user_struct(target_value, arg1, 1); 10189 } 10190 } 10191 return ret; 10192 #ifdef TARGET_NR_ipc 10193 case TARGET_NR_ipc: 10194 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10195 #endif 10196 #ifdef TARGET_NR_semget 10197 case TARGET_NR_semget: 10198 return get_errno(semget(arg1, arg2, arg3)); 10199 #endif 10200 #ifdef TARGET_NR_semop 10201 case TARGET_NR_semop: 10202 return do_semtimedop(arg1, arg2, arg3, 0, false); 10203 #endif 10204 #ifdef TARGET_NR_semtimedop 10205 case TARGET_NR_semtimedop: 10206 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10207 #endif 10208 #ifdef TARGET_NR_semtimedop_time64 10209 case TARGET_NR_semtimedop_time64: 10210 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10211 #endif 10212 #ifdef TARGET_NR_semctl 10213 case TARGET_NR_semctl: 10214 return do_semctl(arg1, arg2, arg3, arg4); 10215 #endif 10216 #ifdef TARGET_NR_msgctl 10217 case TARGET_NR_msgctl: 10218 return do_msgctl(arg1, arg2, arg3); 10219 #endif 10220 #ifdef TARGET_NR_msgget 10221 case TARGET_NR_msgget: 10222 return get_errno(msgget(arg1, arg2)); 10223 #endif 10224 #ifdef TARGET_NR_msgrcv 10225 case TARGET_NR_msgrcv: 10226 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10227 #endif 10228 #ifdef TARGET_NR_msgsnd 10229 case TARGET_NR_msgsnd: 10230 return do_msgsnd(arg1, arg2, arg3, arg4); 10231 #endif 10232 #ifdef TARGET_NR_shmget 10233 case TARGET_NR_shmget: 10234 return get_errno(shmget(arg1, arg2, arg3)); 10235 #endif 10236 #ifdef TARGET_NR_shmctl 10237 case TARGET_NR_shmctl: 10238 return do_shmctl(arg1, arg2, arg3); 10239 #endif 
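    /*
     * shmat and shmdt go through do_shmat()/do_shmdt() so that attached
     * segments are tracked in the guest's address-space map as well as
     * being mapped on the host side.
     */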
10240 #ifdef TARGET_NR_shmat 10241 case TARGET_NR_shmat: 10242 return do_shmat(cpu_env, arg1, arg2, arg3); 10243 #endif 10244 #ifdef TARGET_NR_shmdt 10245 case TARGET_NR_shmdt: 10246 return do_shmdt(arg1); 10247 #endif 10248 case TARGET_NR_fsync: 10249 return get_errno(fsync(arg1)); 10250 case TARGET_NR_clone: 10251 /* Linux manages to have three different orderings for its 10252 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10253 * match the kernel's CONFIG_CLONE_* settings. 10254 * Microblaze is further special in that it uses a sixth 10255 * implicit argument to clone for the TLS pointer. 10256 */ 10257 #if defined(TARGET_MICROBLAZE) 10258 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10259 #elif defined(TARGET_CLONE_BACKWARDS) 10260 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10261 #elif defined(TARGET_CLONE_BACKWARDS2) 10262 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10263 #else 10264 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10265 #endif 10266 return ret; 10267 #ifdef __NR_exit_group 10268 /* new thread calls */ 10269 case TARGET_NR_exit_group: 10270 preexit_cleanup(cpu_env, arg1); 10271 return get_errno(exit_group(arg1)); 10272 #endif 10273 case TARGET_NR_setdomainname: 10274 if (!(p = lock_user_string(arg1))) 10275 return -TARGET_EFAULT; 10276 ret = get_errno(setdomainname(p, arg2)); 10277 unlock_user(p, arg1, 0); 10278 return ret; 10279 case TARGET_NR_uname: 10280 /* no need to transcode because we use the linux syscall */ 10281 { 10282 struct new_utsname * buf; 10283 10284 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10285 return -TARGET_EFAULT; 10286 ret = get_errno(sys_uname(buf)); 10287 if (!is_error(ret)) { 10288 /* Overwrite the native machine name with whatever is being 10289 emulated. */ 10290 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10291 sizeof(buf->machine)); 10292 /* Allow the user to override the reported release. 
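                   qemu_uname_release comes from the -r command line option
                   (or the QEMU_UNAME environment variable); some guest C
                   libraries refuse to start on a kernel that looks older
                   than the one they were built for.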
*/ 10293 if (qemu_uname_release && *qemu_uname_release) { 10294 g_strlcpy(buf->release, qemu_uname_release, 10295 sizeof(buf->release)); 10296 } 10297 } 10298 unlock_user_struct(buf, arg1, 1); 10299 } 10300 return ret; 10301 #ifdef TARGET_I386 10302 case TARGET_NR_modify_ldt: 10303 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10304 #if !defined(TARGET_X86_64) 10305 case TARGET_NR_vm86: 10306 return do_vm86(cpu_env, arg1, arg2); 10307 #endif 10308 #endif 10309 #if defined(TARGET_NR_adjtimex) 10310 case TARGET_NR_adjtimex: 10311 { 10312 struct timex host_buf; 10313 10314 if (target_to_host_timex(&host_buf, arg1) != 0) { 10315 return -TARGET_EFAULT; 10316 } 10317 ret = get_errno(adjtimex(&host_buf)); 10318 if (!is_error(ret)) { 10319 if (host_to_target_timex(arg1, &host_buf) != 0) { 10320 return -TARGET_EFAULT; 10321 } 10322 } 10323 } 10324 return ret; 10325 #endif 10326 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10327 case TARGET_NR_clock_adjtime: 10328 { 10329 struct timex htx, *phtx = &htx; 10330 10331 if (target_to_host_timex(phtx, arg2) != 0) { 10332 return -TARGET_EFAULT; 10333 } 10334 ret = get_errno(clock_adjtime(arg1, phtx)); 10335 if (!is_error(ret) && phtx) { 10336 if (host_to_target_timex(arg2, phtx) != 0) { 10337 return -TARGET_EFAULT; 10338 } 10339 } 10340 } 10341 return ret; 10342 #endif 10343 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10344 case TARGET_NR_clock_adjtime64: 10345 { 10346 struct timex htx; 10347 10348 if (target_to_host_timex64(&htx, arg2) != 0) { 10349 return -TARGET_EFAULT; 10350 } 10351 ret = get_errno(clock_adjtime(arg1, &htx)); 10352 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10353 return -TARGET_EFAULT; 10354 } 10355 } 10356 return ret; 10357 #endif 10358 case TARGET_NR_getpgid: 10359 return get_errno(getpgid(arg1)); 10360 case TARGET_NR_fchdir: 10361 return get_errno(fchdir(arg1)); 10362 case TARGET_NR_personality: 10363 return get_errno(personality(arg1)); 10364 #ifdef TARGET_NR__llseek /* Not on alpha */ 10365 case TARGET_NR__llseek: 10366 { 10367 int64_t res; 10368 #if !defined(__NR_llseek) 10369 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10370 if (res == -1) { 10371 ret = get_errno(res); 10372 } else { 10373 ret = 0; 10374 } 10375 #else 10376 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10377 #endif 10378 if ((ret == 0) && put_user_s64(res, arg4)) { 10379 return -TARGET_EFAULT; 10380 } 10381 } 10382 return ret; 10383 #endif 10384 #ifdef TARGET_NR_getdents 10385 case TARGET_NR_getdents: 10386 return do_getdents(arg1, arg2, arg3); 10387 #endif /* TARGET_NR_getdents */ 10388 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10389 case TARGET_NR_getdents64: 10390 return do_getdents64(arg1, arg2, arg3); 10391 #endif /* TARGET_NR_getdents64 */ 10392 #if defined(TARGET_NR__newselect) 10393 case TARGET_NR__newselect: 10394 return do_select(arg1, arg2, arg3, arg4, arg5); 10395 #endif 10396 #ifdef TARGET_NR_poll 10397 case TARGET_NR_poll: 10398 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 10399 #endif 10400 #ifdef TARGET_NR_ppoll 10401 case TARGET_NR_ppoll: 10402 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 10403 #endif 10404 #ifdef TARGET_NR_ppoll_time64 10405 case TARGET_NR_ppoll_time64: 10406 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 10407 #endif 10408 case TARGET_NR_flock: 10409 /* NOTE: the flock constant seems to be the same for every 10410 Linux platform */ 10411 return 
get_errno(safe_flock(arg1, arg2)); 10412 case TARGET_NR_readv: 10413 { 10414 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10415 if (vec != NULL) { 10416 ret = get_errno(safe_readv(arg1, vec, arg3)); 10417 unlock_iovec(vec, arg2, arg3, 1); 10418 } else { 10419 ret = -host_to_target_errno(errno); 10420 } 10421 } 10422 return ret; 10423 case TARGET_NR_writev: 10424 { 10425 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10426 if (vec != NULL) { 10427 ret = get_errno(safe_writev(arg1, vec, arg3)); 10428 unlock_iovec(vec, arg2, arg3, 0); 10429 } else { 10430 ret = -host_to_target_errno(errno); 10431 } 10432 } 10433 return ret; 10434 #if defined(TARGET_NR_preadv) 10435 case TARGET_NR_preadv: 10436 { 10437 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10438 if (vec != NULL) { 10439 unsigned long low, high; 10440 10441 target_to_host_low_high(arg4, arg5, &low, &high); 10442 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10443 unlock_iovec(vec, arg2, arg3, 1); 10444 } else { 10445 ret = -host_to_target_errno(errno); 10446 } 10447 } 10448 return ret; 10449 #endif 10450 #if defined(TARGET_NR_pwritev) 10451 case TARGET_NR_pwritev: 10452 { 10453 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10454 if (vec != NULL) { 10455 unsigned long low, high; 10456 10457 target_to_host_low_high(arg4, arg5, &low, &high); 10458 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10459 unlock_iovec(vec, arg2, arg3, 0); 10460 } else { 10461 ret = -host_to_target_errno(errno); 10462 } 10463 } 10464 return ret; 10465 #endif 10466 case TARGET_NR_getsid: 10467 return get_errno(getsid(arg1)); 10468 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10469 case TARGET_NR_fdatasync: 10470 return get_errno(fdatasync(arg1)); 10471 #endif 10472 case TARGET_NR_sched_getaffinity: 10473 { 10474 unsigned int mask_size; 10475 unsigned long *mask; 10476 10477 /* 10478 * sched_getaffinity needs multiples of ulong, so need to take 10479 * care of mismatches between target ulong and host ulong sizes. 10480 */ 10481 if (arg2 & (sizeof(abi_ulong) - 1)) { 10482 return -TARGET_EINVAL; 10483 } 10484 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10485 10486 mask = alloca(mask_size); 10487 memset(mask, 0, mask_size); 10488 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10489 10490 if (!is_error(ret)) { 10491 if (ret > arg2) { 10492 /* More data returned than the caller's buffer will fit. 10493 * This only happens if sizeof(abi_long) < sizeof(long) 10494 * and the caller passed us a buffer holding an odd number 10495 * of abi_longs. If the host kernel is actually using the 10496 * extra 4 bytes then fail EINVAL; otherwise we can just 10497 * ignore them and only copy the interesting part. 10498 */ 10499 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10500 if (numcpus > arg2 * 8) { 10501 return -TARGET_EINVAL; 10502 } 10503 ret = arg2; 10504 } 10505 10506 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10507 return -TARGET_EFAULT; 10508 } 10509 } 10510 } 10511 return ret; 10512 case TARGET_NR_sched_setaffinity: 10513 { 10514 unsigned int mask_size; 10515 unsigned long *mask; 10516 10517 /* 10518 * sched_setaffinity needs multiples of ulong, so need to take 10519 * care of mismatches between target ulong and host ulong sizes. 
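             * (Same scheme as sched_getaffinity above: sizes that are not
             * a multiple of the target ulong are rejected and the host
             * buffer is rounded up to whole host longs.)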
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            return -TARGET_EINVAL;
        }
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);

        ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
        if (ret) {
            return ret;
        }

        return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* Fail with EFAULT on a bad timespec pointer rather than
               sleeping on an uninitialised value. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
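        /*
         * Only prctl options that read or write guest memory or touch
         * emulated CPU state need handling here; everything that reaches
         * the default case is passed straight through to the host.
         */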
switch (arg1) { 10639 case PR_GET_PDEATHSIG: 10640 { 10641 int deathsig; 10642 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10643 if (!is_error(ret) && arg2 10644 && put_user_s32(deathsig, arg2)) { 10645 return -TARGET_EFAULT; 10646 } 10647 return ret; 10648 } 10649 #ifdef PR_GET_NAME 10650 case PR_GET_NAME: 10651 { 10652 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10653 if (!name) { 10654 return -TARGET_EFAULT; 10655 } 10656 ret = get_errno(prctl(arg1, (unsigned long)name, 10657 arg3, arg4, arg5)); 10658 unlock_user(name, arg2, 16); 10659 return ret; 10660 } 10661 case PR_SET_NAME: 10662 { 10663 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10664 if (!name) { 10665 return -TARGET_EFAULT; 10666 } 10667 ret = get_errno(prctl(arg1, (unsigned long)name, 10668 arg3, arg4, arg5)); 10669 unlock_user(name, arg2, 0); 10670 return ret; 10671 } 10672 #endif 10673 #ifdef TARGET_MIPS 10674 case TARGET_PR_GET_FP_MODE: 10675 { 10676 CPUMIPSState *env = ((CPUMIPSState *)cpu_env); 10677 ret = 0; 10678 if (env->CP0_Status & (1 << CP0St_FR)) { 10679 ret |= TARGET_PR_FP_MODE_FR; 10680 } 10681 if (env->CP0_Config5 & (1 << CP0C5_FRE)) { 10682 ret |= TARGET_PR_FP_MODE_FRE; 10683 } 10684 return ret; 10685 } 10686 case TARGET_PR_SET_FP_MODE: 10687 { 10688 CPUMIPSState *env = ((CPUMIPSState *)cpu_env); 10689 bool old_fr = env->CP0_Status & (1 << CP0St_FR); 10690 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE); 10691 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR; 10692 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE; 10693 10694 const unsigned int known_bits = TARGET_PR_FP_MODE_FR | 10695 TARGET_PR_FP_MODE_FRE; 10696 10697 /* If nothing to change, return right away, successfully. */ 10698 if (old_fr == new_fr && old_fre == new_fre) { 10699 return 0; 10700 } 10701 /* Check the value is valid */ 10702 if (arg2 & ~known_bits) { 10703 return -TARGET_EOPNOTSUPP; 10704 } 10705 /* Setting FRE without FR is not supported. */ 10706 if (new_fre && !new_fr) { 10707 return -TARGET_EOPNOTSUPP; 10708 } 10709 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) { 10710 /* FR1 is not supported */ 10711 return -TARGET_EOPNOTSUPP; 10712 } 10713 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64)) 10714 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { 10715 /* cannot set FR=0 */ 10716 return -TARGET_EOPNOTSUPP; 10717 } 10718 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) { 10719 /* Cannot set FRE=1 */ 10720 return -TARGET_EOPNOTSUPP; 10721 } 10722 10723 int i; 10724 fpr_t *fpr = env->active_fpu.fpr; 10725 for (i = 0; i < 32 ; i += 2) { 10726 if (!old_fr && new_fr) { 10727 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX]; 10728 } else if (old_fr && !new_fr) { 10729 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX]; 10730 } 10731 } 10732 10733 if (new_fr) { 10734 env->CP0_Status |= (1 << CP0St_FR); 10735 env->hflags |= MIPS_HFLAG_F64; 10736 } else { 10737 env->CP0_Status &= ~(1 << CP0St_FR); 10738 env->hflags &= ~MIPS_HFLAG_F64; 10739 } 10740 if (new_fre) { 10741 env->CP0_Config5 |= (1 << CP0C5_FRE); 10742 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { 10743 env->hflags |= MIPS_HFLAG_FRE; 10744 } 10745 } else { 10746 env->CP0_Config5 &= ~(1 << CP0C5_FRE); 10747 env->hflags &= ~MIPS_HFLAG_FRE; 10748 } 10749 10750 return 0; 10751 } 10752 #endif /* MIPS */ 10753 #ifdef TARGET_AARCH64 10754 case TARGET_PR_SVE_SET_VL: 10755 /* 10756 * We cannot support either PR_SVE_SET_VL_ONEXEC or 10757 * PR_SVE_VL_INHERIT. Note the kernel definition 10758 * of sve_vl_valid allows for VQ=512, i.e. 
VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
        case TARGET_PR_SET_TAGGED_ADDR_CTRL:
            {
                abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    valid_mask |= TARGET_PR_MTE_TCF_MASK;
                    valid_mask |= TARGET_PR_MTE_TAG_MASK;
                }

                if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
                    case TARGET_PR_MTE_TCF_NONE:
                    case TARGET_PR_MTE_TCF_SYNC:
                    case TARGET_PR_MTE_TCF_ASYNC:
                        break;
                    default:
                        /* Return the target's errno, as elsewhere here. */
                        return -TARGET_EINVAL;
                    }

                    /*
                     * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
                     * Note that the syscall values are consistent with hw.
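                     * (TCF0 is the 2-bit field at SCTLR_EL1[39:38], hence
                     * the deposit64(..., 38, 2, ...) just below.)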
10874 */ 10875 env->cp15.sctlr_el[1] = 10876 deposit64(env->cp15.sctlr_el[1], 38, 2, 10877 arg2 >> TARGET_PR_MTE_TCF_SHIFT); 10878 10879 /* 10880 * Write PR_MTE_TAG to GCR_EL1[Exclude]. 10881 * Note that the syscall uses an include mask, 10882 * and hardware uses an exclude mask -- invert. 10883 */ 10884 env->cp15.gcr_el1 = 10885 deposit64(env->cp15.gcr_el1, 0, 16, 10886 ~arg2 >> TARGET_PR_MTE_TAG_SHIFT); 10887 arm_rebuild_hflags(env); 10888 } 10889 return 0; 10890 } 10891 case TARGET_PR_GET_TAGGED_ADDR_CTRL: 10892 { 10893 abi_long ret = 0; 10894 CPUARMState *env = cpu_env; 10895 ARMCPU *cpu = env_archcpu(env); 10896 10897 if (arg2 || arg3 || arg4 || arg5) { 10898 return -TARGET_EINVAL; 10899 } 10900 if (env->tagged_addr_enable) { 10901 ret |= TARGET_PR_TAGGED_ADDR_ENABLE; 10902 } 10903 if (cpu_isar_feature(aa64_mte, cpu)) { 10904 /* See above. */ 10905 ret |= (extract64(env->cp15.sctlr_el[1], 38, 2) 10906 << TARGET_PR_MTE_TCF_SHIFT); 10907 ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16, 10908 ~env->cp15.gcr_el1); 10909 } 10910 return ret; 10911 } 10912 #endif /* AARCH64 */ 10913 case PR_GET_SECCOMP: 10914 case PR_SET_SECCOMP: 10915 /* Disable seccomp to prevent the target disabling syscalls we 10916 * need. */ 10917 return -TARGET_EINVAL; 10918 default: 10919 /* Most prctl options have no pointer arguments */ 10920 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10921 } 10922 break; 10923 #ifdef TARGET_NR_arch_prctl 10924 case TARGET_NR_arch_prctl: 10925 return do_arch_prctl(cpu_env, arg1, arg2); 10926 #endif 10927 #ifdef TARGET_NR_pread64 10928 case TARGET_NR_pread64: 10929 if (regpairs_aligned(cpu_env, num)) { 10930 arg4 = arg5; 10931 arg5 = arg6; 10932 } 10933 if (arg2 == 0 && arg3 == 0) { 10934 /* Special-case NULL buffer and zero length, which should succeed */ 10935 p = 0; 10936 } else { 10937 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10938 if (!p) { 10939 return -TARGET_EFAULT; 10940 } 10941 } 10942 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10943 unlock_user(p, arg2, ret); 10944 return ret; 10945 case TARGET_NR_pwrite64: 10946 if (regpairs_aligned(cpu_env, num)) { 10947 arg4 = arg5; 10948 arg5 = arg6; 10949 } 10950 if (arg2 == 0 && arg3 == 0) { 10951 /* Special-case NULL buffer and zero length, which should succeed */ 10952 p = 0; 10953 } else { 10954 p = lock_user(VERIFY_READ, arg2, arg3, 1); 10955 if (!p) { 10956 return -TARGET_EFAULT; 10957 } 10958 } 10959 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10960 unlock_user(p, arg2, 0); 10961 return ret; 10962 #endif 10963 case TARGET_NR_getcwd: 10964 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10965 return -TARGET_EFAULT; 10966 ret = get_errno(sys_getcwd1(p, arg2)); 10967 unlock_user(p, arg1, ret); 10968 return ret; 10969 case TARGET_NR_capget: 10970 case TARGET_NR_capset: 10971 { 10972 struct target_user_cap_header *target_header; 10973 struct target_user_cap_data *target_data = NULL; 10974 struct __user_cap_header_struct header; 10975 struct __user_cap_data_struct data[2]; 10976 struct __user_cap_data_struct *dataptr = NULL; 10977 int i, target_datalen; 10978 int data_items = 1; 10979 10980 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10981 return -TARGET_EFAULT; 10982 } 10983 header.version = tswap32(target_header->version); 10984 header.pid = tswap32(target_header->pid); 10985 10986 if (header.version != _LINUX_CAPABILITY_VERSION) { 10987 /* Version 2 and up takes pointer to two user_data structs */ 10988 data_items = 2; 10989 } 10990 10991 
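            /*
             * arg2 may legitimately be NULL: capget() with a NULL data
             * pointer is the documented way to probe the kernel for its
             * preferred capability version, so the data buffer is only
             * locked and converted when it is present.
             */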
target_datalen = sizeof(*target_data) * data_items; 10992 10993 if (arg2) { 10994 if (num == TARGET_NR_capget) { 10995 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10996 } else { 10997 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10998 } 10999 if (!target_data) { 11000 unlock_user_struct(target_header, arg1, 0); 11001 return -TARGET_EFAULT; 11002 } 11003 11004 if (num == TARGET_NR_capset) { 11005 for (i = 0; i < data_items; i++) { 11006 data[i].effective = tswap32(target_data[i].effective); 11007 data[i].permitted = tswap32(target_data[i].permitted); 11008 data[i].inheritable = tswap32(target_data[i].inheritable); 11009 } 11010 } 11011 11012 dataptr = data; 11013 } 11014 11015 if (num == TARGET_NR_capget) { 11016 ret = get_errno(capget(&header, dataptr)); 11017 } else { 11018 ret = get_errno(capset(&header, dataptr)); 11019 } 11020 11021 /* The kernel always updates version for both capget and capset */ 11022 target_header->version = tswap32(header.version); 11023 unlock_user_struct(target_header, arg1, 1); 11024 11025 if (arg2) { 11026 if (num == TARGET_NR_capget) { 11027 for (i = 0; i < data_items; i++) { 11028 target_data[i].effective = tswap32(data[i].effective); 11029 target_data[i].permitted = tswap32(data[i].permitted); 11030 target_data[i].inheritable = tswap32(data[i].inheritable); 11031 } 11032 unlock_user(target_data, arg2, target_datalen); 11033 } else { 11034 unlock_user(target_data, arg2, 0); 11035 } 11036 } 11037 return ret; 11038 } 11039 case TARGET_NR_sigaltstack: 11040 return do_sigaltstack(arg1, arg2, cpu_env); 11041 11042 #ifdef CONFIG_SENDFILE 11043 #ifdef TARGET_NR_sendfile 11044 case TARGET_NR_sendfile: 11045 { 11046 off_t *offp = NULL; 11047 off_t off; 11048 if (arg3) { 11049 ret = get_user_sal(off, arg3); 11050 if (is_error(ret)) { 11051 return ret; 11052 } 11053 offp = &off; 11054 } 11055 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11056 if (!is_error(ret) && arg3) { 11057 abi_long ret2 = put_user_sal(off, arg3); 11058 if (is_error(ret2)) { 11059 ret = ret2; 11060 } 11061 } 11062 return ret; 11063 } 11064 #endif 11065 #ifdef TARGET_NR_sendfile64 11066 case TARGET_NR_sendfile64: 11067 { 11068 off_t *offp = NULL; 11069 off_t off; 11070 if (arg3) { 11071 ret = get_user_s64(off, arg3); 11072 if (is_error(ret)) { 11073 return ret; 11074 } 11075 offp = &off; 11076 } 11077 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11078 if (!is_error(ret) && arg3) { 11079 abi_long ret2 = put_user_s64(off, arg3); 11080 if (is_error(ret2)) { 11081 ret = ret2; 11082 } 11083 } 11084 return ret; 11085 } 11086 #endif 11087 #endif 11088 #ifdef TARGET_NR_vfork 11089 case TARGET_NR_vfork: 11090 return get_errno(do_fork(cpu_env, 11091 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11092 0, 0, 0, 0)); 11093 #endif 11094 #ifdef TARGET_NR_ugetrlimit 11095 case TARGET_NR_ugetrlimit: 11096 { 11097 struct rlimit rlim; 11098 int resource = target_to_host_resource(arg1); 11099 ret = get_errno(getrlimit(resource, &rlim)); 11100 if (!is_error(ret)) { 11101 struct target_rlimit *target_rlim; 11102 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11103 return -TARGET_EFAULT; 11104 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11105 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11106 unlock_user_struct(target_rlim, arg2, 1); 11107 } 11108 return ret; 11109 } 11110 #endif 11111 #ifdef TARGET_NR_truncate64 11112 case TARGET_NR_truncate64: 11113 if (!(p = lock_user_string(arg1))) 11114 return -TARGET_EFAULT; 11115 ret = 
target_truncate64(cpu_env, p, arg2, arg3, arg4); 11116 unlock_user(p, arg1, 0); 11117 return ret; 11118 #endif 11119 #ifdef TARGET_NR_ftruncate64 11120 case TARGET_NR_ftruncate64: 11121 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11122 #endif 11123 #ifdef TARGET_NR_stat64 11124 case TARGET_NR_stat64: 11125 if (!(p = lock_user_string(arg1))) { 11126 return -TARGET_EFAULT; 11127 } 11128 ret = get_errno(stat(path(p), &st)); 11129 unlock_user(p, arg1, 0); 11130 if (!is_error(ret)) 11131 ret = host_to_target_stat64(cpu_env, arg2, &st); 11132 return ret; 11133 #endif 11134 #ifdef TARGET_NR_lstat64 11135 case TARGET_NR_lstat64: 11136 if (!(p = lock_user_string(arg1))) { 11137 return -TARGET_EFAULT; 11138 } 11139 ret = get_errno(lstat(path(p), &st)); 11140 unlock_user(p, arg1, 0); 11141 if (!is_error(ret)) 11142 ret = host_to_target_stat64(cpu_env, arg2, &st); 11143 return ret; 11144 #endif 11145 #ifdef TARGET_NR_fstat64 11146 case TARGET_NR_fstat64: 11147 ret = get_errno(fstat(arg1, &st)); 11148 if (!is_error(ret)) 11149 ret = host_to_target_stat64(cpu_env, arg2, &st); 11150 return ret; 11151 #endif 11152 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11153 #ifdef TARGET_NR_fstatat64 11154 case TARGET_NR_fstatat64: 11155 #endif 11156 #ifdef TARGET_NR_newfstatat 11157 case TARGET_NR_newfstatat: 11158 #endif 11159 if (!(p = lock_user_string(arg2))) { 11160 return -TARGET_EFAULT; 11161 } 11162 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11163 unlock_user(p, arg2, 0); 11164 if (!is_error(ret)) 11165 ret = host_to_target_stat64(cpu_env, arg3, &st); 11166 return ret; 11167 #endif 11168 #if defined(TARGET_NR_statx) 11169 case TARGET_NR_statx: 11170 { 11171 struct target_statx *target_stx; 11172 int dirfd = arg1; 11173 int flags = arg3; 11174 11175 p = lock_user_string(arg2); 11176 if (p == NULL) { 11177 return -TARGET_EFAULT; 11178 } 11179 #if defined(__NR_statx) 11180 { 11181 /* 11182 * It is assumed that struct statx is architecture independent. 
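             * (Its fields are fixed-width with explicit padding, so a
             * single definition can serve every ABI.)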
11183 */ 11184 struct target_statx host_stx; 11185 int mask = arg4; 11186 11187 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11188 if (!is_error(ret)) { 11189 if (host_to_target_statx(&host_stx, arg5) != 0) { 11190 unlock_user(p, arg2, 0); 11191 return -TARGET_EFAULT; 11192 } 11193 } 11194 11195 if (ret != -TARGET_ENOSYS) { 11196 unlock_user(p, arg2, 0); 11197 return ret; 11198 } 11199 } 11200 #endif 11201 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11202 unlock_user(p, arg2, 0); 11203 11204 if (!is_error(ret)) { 11205 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11206 return -TARGET_EFAULT; 11207 } 11208 memset(target_stx, 0, sizeof(*target_stx)); 11209 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11210 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11211 __put_user(st.st_ino, &target_stx->stx_ino); 11212 __put_user(st.st_mode, &target_stx->stx_mode); 11213 __put_user(st.st_uid, &target_stx->stx_uid); 11214 __put_user(st.st_gid, &target_stx->stx_gid); 11215 __put_user(st.st_nlink, &target_stx->stx_nlink); 11216 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11217 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11218 __put_user(st.st_size, &target_stx->stx_size); 11219 __put_user(st.st_blksize, &target_stx->stx_blksize); 11220 __put_user(st.st_blocks, &target_stx->stx_blocks); 11221 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11222 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11223 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11224 unlock_user_struct(target_stx, arg5, 1); 11225 } 11226 } 11227 return ret; 11228 #endif 11229 #ifdef TARGET_NR_lchown 11230 case TARGET_NR_lchown: 11231 if (!(p = lock_user_string(arg1))) 11232 return -TARGET_EFAULT; 11233 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11234 unlock_user(p, arg1, 0); 11235 return ret; 11236 #endif 11237 #ifdef TARGET_NR_getuid 11238 case TARGET_NR_getuid: 11239 return get_errno(high2lowuid(getuid())); 11240 #endif 11241 #ifdef TARGET_NR_getgid 11242 case TARGET_NR_getgid: 11243 return get_errno(high2lowgid(getgid())); 11244 #endif 11245 #ifdef TARGET_NR_geteuid 11246 case TARGET_NR_geteuid: 11247 return get_errno(high2lowuid(geteuid())); 11248 #endif 11249 #ifdef TARGET_NR_getegid 11250 case TARGET_NR_getegid: 11251 return get_errno(high2lowgid(getegid())); 11252 #endif 11253 case TARGET_NR_setreuid: 11254 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11255 case TARGET_NR_setregid: 11256 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11257 case TARGET_NR_getgroups: 11258 { 11259 int gidsetsize = arg1; 11260 target_id *target_grouplist; 11261 gid_t *grouplist; 11262 int i; 11263 11264 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11265 ret = get_errno(getgroups(gidsetsize, grouplist)); 11266 if (gidsetsize == 0) 11267 return ret; 11268 if (!is_error(ret)) { 11269 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11270 if (!target_grouplist) 11271 return -TARGET_EFAULT; 11272 for(i = 0;i < ret; i++) 11273 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11274 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11275 } 11276 } 11277 return ret; 11278 case TARGET_NR_setgroups: 11279 { 11280 int gidsetsize = arg1; 11281 target_id *target_grouplist; 11282 gid_t *grouplist = NULL; 11283 int i; 11284 if (gidsetsize) { 11285 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11286 target_grouplist = 
lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
            }
            unlock_user(target_grouplist, arg2, 0);
        }
        return get_errno(setgroups(gidsetsize, grouplist));
    }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
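        /*
         * arg1 selects an OSF/1-style information code; only the IEEE
         * floating-point control word is actually emulated, anything
         * else fails with -TARGET_EOPNOTSUPP.
         */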
11401 ret = -TARGET_EOPNOTSUPP; 11402 switch (arg1) { 11403 case TARGET_GSI_IEEE_FP_CONTROL: 11404 { 11405 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); 11406 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr; 11407 11408 swcr &= ~SWCR_STATUS_MASK; 11409 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; 11410 11411 if (put_user_u64 (swcr, arg2)) 11412 return -TARGET_EFAULT; 11413 ret = 0; 11414 } 11415 break; 11416 11417 /* case GSI_IEEE_STATE_AT_SIGNAL: 11418 -- Not implemented in linux kernel. 11419 case GSI_UACPROC: 11420 -- Retrieves current unaligned access state; not much used. 11421 case GSI_PROC_TYPE: 11422 -- Retrieves implver information; surely not used. 11423 case GSI_GET_HWRPB: 11424 -- Grabs a copy of the HWRPB; surely not used. 11425 */ 11426 } 11427 return ret; 11428 #endif 11429 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11430 /* Alpha specific */ 11431 case TARGET_NR_osf_setsysinfo: 11432 ret = -TARGET_EOPNOTSUPP; 11433 switch (arg1) { 11434 case TARGET_SSI_IEEE_FP_CONTROL: 11435 { 11436 uint64_t swcr, fpcr; 11437 11438 if (get_user_u64 (swcr, arg2)) { 11439 return -TARGET_EFAULT; 11440 } 11441 11442 /* 11443 * The kernel calls swcr_update_status to update the 11444 * status bits from the fpcr at every point that it 11445 * could be queried. Therefore, we store the status 11446 * bits only in FPCR. 11447 */ 11448 ((CPUAlphaState *)cpu_env)->swcr 11449 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); 11450 11451 fpcr = cpu_alpha_load_fpcr(cpu_env); 11452 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); 11453 fpcr |= alpha_ieee_swcr_to_fpcr(swcr); 11454 cpu_alpha_store_fpcr(cpu_env, fpcr); 11455 ret = 0; 11456 } 11457 break; 11458 11459 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11460 { 11461 uint64_t exc, fpcr, fex; 11462 11463 if (get_user_u64(exc, arg2)) { 11464 return -TARGET_EFAULT; 11465 } 11466 exc &= SWCR_STATUS_MASK; 11467 fpcr = cpu_alpha_load_fpcr(cpu_env); 11468 11469 /* Old exceptions are not signaled. */ 11470 fex = alpha_ieee_fpcr_to_swcr(fpcr); 11471 fex = exc & ~fex; 11472 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; 11473 fex &= ((CPUArchState *)cpu_env)->swcr; 11474 11475 /* Update the hardware fpcr. */ 11476 fpcr |= alpha_ieee_swcr_to_fpcr(exc); 11477 cpu_alpha_store_fpcr(cpu_env, fpcr); 11478 11479 if (fex) { 11480 int si_code = TARGET_FPE_FLTUNK; 11481 target_siginfo_t info; 11482 11483 if (fex & SWCR_TRAP_ENABLE_DNO) { 11484 si_code = TARGET_FPE_FLTUND; 11485 } 11486 if (fex & SWCR_TRAP_ENABLE_INE) { 11487 si_code = TARGET_FPE_FLTRES; 11488 } 11489 if (fex & SWCR_TRAP_ENABLE_UNF) { 11490 si_code = TARGET_FPE_FLTUND; 11491 } 11492 if (fex & SWCR_TRAP_ENABLE_OVF) { 11493 si_code = TARGET_FPE_FLTOVF; 11494 } 11495 if (fex & SWCR_TRAP_ENABLE_DZE) { 11496 si_code = TARGET_FPE_FLTDIV; 11497 } 11498 if (fex & SWCR_TRAP_ENABLE_INV) { 11499 si_code = TARGET_FPE_FLTINV; 11500 } 11501 11502 info.si_signo = SIGFPE; 11503 info.si_errno = 0; 11504 info.si_code = si_code; 11505 info._sifields._sigfault._addr 11506 = ((CPUArchState *)cpu_env)->pc; 11507 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11508 QEMU_SI_FAULT, &info); 11509 } 11510 ret = 0; 11511 } 11512 break; 11513 11514 /* case SSI_NVPAIRS: 11515 -- Used with SSIN_UACPROC to enable unaligned accesses. 11516 case SSI_IEEE_STATE_AT_SIGNAL: 11517 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11518 -- Not implemented in linux kernel 11519 */ 11520 } 11521 return ret; 11522 #endif 11523 #ifdef TARGET_NR_osf_sigprocmask 11524 /* Alpha specific. 
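       Unlike sigprocmask(2), the old signal mask is returned as the
       syscall return value rather than through a pointer argument.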
*/ 11525 case TARGET_NR_osf_sigprocmask: 11526 { 11527 abi_ulong mask; 11528 int how; 11529 sigset_t set, oldset; 11530 11531 switch(arg1) { 11532 case TARGET_SIG_BLOCK: 11533 how = SIG_BLOCK; 11534 break; 11535 case TARGET_SIG_UNBLOCK: 11536 how = SIG_UNBLOCK; 11537 break; 11538 case TARGET_SIG_SETMASK: 11539 how = SIG_SETMASK; 11540 break; 11541 default: 11542 return -TARGET_EINVAL; 11543 } 11544 mask = arg2; 11545 target_to_host_old_sigset(&set, &mask); 11546 ret = do_sigprocmask(how, &set, &oldset); 11547 if (!ret) { 11548 host_to_target_old_sigset(&mask, &oldset); 11549 ret = mask; 11550 } 11551 } 11552 return ret; 11553 #endif 11554 11555 #ifdef TARGET_NR_getgid32 11556 case TARGET_NR_getgid32: 11557 return get_errno(getgid()); 11558 #endif 11559 #ifdef TARGET_NR_geteuid32 11560 case TARGET_NR_geteuid32: 11561 return get_errno(geteuid()); 11562 #endif 11563 #ifdef TARGET_NR_getegid32 11564 case TARGET_NR_getegid32: 11565 return get_errno(getegid()); 11566 #endif 11567 #ifdef TARGET_NR_setreuid32 11568 case TARGET_NR_setreuid32: 11569 return get_errno(setreuid(arg1, arg2)); 11570 #endif 11571 #ifdef TARGET_NR_setregid32 11572 case TARGET_NR_setregid32: 11573 return get_errno(setregid(arg1, arg2)); 11574 #endif 11575 #ifdef TARGET_NR_getgroups32 11576 case TARGET_NR_getgroups32: 11577 { 11578 int gidsetsize = arg1; 11579 uint32_t *target_grouplist; 11580 gid_t *grouplist; 11581 int i; 11582 11583 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11584 ret = get_errno(getgroups(gidsetsize, grouplist)); 11585 if (gidsetsize == 0) 11586 return ret; 11587 if (!is_error(ret)) { 11588 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11589 if (!target_grouplist) { 11590 return -TARGET_EFAULT; 11591 } 11592 for(i = 0;i < ret; i++) 11593 target_grouplist[i] = tswap32(grouplist[i]); 11594 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11595 } 11596 } 11597 return ret; 11598 #endif 11599 #ifdef TARGET_NR_setgroups32 11600 case TARGET_NR_setgroups32: 11601 { 11602 int gidsetsize = arg1; 11603 uint32_t *target_grouplist; 11604 gid_t *grouplist; 11605 int i; 11606 11607 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11608 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11609 if (!target_grouplist) { 11610 return -TARGET_EFAULT; 11611 } 11612 for(i = 0;i < gidsetsize; i++) 11613 grouplist[i] = tswap32(target_grouplist[i]); 11614 unlock_user(target_grouplist, arg2, 0); 11615 return get_errno(setgroups(gidsetsize, grouplist)); 11616 } 11617 #endif 11618 #ifdef TARGET_NR_fchown32 11619 case TARGET_NR_fchown32: 11620 return get_errno(fchown(arg1, arg2, arg3)); 11621 #endif 11622 #ifdef TARGET_NR_setresuid32 11623 case TARGET_NR_setresuid32: 11624 return get_errno(sys_setresuid(arg1, arg2, arg3)); 11625 #endif 11626 #ifdef TARGET_NR_getresuid32 11627 case TARGET_NR_getresuid32: 11628 { 11629 uid_t ruid, euid, suid; 11630 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11631 if (!is_error(ret)) { 11632 if (put_user_u32(ruid, arg1) 11633 || put_user_u32(euid, arg2) 11634 || put_user_u32(suid, arg3)) 11635 return -TARGET_EFAULT; 11636 } 11637 } 11638 return ret; 11639 #endif 11640 #ifdef TARGET_NR_setresgid32 11641 case TARGET_NR_setresgid32: 11642 return get_errno(sys_setresgid(arg1, arg2, arg3)); 11643 #endif 11644 #ifdef TARGET_NR_getresgid32 11645 case TARGET_NR_getresgid32: 11646 { 11647 gid_t rgid, egid, sgid; 11648 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11649 if (!is_error(ret)) { 11650 if (put_user_u32(rgid, arg1) 11651 || put_user_u32(egid, 
arg2) 11652 || put_user_u32(sgid, arg3)) 11653 return -TARGET_EFAULT; 11654 } 11655 } 11656 return ret; 11657 #endif 11658 #ifdef TARGET_NR_chown32 11659 case TARGET_NR_chown32: 11660 if (!(p = lock_user_string(arg1))) 11661 return -TARGET_EFAULT; 11662 ret = get_errno(chown(p, arg2, arg3)); 11663 unlock_user(p, arg1, 0); 11664 return ret; 11665 #endif 11666 #ifdef TARGET_NR_setuid32 11667 case TARGET_NR_setuid32: 11668 return get_errno(sys_setuid(arg1)); 11669 #endif 11670 #ifdef TARGET_NR_setgid32 11671 case TARGET_NR_setgid32: 11672 return get_errno(sys_setgid(arg1)); 11673 #endif 11674 #ifdef TARGET_NR_setfsuid32 11675 case TARGET_NR_setfsuid32: 11676 return get_errno(setfsuid(arg1)); 11677 #endif 11678 #ifdef TARGET_NR_setfsgid32 11679 case TARGET_NR_setfsgid32: 11680 return get_errno(setfsgid(arg1)); 11681 #endif 11682 #ifdef TARGET_NR_mincore 11683 case TARGET_NR_mincore: 11684 { 11685 void *a = lock_user(VERIFY_READ, arg1, arg2, 0); 11686 if (!a) { 11687 return -TARGET_ENOMEM; 11688 } 11689 p = lock_user_string(arg3); 11690 if (!p) { 11691 ret = -TARGET_EFAULT; 11692 } else { 11693 ret = get_errno(mincore(a, arg2, p)); 11694 unlock_user(p, arg3, ret); 11695 } 11696 unlock_user(a, arg1, 0); 11697 } 11698 return ret; 11699 #endif 11700 #ifdef TARGET_NR_arm_fadvise64_64 11701 case TARGET_NR_arm_fadvise64_64: 11702 /* arm_fadvise64_64 looks like fadvise64_64 but 11703 * with different argument order: fd, advice, offset, len 11704 * rather than the usual fd, offset, len, advice. 11705 * Note that offset and len are both 64-bit so appear as 11706 * pairs of 32-bit registers. 11707 */ 11708 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11709 target_offset64(arg5, arg6), arg2); 11710 return -host_to_target_errno(ret); 11711 #endif 11712 11713 #if TARGET_ABI_BITS == 32 11714 11715 #ifdef TARGET_NR_fadvise64_64 11716 case TARGET_NR_fadvise64_64: 11717 #if defined(TARGET_PPC) || defined(TARGET_XTENSA) 11718 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11719 ret = arg2; 11720 arg2 = arg3; 11721 arg3 = arg4; 11722 arg4 = arg5; 11723 arg5 = arg6; 11724 arg6 = ret; 11725 #else 11726 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11727 if (regpairs_aligned(cpu_env, num)) { 11728 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11729 arg2 = arg3; 11730 arg3 = arg4; 11731 arg4 = arg5; 11732 arg5 = arg6; 11733 arg6 = arg7; 11734 } 11735 #endif 11736 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), 11737 target_offset64(arg4, arg5), arg6); 11738 return -host_to_target_errno(ret); 11739 #endif 11740 11741 #ifdef TARGET_NR_fadvise64 11742 case TARGET_NR_fadvise64: 11743 /* 5 args: fd, offset (high, low), len, advice */ 11744 if (regpairs_aligned(cpu_env, num)) { 11745 /* offset is in (3,4), len in 5 and advice in 6 */ 11746 arg2 = arg3; 11747 arg3 = arg4; 11748 arg4 = arg5; 11749 arg5 = arg6; 11750 } 11751 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5); 11752 return -host_to_target_errno(ret); 11753 #endif 11754 11755 #else /* not a 32-bit ABI */ 11756 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11757 #ifdef TARGET_NR_fadvise64_64 11758 case TARGET_NR_fadvise64_64: 11759 #endif 11760 #ifdef TARGET_NR_fadvise64 11761 case TARGET_NR_fadvise64: 11762 #endif 11763 #ifdef TARGET_S390X 11764 switch (arg4) { 11765 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11766 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11767 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11768 
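        /*
         * The s390x ABI numbers POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE
         * as 6 and 7 rather than the generic 4 and 5, so the guest's
         * advice value is remapped onto the host constants here; guest
         * values 4 and 5 are unused in the s390x numbering and are
         * deliberately forced to invalid host values.
         */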
case 7: arg4 = POSIX_FADV_NOREUSE; break; 11769 default: break; 11770 } 11771 #endif 11772 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11773 #endif 11774 #endif /* end of 64-bit ABI fadvise handling */ 11775 11776 #ifdef TARGET_NR_madvise 11777 case TARGET_NR_madvise: 11778 /* A straight passthrough may not be safe because qemu sometimes 11779 turns private file-backed mappings into anonymous mappings. 11780 This will break MADV_DONTNEED. 11781 This is a hint, so ignoring and returning success is ok. */ 11782 return 0; 11783 #endif 11784 #ifdef TARGET_NR_fcntl64 11785 case TARGET_NR_fcntl64: 11786 { 11787 int cmd; 11788 struct flock64 fl; 11789 from_flock64_fn *copyfrom = copy_from_user_flock64; 11790 to_flock64_fn *copyto = copy_to_user_flock64; 11791 11792 #ifdef TARGET_ARM 11793 if (!((CPUARMState *)cpu_env)->eabi) { 11794 copyfrom = copy_from_user_oabi_flock64; 11795 copyto = copy_to_user_oabi_flock64; 11796 } 11797 #endif 11798 11799 cmd = target_to_host_fcntl_cmd(arg2); 11800 if (cmd == -TARGET_EINVAL) { 11801 return cmd; 11802 } 11803 11804 switch(arg2) { 11805 case TARGET_F_GETLK64: 11806 ret = copyfrom(&fl, arg3); 11807 if (ret) { 11808 break; 11809 } 11810 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11811 if (ret == 0) { 11812 ret = copyto(arg3, &fl); 11813 } 11814 break; 11815 11816 case TARGET_F_SETLK64: 11817 case TARGET_F_SETLKW64: 11818 ret = copyfrom(&fl, arg3); 11819 if (ret) { 11820 break; 11821 } 11822 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11823 break; 11824 default: 11825 ret = do_fcntl(arg1, arg2, arg3); 11826 break; 11827 } 11828 return ret; 11829 } 11830 #endif 11831 #ifdef TARGET_NR_cacheflush 11832 case TARGET_NR_cacheflush: 11833 /* self-modifying code is handled automatically, so nothing needed */ 11834 return 0; 11835 #endif 11836 #ifdef TARGET_NR_getpagesize 11837 case TARGET_NR_getpagesize: 11838 return TARGET_PAGE_SIZE; 11839 #endif 11840 case TARGET_NR_gettid: 11841 return get_errno(sys_gettid()); 11842 #ifdef TARGET_NR_readahead 11843 case TARGET_NR_readahead: 11844 #if TARGET_ABI_BITS == 32 11845 if (regpairs_aligned(cpu_env, num)) { 11846 arg2 = arg3; 11847 arg3 = arg4; 11848 arg4 = arg5; 11849 } 11850 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11851 #else 11852 ret = get_errno(readahead(arg1, arg2, arg3)); 11853 #endif 11854 return ret; 11855 #endif 11856 #ifdef CONFIG_ATTR 11857 #ifdef TARGET_NR_setxattr 11858 case TARGET_NR_listxattr: 11859 case TARGET_NR_llistxattr: 11860 { 11861 void *p, *b = 0; 11862 if (arg2) { 11863 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11864 if (!b) { 11865 return -TARGET_EFAULT; 11866 } 11867 } 11868 p = lock_user_string(arg1); 11869 if (p) { 11870 if (num == TARGET_NR_listxattr) { 11871 ret = get_errno(listxattr(p, b, arg3)); 11872 } else { 11873 ret = get_errno(llistxattr(p, b, arg3)); 11874 } 11875 } else { 11876 ret = -TARGET_EFAULT; 11877 } 11878 unlock_user(p, arg1, 0); 11879 unlock_user(b, arg2, arg3); 11880 return ret; 11881 } 11882 case TARGET_NR_flistxattr: 11883 { 11884 void *b = 0; 11885 if (arg2) { 11886 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11887 if (!b) { 11888 return -TARGET_EFAULT; 11889 } 11890 } 11891 ret = get_errno(flistxattr(arg1, b, arg3)); 11892 unlock_user(b, arg2, arg3); 11893 return ret; 11894 } 11895 case TARGET_NR_setxattr: 11896 case TARGET_NR_lsetxattr: 11897 { 11898 void *p, *n, *v = 0; 11899 if (arg3) { 11900 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11901 if (!v) { 11902 return -TARGET_EFAULT; 11903 } 11904 } 11905 p = 
lock_user_string(arg1); 11906 n = lock_user_string(arg2); 11907 if (p && n) { 11908 if (num == TARGET_NR_setxattr) { 11909 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11910 } else { 11911 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11912 } 11913 } else { 11914 ret = -TARGET_EFAULT; 11915 } 11916 unlock_user(p, arg1, 0); 11917 unlock_user(n, arg2, 0); 11918 unlock_user(v, arg3, 0); 11919 } 11920 return ret; 11921 case TARGET_NR_fsetxattr: 11922 { 11923 void *n, *v = 0; 11924 if (arg3) { 11925 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11926 if (!v) { 11927 return -TARGET_EFAULT; 11928 } 11929 } 11930 n = lock_user_string(arg2); 11931 if (n) { 11932 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11933 } else { 11934 ret = -TARGET_EFAULT; 11935 } 11936 unlock_user(n, arg2, 0); 11937 unlock_user(v, arg3, 0); 11938 } 11939 return ret; 11940 case TARGET_NR_getxattr: 11941 case TARGET_NR_lgetxattr: 11942 { 11943 void *p, *n, *v = 0; 11944 if (arg3) { 11945 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11946 if (!v) { 11947 return -TARGET_EFAULT; 11948 } 11949 } 11950 p = lock_user_string(arg1); 11951 n = lock_user_string(arg2); 11952 if (p && n) { 11953 if (num == TARGET_NR_getxattr) { 11954 ret = get_errno(getxattr(p, n, v, arg4)); 11955 } else { 11956 ret = get_errno(lgetxattr(p, n, v, arg4)); 11957 } 11958 } else { 11959 ret = -TARGET_EFAULT; 11960 } 11961 unlock_user(p, arg1, 0); 11962 unlock_user(n, arg2, 0); 11963 unlock_user(v, arg3, arg4); 11964 } 11965 return ret; 11966 case TARGET_NR_fgetxattr: 11967 { 11968 void *n, *v = 0; 11969 if (arg3) { 11970 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11971 if (!v) { 11972 return -TARGET_EFAULT; 11973 } 11974 } 11975 n = lock_user_string(arg2); 11976 if (n) { 11977 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11978 } else { 11979 ret = -TARGET_EFAULT; 11980 } 11981 unlock_user(n, arg2, 0); 11982 unlock_user(v, arg3, arg4); 11983 } 11984 return ret; 11985 case TARGET_NR_removexattr: 11986 case TARGET_NR_lremovexattr: 11987 { 11988 void *p, *n; 11989 p = lock_user_string(arg1); 11990 n = lock_user_string(arg2); 11991 if (p && n) { 11992 if (num == TARGET_NR_removexattr) { 11993 ret = get_errno(removexattr(p, n)); 11994 } else { 11995 ret = get_errno(lremovexattr(p, n)); 11996 } 11997 } else { 11998 ret = -TARGET_EFAULT; 11999 } 12000 unlock_user(p, arg1, 0); 12001 unlock_user(n, arg2, 0); 12002 } 12003 return ret; 12004 case TARGET_NR_fremovexattr: 12005 { 12006 void *n; 12007 n = lock_user_string(arg2); 12008 if (n) { 12009 ret = get_errno(fremovexattr(arg1, n)); 12010 } else { 12011 ret = -TARGET_EFAULT; 12012 } 12013 unlock_user(n, arg2, 0); 12014 } 12015 return ret; 12016 #endif 12017 #endif /* CONFIG_ATTR */ 12018 #ifdef TARGET_NR_set_thread_area 12019 case TARGET_NR_set_thread_area: 12020 #if defined(TARGET_MIPS) 12021 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 12022 return 0; 12023 #elif defined(TARGET_CRIS) 12024 if (arg1 & 0xff) 12025 ret = -TARGET_EINVAL; 12026 else { 12027 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 12028 ret = 0; 12029 } 12030 return ret; 12031 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 12032 return do_set_thread_area(cpu_env, arg1); 12033 #elif defined(TARGET_M68K) 12034 { 12035 TaskState *ts = cpu->opaque; 12036 ts->tp_value = arg1; 12037 return 0; 12038 } 12039 #else 12040 return -TARGET_ENOSYS; 12041 #endif 12042 #endif 12043 #ifdef TARGET_NR_get_thread_area 12044 case TARGET_NR_get_thread_area: 12045 #if defined(TARGET_I386) && defined(TARGET_ABI32) 12046 return 
do_get_thread_area(cpu_env, arg1); 12047 #elif defined(TARGET_M68K) 12048 { 12049 TaskState *ts = cpu->opaque; 12050 return ts->tp_value; 12051 } 12052 #else 12053 return -TARGET_ENOSYS; 12054 #endif 12055 #endif 12056 #ifdef TARGET_NR_getdomainname 12057 case TARGET_NR_getdomainname: 12058 return -TARGET_ENOSYS; 12059 #endif 12060 12061 #ifdef TARGET_NR_clock_settime 12062 case TARGET_NR_clock_settime: 12063 { 12064 struct timespec ts; 12065 12066 ret = target_to_host_timespec(&ts, arg2); 12067 if (!is_error(ret)) { 12068 ret = get_errno(clock_settime(arg1, &ts)); 12069 } 12070 return ret; 12071 } 12072 #endif 12073 #ifdef TARGET_NR_clock_settime64 12074 case TARGET_NR_clock_settime64: 12075 { 12076 struct timespec ts; 12077 12078 ret = target_to_host_timespec64(&ts, arg2); 12079 if (!is_error(ret)) { 12080 ret = get_errno(clock_settime(arg1, &ts)); 12081 } 12082 return ret; 12083 } 12084 #endif 12085 #ifdef TARGET_NR_clock_gettime 12086 case TARGET_NR_clock_gettime: 12087 { 12088 struct timespec ts; 12089 ret = get_errno(clock_gettime(arg1, &ts)); 12090 if (!is_error(ret)) { 12091 ret = host_to_target_timespec(arg2, &ts); 12092 } 12093 return ret; 12094 } 12095 #endif 12096 #ifdef TARGET_NR_clock_gettime64 12097 case TARGET_NR_clock_gettime64: 12098 { 12099 struct timespec ts; 12100 ret = get_errno(clock_gettime(arg1, &ts)); 12101 if (!is_error(ret)) { 12102 ret = host_to_target_timespec64(arg2, &ts); 12103 } 12104 return ret; 12105 } 12106 #endif 12107 #ifdef TARGET_NR_clock_getres 12108 case TARGET_NR_clock_getres: 12109 { 12110 struct timespec ts; 12111 ret = get_errno(clock_getres(arg1, &ts)); 12112 if (!is_error(ret)) { 12113 host_to_target_timespec(arg2, &ts); 12114 } 12115 return ret; 12116 } 12117 #endif 12118 #ifdef TARGET_NR_clock_getres_time64 12119 case TARGET_NR_clock_getres_time64: 12120 { 12121 struct timespec ts; 12122 ret = get_errno(clock_getres(arg1, &ts)); 12123 if (!is_error(ret)) { 12124 host_to_target_timespec64(arg2, &ts); 12125 } 12126 return ret; 12127 } 12128 #endif 12129 #ifdef TARGET_NR_clock_nanosleep 12130 case TARGET_NR_clock_nanosleep: 12131 { 12132 struct timespec ts; 12133 if (target_to_host_timespec(&ts, arg3)) { 12134 return -TARGET_EFAULT; 12135 } 12136 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12137 &ts, arg4 ? &ts : NULL)); 12138 /* 12139 * if the call is interrupted by a signal handler, it fails 12140 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not 12141 * TIMER_ABSTIME, it returns the remaining unslept time in arg4. 12142 */ 12143 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12144 host_to_target_timespec(arg4, &ts)) { 12145 return -TARGET_EFAULT; 12146 } 12147 12148 return ret; 12149 } 12150 #endif 12151 #ifdef TARGET_NR_clock_nanosleep_time64 12152 case TARGET_NR_clock_nanosleep_time64: 12153 { 12154 struct timespec ts; 12155 12156 if (target_to_host_timespec64(&ts, arg3)) { 12157 return -TARGET_EFAULT; 12158 } 12159 12160 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12161 &ts, arg4 ? 
&ts : NULL)); 12162 12163 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12164 host_to_target_timespec64(arg4, &ts)) { 12165 return -TARGET_EFAULT; 12166 } 12167 return ret; 12168 } 12169 #endif 12170 12171 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 12172 case TARGET_NR_set_tid_address: 12173 return get_errno(set_tid_address((int *)g2h(cpu, arg1))); 12174 #endif 12175 12176 case TARGET_NR_tkill: 12177 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12178 12179 case TARGET_NR_tgkill: 12180 return get_errno(safe_tgkill((int)arg1, (int)arg2, 12181 target_to_host_signal(arg3))); 12182 12183 #ifdef TARGET_NR_set_robust_list 12184 case TARGET_NR_set_robust_list: 12185 case TARGET_NR_get_robust_list: 12186 /* The ABI for supporting robust futexes has userspace pass 12187 * the kernel a pointer to a linked list which is updated by 12188 * userspace after the syscall; the list is walked by the kernel 12189 * when the thread exits. Since the linked list in QEMU guest 12190 * memory isn't a valid linked list for the host and we have 12191 * no way to reliably intercept the thread-death event, we can't 12192 * support these. Silently return ENOSYS so that guest userspace 12193 * falls back to a non-robust futex implementation (which should 12194 * be OK except in the corner case of the guest crashing while 12195 * holding a mutex that is shared with another process via 12196 * shared memory). 12197 */ 12198 return -TARGET_ENOSYS; 12199 #endif 12200 12201 #if defined(TARGET_NR_utimensat) 12202 case TARGET_NR_utimensat: 12203 { 12204 struct timespec *tsp, ts[2]; 12205 if (!arg3) { 12206 tsp = NULL; 12207 } else { 12208 if (target_to_host_timespec(ts, arg3)) { 12209 return -TARGET_EFAULT; 12210 } 12211 if (target_to_host_timespec(ts + 1, arg3 + 12212 sizeof(struct target_timespec))) { 12213 return -TARGET_EFAULT; 12214 } 12215 tsp = ts; 12216 } 12217 if (!arg2) 12218 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12219 else { 12220 if (!(p = lock_user_string(arg2))) { 12221 return -TARGET_EFAULT; 12222 } 12223 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12224 unlock_user(p, arg2, 0); 12225 } 12226 } 12227 return ret; 12228 #endif 12229 #ifdef TARGET_NR_utimensat_time64 12230 case TARGET_NR_utimensat_time64: 12231 { 12232 struct timespec *tsp, ts[2]; 12233 if (!arg3) { 12234 tsp = NULL; 12235 } else { 12236 if (target_to_host_timespec64(ts, arg3)) { 12237 return -TARGET_EFAULT; 12238 } 12239 if (target_to_host_timespec64(ts + 1, arg3 + 12240 sizeof(struct target__kernel_timespec))) { 12241 return -TARGET_EFAULT; 12242 } 12243 tsp = ts; 12244 } 12245 if (!arg2) 12246 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12247 else { 12248 p = lock_user_string(arg2); 12249 if (!p) { 12250 return -TARGET_EFAULT; 12251 } 12252 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12253 unlock_user(p, arg2, 0); 12254 } 12255 } 12256 return ret; 12257 #endif 12258 #ifdef TARGET_NR_futex 12259 case TARGET_NR_futex: 12260 return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6); 12261 #endif 12262 #ifdef TARGET_NR_futex_time64 12263 case TARGET_NR_futex_time64: 12264 return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6); 12265 #endif 12266 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 12267 case TARGET_NR_inotify_init: 12268 ret = get_errno(sys_inotify_init()); 12269 if (ret >= 0) { 12270 fd_trans_register(ret, &target_inotify_trans); 12271 } 12272 return ret; 12273 #endif 12274 #ifdef 
CONFIG_INOTIFY1 12275 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 12276 case TARGET_NR_inotify_init1: 12277 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 12278 fcntl_flags_tbl))); 12279 if (ret >= 0) { 12280 fd_trans_register(ret, &target_inotify_trans); 12281 } 12282 return ret; 12283 #endif 12284 #endif 12285 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 12286 case TARGET_NR_inotify_add_watch: 12287 p = lock_user_string(arg2); 12288 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 12289 unlock_user(p, arg2, 0); 12290 return ret; 12291 #endif 12292 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 12293 case TARGET_NR_inotify_rm_watch: 12294 return get_errno(sys_inotify_rm_watch(arg1, arg2)); 12295 #endif 12296 12297 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12298 case TARGET_NR_mq_open: 12299 { 12300 struct mq_attr posix_mq_attr; 12301 struct mq_attr *pposix_mq_attr; 12302 int host_flags; 12303 12304 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12305 pposix_mq_attr = NULL; 12306 if (arg4) { 12307 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12308 return -TARGET_EFAULT; 12309 } 12310 pposix_mq_attr = &posix_mq_attr; 12311 } 12312 p = lock_user_string(arg1 - 1); 12313 if (!p) { 12314 return -TARGET_EFAULT; 12315 } 12316 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12317 unlock_user (p, arg1, 0); 12318 } 12319 return ret; 12320 12321 case TARGET_NR_mq_unlink: 12322 p = lock_user_string(arg1 - 1); 12323 if (!p) { 12324 return -TARGET_EFAULT; 12325 } 12326 ret = get_errno(mq_unlink(p)); 12327 unlock_user (p, arg1, 0); 12328 return ret; 12329 12330 #ifdef TARGET_NR_mq_timedsend 12331 case TARGET_NR_mq_timedsend: 12332 { 12333 struct timespec ts; 12334 12335 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12336 if (arg5 != 0) { 12337 if (target_to_host_timespec(&ts, arg5)) { 12338 return -TARGET_EFAULT; 12339 } 12340 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12341 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12342 return -TARGET_EFAULT; 12343 } 12344 } else { 12345 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12346 } 12347 unlock_user (p, arg2, arg3); 12348 } 12349 return ret; 12350 #endif 12351 #ifdef TARGET_NR_mq_timedsend_time64 12352 case TARGET_NR_mq_timedsend_time64: 12353 { 12354 struct timespec ts; 12355 12356 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12357 if (arg5 != 0) { 12358 if (target_to_host_timespec64(&ts, arg5)) { 12359 return -TARGET_EFAULT; 12360 } 12361 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12362 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12363 return -TARGET_EFAULT; 12364 } 12365 } else { 12366 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12367 } 12368 unlock_user(p, arg2, arg3); 12369 } 12370 return ret; 12371 #endif 12372 12373 #ifdef TARGET_NR_mq_timedreceive 12374 case TARGET_NR_mq_timedreceive: 12375 { 12376 struct timespec ts; 12377 unsigned int prio; 12378 12379 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12380 if (arg5 != 0) { 12381 if (target_to_host_timespec(&ts, arg5)) { 12382 return -TARGET_EFAULT; 12383 } 12384 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12385 &prio, &ts)); 12386 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12387 return -TARGET_EFAULT; 12388 } 12389 } else { 12390 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12391 &prio, NULL)); 12392 } 12393 
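            /*
             * Unlock with the full length arg3 so that, when bounce
             * buffers are in use, the received message is copied back
             * to guest memory.
             */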
unlock_user (p, arg2, arg3); 12394 if (arg4 != 0) 12395 put_user_u32(prio, arg4); 12396 } 12397 return ret; 12398 #endif 12399 #ifdef TARGET_NR_mq_timedreceive_time64 12400 case TARGET_NR_mq_timedreceive_time64: 12401 { 12402 struct timespec ts; 12403 unsigned int prio; 12404 12405 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12406 if (arg5 != 0) { 12407 if (target_to_host_timespec64(&ts, arg5)) { 12408 return -TARGET_EFAULT; 12409 } 12410 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12411 &prio, &ts)); 12412 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12413 return -TARGET_EFAULT; 12414 } 12415 } else { 12416 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12417 &prio, NULL)); 12418 } 12419 unlock_user(p, arg2, arg3); 12420 if (arg4 != 0) { 12421 put_user_u32(prio, arg4); 12422 } 12423 } 12424 return ret; 12425 #endif 12426 12427 /* Not implemented for now... */ 12428 /* case TARGET_NR_mq_notify: */ 12429 /* break; */ 12430 12431 case TARGET_NR_mq_getsetattr: 12432 { 12433 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12434 ret = 0; 12435 if (arg2 != 0) { 12436 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12437 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, 12438 &posix_mq_attr_out)); 12439 } else if (arg3 != 0) { 12440 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out)); 12441 } 12442 if (ret == 0 && arg3 != 0) { 12443 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12444 } 12445 } 12446 return ret; 12447 #endif 12448 12449 #ifdef CONFIG_SPLICE 12450 #ifdef TARGET_NR_tee 12451 case TARGET_NR_tee: 12452 { 12453 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12454 } 12455 return ret; 12456 #endif 12457 #ifdef TARGET_NR_splice 12458 case TARGET_NR_splice: 12459 { 12460 loff_t loff_in, loff_out; 12461 loff_t *ploff_in = NULL, *ploff_out = NULL; 12462 if (arg2) { 12463 if (get_user_u64(loff_in, arg2)) { 12464 return -TARGET_EFAULT; 12465 } 12466 ploff_in = &loff_in; 12467 } 12468 if (arg4) { 12469 if (get_user_u64(loff_out, arg4)) { 12470 return -TARGET_EFAULT; 12471 } 12472 ploff_out = &loff_out; 12473 } 12474 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12475 if (arg2) { 12476 if (put_user_u64(loff_in, arg2)) { 12477 return -TARGET_EFAULT; 12478 } 12479 } 12480 if (arg4) { 12481 if (put_user_u64(loff_out, arg4)) { 12482 return -TARGET_EFAULT; 12483 } 12484 } 12485 } 12486 return ret; 12487 #endif 12488 #ifdef TARGET_NR_vmsplice 12489 case TARGET_NR_vmsplice: 12490 { 12491 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12492 if (vec != NULL) { 12493 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12494 unlock_iovec(vec, arg2, arg3, 0); 12495 } else { 12496 ret = -host_to_target_errno(errno); 12497 } 12498 } 12499 return ret; 12500 #endif 12501 #endif /* CONFIG_SPLICE */ 12502 #ifdef CONFIG_EVENTFD 12503 #if defined(TARGET_NR_eventfd) 12504 case TARGET_NR_eventfd: 12505 ret = get_errno(eventfd(arg1, 0)); 12506 if (ret >= 0) { 12507 fd_trans_register(ret, &target_eventfd_trans); 12508 } 12509 return ret; 12510 #endif 12511 #if defined(TARGET_NR_eventfd2) 12512 case TARGET_NR_eventfd2: 12513 { 12514 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)); 12515 if (arg2 & TARGET_O_NONBLOCK) { 12516 host_flags |= O_NONBLOCK; 12517 } 12518 if (arg2 & TARGET_O_CLOEXEC) { 12519 host_flags |= O_CLOEXEC; 12520 } 12521 ret = get_errno(eventfd(arg1, host_flags)); 12522 if (ret >= 0) { 12523 fd_trans_register(ret, &target_eventfd_trans); 12524 } 12525 return ret; 12526 } 12527 #endif 12528 #endif /* 
CONFIG_EVENTFD */ 12529 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12530 case TARGET_NR_fallocate: 12531 #if TARGET_ABI_BITS == 32 12532 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12533 target_offset64(arg5, arg6))); 12534 #else 12535 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12536 #endif 12537 return ret; 12538 #endif 12539 #if defined(CONFIG_SYNC_FILE_RANGE) 12540 #if defined(TARGET_NR_sync_file_range) 12541 case TARGET_NR_sync_file_range: 12542 #if TARGET_ABI_BITS == 32 12543 #if defined(TARGET_MIPS) 12544 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12545 target_offset64(arg5, arg6), arg7)); 12546 #else 12547 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12548 target_offset64(arg4, arg5), arg6)); 12549 #endif /* !TARGET_MIPS */ 12550 #else 12551 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12552 #endif 12553 return ret; 12554 #endif 12555 #if defined(TARGET_NR_sync_file_range2) || \ 12556 defined(TARGET_NR_arm_sync_file_range) 12557 #if defined(TARGET_NR_sync_file_range2) 12558 case TARGET_NR_sync_file_range2: 12559 #endif 12560 #if defined(TARGET_NR_arm_sync_file_range) 12561 case TARGET_NR_arm_sync_file_range: 12562 #endif 12563 /* This is like sync_file_range but the arguments are reordered */ 12564 #if TARGET_ABI_BITS == 32 12565 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12566 target_offset64(arg5, arg6), arg2)); 12567 #else 12568 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12569 #endif 12570 return ret; 12571 #endif 12572 #endif 12573 #if defined(TARGET_NR_signalfd4) 12574 case TARGET_NR_signalfd4: 12575 return do_signalfd4(arg1, arg2, arg4); 12576 #endif 12577 #if defined(TARGET_NR_signalfd) 12578 case TARGET_NR_signalfd: 12579 return do_signalfd4(arg1, arg2, 0); 12580 #endif 12581 #if defined(CONFIG_EPOLL) 12582 #if defined(TARGET_NR_epoll_create) 12583 case TARGET_NR_epoll_create: 12584 return get_errno(epoll_create(arg1)); 12585 #endif 12586 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12587 case TARGET_NR_epoll_create1: 12588 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 12589 #endif 12590 #if defined(TARGET_NR_epoll_ctl) 12591 case TARGET_NR_epoll_ctl: 12592 { 12593 struct epoll_event ep; 12594 struct epoll_event *epp = 0; 12595 if (arg4) { 12596 if (arg2 != EPOLL_CTL_DEL) { 12597 struct target_epoll_event *target_ep; 12598 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12599 return -TARGET_EFAULT; 12600 } 12601 ep.events = tswap32(target_ep->events); 12602 /* 12603 * The epoll_data_t union is just opaque data to the kernel, 12604 * so we transfer all 64 bits across and need not worry what 12605 * actual data type it is. 12606 */ 12607 ep.data.u64 = tswap64(target_ep->data.u64); 12608 unlock_user_struct(target_ep, arg4, 0); 12609 } 12610 /* 12611 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a 12612 * non-null pointer, even though this argument is ignored. 
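             * We therefore still hand the kernel a pointer to the local
             * ep for EPOLL_CTL_DEL; its contents are never examined.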
12613 * 12614 */ 12615 epp = &ep; 12616 } 12617 return get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12618 } 12619 #endif 12620 12621 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12622 #if defined(TARGET_NR_epoll_wait) 12623 case TARGET_NR_epoll_wait: 12624 #endif 12625 #if defined(TARGET_NR_epoll_pwait) 12626 case TARGET_NR_epoll_pwait: 12627 #endif 12628 { 12629 struct target_epoll_event *target_ep; 12630 struct epoll_event *ep; 12631 int epfd = arg1; 12632 int maxevents = arg3; 12633 int timeout = arg4; 12634 12635 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12636 return -TARGET_EINVAL; 12637 } 12638 12639 target_ep = lock_user(VERIFY_WRITE, arg2, 12640 maxevents * sizeof(struct target_epoll_event), 1); 12641 if (!target_ep) { 12642 return -TARGET_EFAULT; 12643 } 12644 12645 ep = g_try_new(struct epoll_event, maxevents); 12646 if (!ep) { 12647 unlock_user(target_ep, arg2, 0); 12648 return -TARGET_ENOMEM; 12649 } 12650 12651 switch (num) { 12652 #if defined(TARGET_NR_epoll_pwait) 12653 case TARGET_NR_epoll_pwait: 12654 { 12655 target_sigset_t *target_set; 12656 sigset_t _set, *set = &_set; 12657 12658 if (arg5) { 12659 if (arg6 != sizeof(target_sigset_t)) { 12660 ret = -TARGET_EINVAL; 12661 break; 12662 } 12663 12664 target_set = lock_user(VERIFY_READ, arg5, 12665 sizeof(target_sigset_t), 1); 12666 if (!target_set) { 12667 ret = -TARGET_EFAULT; 12668 break; 12669 } 12670 target_to_host_sigset(set, target_set); 12671 unlock_user(target_set, arg5, 0); 12672 } else { 12673 set = NULL; 12674 } 12675 12676 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12677 set, SIGSET_T_SIZE)); 12678 break; 12679 } 12680 #endif 12681 #if defined(TARGET_NR_epoll_wait) 12682 case TARGET_NR_epoll_wait: 12683 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12684 NULL, 0)); 12685 break; 12686 #endif 12687 default: 12688 ret = -TARGET_ENOSYS; 12689 } 12690 if (!is_error(ret)) { 12691 int i; 12692 for (i = 0; i < ret; i++) { 12693 target_ep[i].events = tswap32(ep[i].events); 12694 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12695 } 12696 unlock_user(target_ep, arg2, 12697 ret * sizeof(struct target_epoll_event)); 12698 } else { 12699 unlock_user(target_ep, arg2, 0); 12700 } 12701 g_free(ep); 12702 return ret; 12703 } 12704 #endif 12705 #endif 12706 #ifdef TARGET_NR_prlimit64 12707 case TARGET_NR_prlimit64: 12708 { 12709 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12710 struct target_rlimit64 *target_rnew, *target_rold; 12711 struct host_rlimit64 rnew, rold, *rnewp = 0; 12712 int resource = target_to_host_resource(arg2); 12713 12714 if (arg3 && (resource != RLIMIT_AS && 12715 resource != RLIMIT_DATA && 12716 resource != RLIMIT_STACK)) { 12717 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12718 return -TARGET_EFAULT; 12719 } 12720 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12721 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12722 unlock_user_struct(target_rnew, arg3, 0); 12723 rnewp = &rnew; 12724 } 12725 12726 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 12727 if (!is_error(ret) && arg4) { 12728 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12729 return -TARGET_EFAULT; 12730 } 12731 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12732 target_rold->rlim_max = tswap64(rold.rlim_max); 12733 unlock_user_struct(target_rold, arg4, 1); 12734 } 12735 return ret; 12736 } 12737 #endif 12738 #ifdef TARGET_NR_gethostname 12739 case TARGET_NR_gethostname: 12740 { 12741 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12742 if (name) { 12743 ret = get_errno(gethostname(name, arg2)); 12744 unlock_user(name, arg1, arg2); 12745 } else { 12746 ret = -TARGET_EFAULT; 12747 } 12748 return ret; 12749 } 12750 #endif 12751 #ifdef TARGET_NR_atomic_cmpxchg_32 12752 case TARGET_NR_atomic_cmpxchg_32: 12753 { 12754 /* should use start_exclusive from main.c */ 12755 abi_ulong mem_value; 12756 if (get_user_u32(mem_value, arg6)) { 12757 target_siginfo_t info; 12758 info.si_signo = SIGSEGV; 12759 info.si_errno = 0; 12760 info.si_code = TARGET_SEGV_MAPERR; 12761 info._sifields._sigfault._addr = arg6; 12762 queue_signal((CPUArchState *)cpu_env, info.si_signo, 12763 QEMU_SI_FAULT, &info); 12764 ret = 0xdeadbeef; 12765 12766 } 12767 if (mem_value == arg2) 12768 put_user_u32(arg1, arg6); 12769 return mem_value; 12770 } 12771 #endif 12772 #ifdef TARGET_NR_atomic_barrier 12773 case TARGET_NR_atomic_barrier: 12774 /* Like the kernel implementation and the 12775 qemu arm barrier, no-op this? */ 12776 return 0; 12777 #endif 12778 12779 #ifdef TARGET_NR_timer_create 12780 case TARGET_NR_timer_create: 12781 { 12782 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12783 12784 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12785 12786 int clkid = arg1; 12787 int timer_index = next_free_host_timer(); 12788 12789 if (timer_index < 0) { 12790 ret = -TARGET_EAGAIN; 12791 } else { 12792 timer_t *phtimer = g_posix_timers + timer_index; 12793 12794 if (arg2) { 12795 phost_sevp = &host_sevp; 12796 ret = target_to_host_sigevent(phost_sevp, arg2); 12797 if (ret != 0) { 12798 return ret; 12799 } 12800 } 12801 12802 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12803 if (ret) { 12804 phtimer = NULL; 12805 } else { 12806 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12807 return -TARGET_EFAULT; 12808 } 12809 } 12810 } 12811 return ret; 12812 } 12813 #endif 12814 12815 #ifdef TARGET_NR_timer_settime 12816 case TARGET_NR_timer_settime: 12817 { 12818 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12819 * struct itimerspec * old_value */ 12820 target_timer_t timerid = get_timer_id(arg1); 12821 12822 if (timerid < 0) { 12823 ret = timerid; 12824 } else if (arg3 == 0) { 12825 ret = -TARGET_EINVAL; 12826 } else { 12827 timer_t htimer = g_posix_timers[timerid]; 12828 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12829 12830 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12831 return -TARGET_EFAULT; 12832 } 12833 ret = get_errno( 12834 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12835 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12836 return -TARGET_EFAULT; 12837 } 12838 } 12839 return ret; 12840 } 12841 #endif 12842 12843 #ifdef TARGET_NR_timer_settime64 12844 case TARGET_NR_timer_settime64: 12845 { 12846 target_timer_t timerid = get_timer_id(arg1); 12847 12848 if (timerid < 0) { 12849 ret = timerid; 12850 } else if (arg3 == 0) { 12851 ret = -TARGET_EINVAL; 12852 } else { 12853 timer_t htimer = g_posix_timers[timerid]; 12854 struct itimerspec 
hspec_new = {{0},}, hspec_old = {{0},}; 12855 12856 if (target_to_host_itimerspec64(&hspec_new, arg3)) { 12857 return -TARGET_EFAULT; 12858 } 12859 ret = get_errno( 12860 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12861 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) { 12862 return -TARGET_EFAULT; 12863 } 12864 } 12865 return ret; 12866 } 12867 #endif 12868 12869 #ifdef TARGET_NR_timer_gettime 12870 case TARGET_NR_timer_gettime: 12871 { 12872 /* args: timer_t timerid, struct itimerspec *curr_value */ 12873 target_timer_t timerid = get_timer_id(arg1); 12874 12875 if (timerid < 0) { 12876 ret = timerid; 12877 } else if (!arg2) { 12878 ret = -TARGET_EFAULT; 12879 } else { 12880 timer_t htimer = g_posix_timers[timerid]; 12881 struct itimerspec hspec; 12882 ret = get_errno(timer_gettime(htimer, &hspec)); 12883 12884 if (host_to_target_itimerspec(arg2, &hspec)) { 12885 ret = -TARGET_EFAULT; 12886 } 12887 } 12888 return ret; 12889 } 12890 #endif 12891 12892 #ifdef TARGET_NR_timer_gettime64 12893 case TARGET_NR_timer_gettime64: 12894 { 12895 /* args: timer_t timerid, struct itimerspec64 *curr_value */ 12896 target_timer_t timerid = get_timer_id(arg1); 12897 12898 if (timerid < 0) { 12899 ret = timerid; 12900 } else if (!arg2) { 12901 ret = -TARGET_EFAULT; 12902 } else { 12903 timer_t htimer = g_posix_timers[timerid]; 12904 struct itimerspec hspec; 12905 ret = get_errno(timer_gettime(htimer, &hspec)); 12906 12907 if (host_to_target_itimerspec64(arg2, &hspec)) { 12908 ret = -TARGET_EFAULT; 12909 } 12910 } 12911 return ret; 12912 } 12913 #endif 12914 12915 #ifdef TARGET_NR_timer_getoverrun 12916 case TARGET_NR_timer_getoverrun: 12917 { 12918 /* args: timer_t timerid */ 12919 target_timer_t timerid = get_timer_id(arg1); 12920 12921 if (timerid < 0) { 12922 ret = timerid; 12923 } else { 12924 timer_t htimer = g_posix_timers[timerid]; 12925 ret = get_errno(timer_getoverrun(htimer)); 12926 } 12927 return ret; 12928 } 12929 #endif 12930 12931 #ifdef TARGET_NR_timer_delete 12932 case TARGET_NR_timer_delete: 12933 { 12934 /* args: timer_t timerid */ 12935 target_timer_t timerid = get_timer_id(arg1); 12936 12937 if (timerid < 0) { 12938 ret = timerid; 12939 } else { 12940 timer_t htimer = g_posix_timers[timerid]; 12941 ret = get_errno(timer_delete(htimer)); 12942 g_posix_timers[timerid] = 0; 12943 } 12944 return ret; 12945 } 12946 #endif 12947 12948 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 12949 case TARGET_NR_timerfd_create: 12950 return get_errno(timerfd_create(arg1, 12951 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 12952 #endif 12953 12954 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 12955 case TARGET_NR_timerfd_gettime: 12956 { 12957 struct itimerspec its_curr; 12958 12959 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12960 12961 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 12962 return -TARGET_EFAULT; 12963 } 12964 } 12965 return ret; 12966 #endif 12967 12968 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD) 12969 case TARGET_NR_timerfd_gettime64: 12970 { 12971 struct itimerspec its_curr; 12972 12973 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12974 12975 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) { 12976 return -TARGET_EFAULT; 12977 } 12978 } 12979 return ret; 12980 #endif 12981 12982 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 12983 case TARGET_NR_timerfd_settime: 12984 { 12985 struct itimerspec its_new, its_old, *p_new; 12986 12987 if (arg3) { 12988 
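                /*
                 * A non-zero arg3 is converted from the guest layout;
                 * otherwise NULL is passed through below and the host
                 * syscall reports the appropriate error itself.
                 */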
if (target_to_host_itimerspec(&its_new, arg3)) { 12989 return -TARGET_EFAULT; 12990 } 12991 p_new = &its_new; 12992 } else { 12993 p_new = NULL; 12994 } 12995 12996 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 12997 12998 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 12999 return -TARGET_EFAULT; 13000 } 13001 } 13002 return ret; 13003 #endif 13004 13005 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD) 13006 case TARGET_NR_timerfd_settime64: 13007 { 13008 struct itimerspec its_new, its_old, *p_new; 13009 13010 if (arg3) { 13011 if (target_to_host_itimerspec64(&its_new, arg3)) { 13012 return -TARGET_EFAULT; 13013 } 13014 p_new = &its_new; 13015 } else { 13016 p_new = NULL; 13017 } 13018 13019 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 13020 13021 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) { 13022 return -TARGET_EFAULT; 13023 } 13024 } 13025 return ret; 13026 #endif 13027 13028 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 13029 case TARGET_NR_ioprio_get: 13030 return get_errno(ioprio_get(arg1, arg2)); 13031 #endif 13032 13033 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 13034 case TARGET_NR_ioprio_set: 13035 return get_errno(ioprio_set(arg1, arg2, arg3)); 13036 #endif 13037 13038 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 13039 case TARGET_NR_setns: 13040 return get_errno(setns(arg1, arg2)); 13041 #endif 13042 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 13043 case TARGET_NR_unshare: 13044 return get_errno(unshare(arg1)); 13045 #endif 13046 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 13047 case TARGET_NR_kcmp: 13048 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 13049 #endif 13050 #ifdef TARGET_NR_swapcontext 13051 case TARGET_NR_swapcontext: 13052 /* PowerPC specific. */ 13053 return do_swapcontext(cpu_env, arg1, arg2, arg3); 13054 #endif 13055 #ifdef TARGET_NR_memfd_create 13056 case TARGET_NR_memfd_create: 13057 p = lock_user_string(arg1); 13058 if (!p) { 13059 return -TARGET_EFAULT; 13060 } 13061 ret = get_errno(memfd_create(p, arg2)); 13062 fd_trans_unregister(ret); 13063 unlock_user(p, arg1, 0); 13064 return ret; 13065 #endif 13066 #if defined TARGET_NR_membarrier && defined __NR_membarrier 13067 case TARGET_NR_membarrier: 13068 return get_errno(membarrier(arg1, arg2)); 13069 #endif 13070 13071 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range) 13072 case TARGET_NR_copy_file_range: 13073 { 13074 loff_t inoff, outoff; 13075 loff_t *pinoff = NULL, *poutoff = NULL; 13076 13077 if (arg2) { 13078 if (get_user_u64(inoff, arg2)) { 13079 return -TARGET_EFAULT; 13080 } 13081 pinoff = &inoff; 13082 } 13083 if (arg4) { 13084 if (get_user_u64(outoff, arg4)) { 13085 return -TARGET_EFAULT; 13086 } 13087 poutoff = &outoff; 13088 } 13089 /* Do not sign-extend the count parameter. 
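     * On 32-bit guests abi_long is a signed 32-bit type, so a large
     * count in arg5 would otherwise be sign-extended into an enormous
     * 64-bit value; the (abi_ulong) cast keeps it zero-extended.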
*/ 13090 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff, 13091 (abi_ulong)arg5, arg6)); 13092 if (!is_error(ret) && ret > 0) { 13093 if (arg2) { 13094 if (put_user_u64(inoff, arg2)) { 13095 return -TARGET_EFAULT; 13096 } 13097 } 13098 if (arg4) { 13099 if (put_user_u64(outoff, arg4)) { 13100 return -TARGET_EFAULT; 13101 } 13102 } 13103 } 13104 } 13105 return ret; 13106 #endif 13107 13108 #if defined(TARGET_NR_pivot_root) 13109 case TARGET_NR_pivot_root: 13110 { 13111 void *p2; 13112 p = lock_user_string(arg1); /* new_root */ 13113 p2 = lock_user_string(arg2); /* put_old */ 13114 if (!p || !p2) { 13115 ret = -TARGET_EFAULT; 13116 } else { 13117 ret = get_errno(pivot_root(p, p2)); 13118 } 13119 unlock_user(p2, arg2, 0); 13120 unlock_user(p, arg1, 0); 13121 } 13122 return ret; 13123 #endif 13124 13125 default: 13126 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); 13127 return -TARGET_ENOSYS; 13128 } 13129 return ret; 13130 } 13131 13132 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 13133 abi_long arg2, abi_long arg3, abi_long arg4, 13134 abi_long arg5, abi_long arg6, abi_long arg7, 13135 abi_long arg8) 13136 { 13137 CPUState *cpu = env_cpu(cpu_env); 13138 abi_long ret; 13139 13140 #ifdef DEBUG_ERESTARTSYS 13141 /* Debug-only code for exercising the syscall-restart code paths 13142 * in the per-architecture cpu main loops: restart every syscall 13143 * the guest makes once before letting it through. 13144 */ 13145 { 13146 static bool flag; 13147 flag = !flag; 13148 if (flag) { 13149 return -QEMU_ERESTARTSYS; 13150 } 13151 } 13152 #endif 13153 13154 record_syscall_start(cpu, num, arg1, 13155 arg2, arg3, arg4, arg5, arg6, arg7, arg8); 13156 13157 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { 13158 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6); 13159 } 13160 13161 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4, 13162 arg5, arg6, arg7, arg8); 13163 13164 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { 13165 print_syscall_ret(cpu_env, num, ret, arg1, arg2, 13166 arg3, arg4, arg5, arg6); 13167 } 13168 13169 record_syscall_return(cpu, num, ret); 13170 return ret; 13171 } 13172
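/*
 * Illustrative sketch only (not QEMU code): roughly how a per-target
 * cpu_loop() in linux-user/<arch>/cpu_loop.c is expected to feed a
 * trapped guest syscall into do_syscall() above.  The helpers
 * get_syscall_args() and set_syscall_return(), and the fixed 4-byte
 * instruction size, are hypothetical placeholders; each target's real
 * loop uses its own register names and insn widths.
 *
 *     abi_long args[6];
 *     get_syscall_args(env, args);               // hypothetical helper
 *     abi_long ret = do_syscall(env, syscall_nr,
 *                               args[0], args[1], args[2],
 *                               args[3], args[4], args[5], 0, 0);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         // Back the PC up so the syscall insn re-executes once the
 *         // pending signal has been delivered.
 *         env->pc -= 4;
 *     } else if (ret != -QEMU_ESIGRETURN) {
 *         // sigreturn must not clobber the register state it has just
 *         // restored; every other syscall stores its result normally.
 *         set_syscall_return(env, ret);          // hypothetical helper
 *     }
 */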