/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
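
/*
 * Illustrative example (not from the original source): glibc's
 * pthread_create() typically issues clone() with
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * i.e. all of CLONE_THREAD_FLAGS plus a subset of
 * CLONE_OPTIONAL_THREAD_FLAGS, so it clears the thread-mask check above.
 * A plain fork() passes only an exit signal in the CSIGNAL bits, so it
 * clears the fork-mask check instead.
 */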

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)             \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                   \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)  \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,   \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}

#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
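
/*
 * For illustration: the _syscallN macros above generate plain wrappers
 * around the host's variadic syscall(2). The invocation just above expands
 * (roughly) to
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * so sys_gettid() always hits the real host syscall, bypassing any glibc
 * caching or emulation.
 */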

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int, sys_syslog, int, type, char *, bufp, int, len)
#ifdef __NR_exit_group
_syscall1(int, exit_group, int, error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int, set_tid_address, int *, tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int, sys_futex, int *, uaddr, int, op, int, val,
          const struct timespec *, timeout, int *, uaddr2, int, val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int, sys_futex_time64, int *, uaddr, int, op, int, val,
          const struct timespec *, timeout, int *, uaddr2, int, val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
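
/*
 * Sketch of how this table is consumed, using the bitmask_transtbl helper
 * functions defined elsewhere in linux-user: each row maps a masked set of
 * guest bits onto the corresponding host bits, e.g.
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     unsigned int tflags = host_to_target_bitmask(host_flags,
 *                                                  fcntl_flags_tbl);
 *
 * The all-zero row terminates the walk, which is why the O_LARGEFILE entry
 * above is guarded: on a 64-bit host+guest both values are 0 and the entry
 * would otherwise cut the list short.
 */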

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
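
/*
 * A minimal sketch (assuming the syscall_init() mentioned above) of how the
 * reverse table is derived from the forward table:
 *
 *     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
 *         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
 *     }
 *
 * Entries left at zero fall through to the identity mapping in
 * target_to_host_errno() below.
 */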

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [EDQUOT]            = TARGET_EDQUOT,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG]            = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL]           = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON]         = TARGET_EHWPOISON,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
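
/*
 * Worked example (illustrative): suppose a MIPS guest runs on an x86-64
 * host and a host syscall fails with ENOSYS, which is 38 on the host.
 * get_errno() then returns -host_to_target_errno(38) = -TARGET_ENOSYS,
 * which is 89 in the MIPS ABI, so the guest sees the errno value its own
 * kernel would have produced.
 */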

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
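
/*
 * Typical call-site pattern for the safe_* wrappers in this file
 * (illustrative):
 *
 *     ret = get_errno(safe_read(fd, p, count));
 *
 * If a guest signal arrives before the host syscall commits, the wrapper
 * fails with TARGET_ERESTARTSYS in errno (see safe-syscall.h), so the
 * caller sees -TARGET_ERESTARTSYS and the main loop can deliver the signal
 * and restart the syscall.
 */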
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int, futex, int *, uaddr, int, op, int, val, \
              const struct timespec *, timeout, int *, uaddr2, int, val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int, futex_time64, int *, uaddr, int, op, int, val, \
              const struct timespec *, timeout, int *, uaddr2, int, val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and
 * 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
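
/*
 * Worked example (illustrative, 4 KiB host pages): with
 * target_brk = 0x500800 and brk_page = 0x501000, a guest brk(0x503000)
 * takes the mmap path above: new_alloc_size =
 * HOST_PAGE_ALIGN(0x503000 - 0x501000) = 0x2000, so two pages are mapped
 * at brk_page. On success the tail of the old partial page
 * (0x500800..0x501000) is zeroed, target_brk becomes 0x503000 and
 * brk_page is realigned to 0x503000.
 */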

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
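
/*
 * Illustrative layout note: guest fd_set words are abi_ulongs, so with
 * TARGET_ABI_BITS == 32 a guest fd 33 lives at word i = 1, bit j = 1
 * (k = i * 32 + j). The loops above walk the words in guest order and
 * byte-swap each word via __get_user()/__put_user(), rather than assuming
 * the host's fd_set representation matches the guest's.
 */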

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }
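
    /*
     * Illustrative guest memory layout at arg6, as unpacked below
     * (two abi_ulongs, read as arg7[0] and arg7[1]):
     *
     *     struct {
     *         abi_ulong sigset_addr;  // guest pointer to a target_sigset_t
     *         abi_ulong sigsetsize;   // must equal sizeof(target_sigset_t)
     *     };
     */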
    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char *)target_saddr;

            if (cp[len - 1] && !cp[len])
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof(struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
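        /* data and target_data point at the payload that follows each
         * header. The host and target cmsg headers differ in size and
         * byte order, so each supported payload type is converted
         * explicitly below. */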
1888 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1889 1890 int len = tswapal(target_cmsg->cmsg_len) 1891 - sizeof(struct target_cmsghdr); 1892 1893 space += CMSG_SPACE(len); 1894 if (space > msgh->msg_controllen) { 1895 space -= CMSG_SPACE(len); 1896 /* This is a QEMU bug, since we allocated the payload 1897 * area ourselves (unlike overflow in host-to-target 1898 * conversion, which is just the guest giving us a buffer 1899 * that's too small). It can't happen for the payload types 1900 * we currently support; if it becomes an issue in future 1901 * we would need to improve our allocation strategy to 1902 * something more intelligent than "twice the size of the 1903 * target buffer we're reading from". 1904 */ 1905 qemu_log_mask(LOG_UNIMP, 1906 ("Unsupported ancillary data %d/%d: " 1907 "unhandled msg size\n"), 1908 tswap32(target_cmsg->cmsg_level), 1909 tswap32(target_cmsg->cmsg_type)); 1910 break; 1911 } 1912 1913 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1914 cmsg->cmsg_level = SOL_SOCKET; 1915 } else { 1916 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1917 } 1918 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1919 cmsg->cmsg_len = CMSG_LEN(len); 1920 1921 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1922 int *fd = (int *)data; 1923 int *target_fd = (int *)target_data; 1924 int i, numfds = len / sizeof(int); 1925 1926 for (i = 0; i < numfds; i++) { 1927 __get_user(fd[i], target_fd + i); 1928 } 1929 } else if (cmsg->cmsg_level == SOL_SOCKET 1930 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1931 struct ucred *cred = (struct ucred *)data; 1932 struct target_ucred *target_cred = 1933 (struct target_ucred *)target_data; 1934 1935 __get_user(cred->pid, &target_cred->pid); 1936 __get_user(cred->uid, &target_cred->uid); 1937 __get_user(cred->gid, &target_cred->gid); 1938 } else { 1939 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 1940 cmsg->cmsg_level, cmsg->cmsg_type); 1941 memcpy(data, target_data, len); 1942 } 1943 1944 cmsg = CMSG_NXTHDR(msgh, cmsg); 1945 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1946 target_cmsg_start); 1947 } 1948 unlock_user(target_cmsg, target_cmsg_addr, 0); 1949 the_end: 1950 msgh->msg_controllen = space; 1951 return 0; 1952 } 1953 1954 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1955 struct msghdr *msgh) 1956 { 1957 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1958 abi_long msg_controllen; 1959 abi_ulong target_cmsg_addr; 1960 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1961 socklen_t space = 0; 1962 1963 msg_controllen = tswapal(target_msgh->msg_controllen); 1964 if (msg_controllen < sizeof (struct target_cmsghdr)) 1965 goto the_end; 1966 target_cmsg_addr = tswapal(target_msgh->msg_control); 1967 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1968 target_cmsg_start = target_cmsg; 1969 if (!target_cmsg) 1970 return -TARGET_EFAULT; 1971 1972 while (cmsg && target_cmsg) { 1973 void *data = CMSG_DATA(cmsg); 1974 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1975 1976 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1977 int tgt_len, tgt_space; 1978 1979 /* We never copy a half-header but may copy half-data; 1980 * this is Linux's behaviour in put_cmsg(). Note that 1981 * truncation here is a guest problem (which we report 1982 * to the guest via the CTRUNC bit), unlike truncation 1983 * in target_to_host_cmsg, which is a QEMU bug. 
1984 */ 1985 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1986 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1987 break; 1988 } 1989 1990 if (cmsg->cmsg_level == SOL_SOCKET) { 1991 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1992 } else { 1993 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1994 } 1995 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1996 1997 /* Payload types which need a different size of payload on 1998 * the target must adjust tgt_len here. 1999 */ 2000 tgt_len = len; 2001 switch (cmsg->cmsg_level) { 2002 case SOL_SOCKET: 2003 switch (cmsg->cmsg_type) { 2004 case SO_TIMESTAMP: 2005 tgt_len = sizeof(struct target_timeval); 2006 break; 2007 default: 2008 break; 2009 } 2010 break; 2011 default: 2012 break; 2013 } 2014 2015 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 2016 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 2017 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 2018 } 2019 2020 /* We must now copy-and-convert len bytes of payload 2021 * into tgt_len bytes of destination space. Bear in mind 2022 * that in both source and destination we may be dealing 2023 * with a truncated value! 2024 */ 2025 switch (cmsg->cmsg_level) { 2026 case SOL_SOCKET: 2027 switch (cmsg->cmsg_type) { 2028 case SCM_RIGHTS: 2029 { 2030 int *fd = (int *)data; 2031 int *target_fd = (int *)target_data; 2032 int i, numfds = tgt_len / sizeof(int); 2033 2034 for (i = 0; i < numfds; i++) { 2035 __put_user(fd[i], target_fd + i); 2036 } 2037 break; 2038 } 2039 case SO_TIMESTAMP: 2040 { 2041 struct timeval *tv = (struct timeval *)data; 2042 struct target_timeval *target_tv = 2043 (struct target_timeval *)target_data; 2044 2045 if (len != sizeof(struct timeval) || 2046 tgt_len != sizeof(struct target_timeval)) { 2047 goto unimplemented; 2048 } 2049 2050 /* copy struct timeval to target */ 2051 __put_user(tv->tv_sec, &target_tv->tv_sec); 2052 __put_user(tv->tv_usec, &target_tv->tv_usec); 2053 break; 2054 } 2055 case SCM_CREDENTIALS: 2056 { 2057 struct ucred *cred = (struct ucred *)data; 2058 struct target_ucred *target_cred = 2059 (struct target_ucred *)target_data; 2060 2061 __put_user(cred->pid, &target_cred->pid); 2062 __put_user(cred->uid, &target_cred->uid); 2063 __put_user(cred->gid, &target_cred->gid); 2064 break; 2065 } 2066 default: 2067 goto unimplemented; 2068 } 2069 break; 2070 2071 case SOL_IP: 2072 switch (cmsg->cmsg_type) { 2073 case IP_TTL: 2074 { 2075 uint32_t *v = (uint32_t *)data; 2076 uint32_t *t_int = (uint32_t *)target_data; 2077 2078 if (len != sizeof(uint32_t) || 2079 tgt_len != sizeof(uint32_t)) { 2080 goto unimplemented; 2081 } 2082 __put_user(*v, t_int); 2083 break; 2084 } 2085 case IP_RECVERR: 2086 { 2087 struct errhdr_t { 2088 struct sock_extended_err ee; 2089 struct sockaddr_in offender; 2090 }; 2091 struct errhdr_t *errh = (struct errhdr_t *)data; 2092 struct errhdr_t *target_errh = 2093 (struct errhdr_t *)target_data; 2094 2095 if (len != sizeof(struct errhdr_t) || 2096 tgt_len != sizeof(struct errhdr_t)) { 2097 goto unimplemented; 2098 } 2099 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2100 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2101 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2102 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2103 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2104 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2105 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2106 host_to_target_sockaddr((unsigned long) &target_errh->offender, 
2107 (void *) &errh->offender, sizeof(errh->offender)); 2108 break; 2109 } 2110 default: 2111 goto unimplemented; 2112 } 2113 break; 2114 2115 case SOL_IPV6: 2116 switch (cmsg->cmsg_type) { 2117 case IPV6_HOPLIMIT: 2118 { 2119 uint32_t *v = (uint32_t *)data; 2120 uint32_t *t_int = (uint32_t *)target_data; 2121 2122 if (len != sizeof(uint32_t) || 2123 tgt_len != sizeof(uint32_t)) { 2124 goto unimplemented; 2125 } 2126 __put_user(*v, t_int); 2127 break; 2128 } 2129 case IPV6_RECVERR: 2130 { 2131 struct errhdr6_t { 2132 struct sock_extended_err ee; 2133 struct sockaddr_in6 offender; 2134 }; 2135 struct errhdr6_t *errh = (struct errhdr6_t *)data; 2136 struct errhdr6_t *target_errh = 2137 (struct errhdr6_t *)target_data; 2138 2139 if (len != sizeof(struct errhdr6_t) || 2140 tgt_len != sizeof(struct errhdr6_t)) { 2141 goto unimplemented; 2142 } 2143 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2144 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2145 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2146 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2147 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2148 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2149 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2150 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2151 (void *) &errh->offender, sizeof(errh->offender)); 2152 break; 2153 } 2154 default: 2155 goto unimplemented; 2156 } 2157 break; 2158 2159 default: 2160 unimplemented: 2161 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 2162 cmsg->cmsg_level, cmsg->cmsg_type); 2163 memcpy(target_data, data, MIN(len, tgt_len)); 2164 if (tgt_len > len) { 2165 memset(target_data + len, 0, tgt_len - len); 2166 } 2167 } 2168 2169 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 2170 tgt_space = TARGET_CMSG_SPACE(tgt_len); 2171 if (msg_controllen < tgt_space) { 2172 tgt_space = msg_controllen; 2173 } 2174 msg_controllen -= tgt_space; 2175 space += tgt_space; 2176 cmsg = CMSG_NXTHDR(msgh, cmsg); 2177 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 2178 target_cmsg_start); 2179 } 2180 unlock_user(target_cmsg, target_cmsg_addr, space); 2181 the_end: 2182 target_msgh->msg_controllen = tswapal(space); 2183 return 0; 2184 } 2185 2186 /* do_setsockopt() Must return target values and target errnos. */ 2187 static abi_long do_setsockopt(int sockfd, int level, int optname, 2188 abi_ulong optval_addr, socklen_t optlen) 2189 { 2190 abi_long ret; 2191 int val; 2192 struct ip_mreqn *ip_mreq; 2193 struct ip_mreq_source *ip_mreq_source; 2194 2195 switch(level) { 2196 case SOL_TCP: 2197 case SOL_UDP: 2198 /* TCP and UDP options all take an 'int' value. 
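         * (e.g. TCP_NODELAY, TCP_MAXSEG or UDP_CORK)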
*/
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch (optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof(struct target_ip_mreq) ||
                optlen > sizeof(struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            /* Propagate a fault from the guest address rather than
             * passing uninitialised data to the host kernel. */
            ret = target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            if (ret) {
                return ret;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof(struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* These take a u32 value. */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* g_try_malloc() (rather than g_malloc(), which aborts on
             * allocation failure) keeps the ENOMEM check below
             * meaningful. */
            char *alg_key = g_try_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
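    /* The guest's TARGET_SO_* option numbers differ from the host's, so
     * each socket-level option below is first translated to the host's
     * SO_* name before calling the host setsockopt(). */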
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
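                /* jt and jf are single bytes, so only the 16-bit 'code'
                 * and 32-bit 'k' fields need byte swapping. */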
filter[i].jt = tfilter[i].jt; 2478 filter[i].jf = tfilter[i].jf; 2479 filter[i].k = tswap32(tfilter[i].k); 2480 } 2481 fprog.filter = filter; 2482 2483 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 2484 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 2485 g_free(filter); 2486 2487 unlock_user_struct(tfilter, tfprog->filter, 1); 2488 unlock_user_struct(tfprog, optval_addr, 1); 2489 return ret; 2490 } 2491 case TARGET_SO_BINDTODEVICE: 2492 { 2493 char *dev_ifname, *addr_ifname; 2494 2495 if (optlen > IFNAMSIZ - 1) { 2496 optlen = IFNAMSIZ - 1; 2497 } 2498 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2499 if (!dev_ifname) { 2500 return -TARGET_EFAULT; 2501 } 2502 optname = SO_BINDTODEVICE; 2503 addr_ifname = alloca(IFNAMSIZ); 2504 memcpy(addr_ifname, dev_ifname, optlen); 2505 addr_ifname[optlen] = 0; 2506 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2507 addr_ifname, optlen)); 2508 unlock_user (dev_ifname, optval_addr, 0); 2509 return ret; 2510 } 2511 case TARGET_SO_LINGER: 2512 { 2513 struct linger lg; 2514 struct target_linger *tlg; 2515 2516 if (optlen != sizeof(struct target_linger)) { 2517 return -TARGET_EINVAL; 2518 } 2519 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) { 2520 return -TARGET_EFAULT; 2521 } 2522 __get_user(lg.l_onoff, &tlg->l_onoff); 2523 __get_user(lg.l_linger, &tlg->l_linger); 2524 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER, 2525 &lg, sizeof(lg))); 2526 unlock_user_struct(tlg, optval_addr, 0); 2527 return ret; 2528 } 2529 /* Options with 'int' argument. */ 2530 case TARGET_SO_DEBUG: 2531 optname = SO_DEBUG; 2532 break; 2533 case TARGET_SO_REUSEADDR: 2534 optname = SO_REUSEADDR; 2535 break; 2536 #ifdef SO_REUSEPORT 2537 case TARGET_SO_REUSEPORT: 2538 optname = SO_REUSEPORT; 2539 break; 2540 #endif 2541 case TARGET_SO_TYPE: 2542 optname = SO_TYPE; 2543 break; 2544 case TARGET_SO_ERROR: 2545 optname = SO_ERROR; 2546 break; 2547 case TARGET_SO_DONTROUTE: 2548 optname = SO_DONTROUTE; 2549 break; 2550 case TARGET_SO_BROADCAST: 2551 optname = SO_BROADCAST; 2552 break; 2553 case TARGET_SO_SNDBUF: 2554 optname = SO_SNDBUF; 2555 break; 2556 case TARGET_SO_SNDBUFFORCE: 2557 optname = SO_SNDBUFFORCE; 2558 break; 2559 case TARGET_SO_RCVBUF: 2560 optname = SO_RCVBUF; 2561 break; 2562 case TARGET_SO_RCVBUFFORCE: 2563 optname = SO_RCVBUFFORCE; 2564 break; 2565 case TARGET_SO_KEEPALIVE: 2566 optname = SO_KEEPALIVE; 2567 break; 2568 case TARGET_SO_OOBINLINE: 2569 optname = SO_OOBINLINE; 2570 break; 2571 case TARGET_SO_NO_CHECK: 2572 optname = SO_NO_CHECK; 2573 break; 2574 case TARGET_SO_PRIORITY: 2575 optname = SO_PRIORITY; 2576 break; 2577 #ifdef SO_BSDCOMPAT 2578 case TARGET_SO_BSDCOMPAT: 2579 optname = SO_BSDCOMPAT; 2580 break; 2581 #endif 2582 case TARGET_SO_PASSCRED: 2583 optname = SO_PASSCRED; 2584 break; 2585 case TARGET_SO_PASSSEC: 2586 optname = SO_PASSSEC; 2587 break; 2588 case TARGET_SO_TIMESTAMP: 2589 optname = SO_TIMESTAMP; 2590 break; 2591 case TARGET_SO_RCVLOWAT: 2592 optname = SO_RCVLOWAT; 2593 break; 2594 default: 2595 goto unimplemented; 2596 } 2597 if (optlen < sizeof(uint32_t)) 2598 return -TARGET_EINVAL; 2599 2600 if (get_user_u32(val, optval_addr)) 2601 return -TARGET_EFAULT; 2602 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 2603 break; 2604 #ifdef SOL_NETLINK 2605 case SOL_NETLINK: 2606 switch (optname) { 2607 case NETLINK_PKTINFO: 2608 case NETLINK_ADD_MEMBERSHIP: 2609 case NETLINK_DROP_MEMBERSHIP: 2610 case NETLINK_BROADCAST_ERROR: 2611 case NETLINK_NO_ENOBUFS: 2612 #if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch (level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
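            /* Note that SO_PEERSEC, unlike the integer options below,
             * returns a variable-length security label, so lv (updated
             * by the kernel) rather than len was reported back above. */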
break; 2749 } 2750 case TARGET_SO_LINGER: 2751 { 2752 struct linger lg; 2753 socklen_t lglen; 2754 struct target_linger *tlg; 2755 2756 if (get_user_u32(len, optlen)) { 2757 return -TARGET_EFAULT; 2758 } 2759 if (len < 0) { 2760 return -TARGET_EINVAL; 2761 } 2762 2763 lglen = sizeof(lg); 2764 ret = get_errno(getsockopt(sockfd, level, SO_LINGER, 2765 &lg, &lglen)); 2766 if (ret < 0) { 2767 return ret; 2768 } 2769 if (len > lglen) { 2770 len = lglen; 2771 } 2772 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) { 2773 return -TARGET_EFAULT; 2774 } 2775 __put_user(lg.l_onoff, &tlg->l_onoff); 2776 __put_user(lg.l_linger, &tlg->l_linger); 2777 unlock_user_struct(tlg, optval_addr, 1); 2778 if (put_user_u32(len, optlen)) { 2779 return -TARGET_EFAULT; 2780 } 2781 break; 2782 } 2783 /* Options with 'int' argument. */ 2784 case TARGET_SO_DEBUG: 2785 optname = SO_DEBUG; 2786 goto int_case; 2787 case TARGET_SO_REUSEADDR: 2788 optname = SO_REUSEADDR; 2789 goto int_case; 2790 #ifdef SO_REUSEPORT 2791 case TARGET_SO_REUSEPORT: 2792 optname = SO_REUSEPORT; 2793 goto int_case; 2794 #endif 2795 case TARGET_SO_TYPE: 2796 optname = SO_TYPE; 2797 goto int_case; 2798 case TARGET_SO_ERROR: 2799 optname = SO_ERROR; 2800 goto int_case; 2801 case TARGET_SO_DONTROUTE: 2802 optname = SO_DONTROUTE; 2803 goto int_case; 2804 case TARGET_SO_BROADCAST: 2805 optname = SO_BROADCAST; 2806 goto int_case; 2807 case TARGET_SO_SNDBUF: 2808 optname = SO_SNDBUF; 2809 goto int_case; 2810 case TARGET_SO_RCVBUF: 2811 optname = SO_RCVBUF; 2812 goto int_case; 2813 case TARGET_SO_KEEPALIVE: 2814 optname = SO_KEEPALIVE; 2815 goto int_case; 2816 case TARGET_SO_OOBINLINE: 2817 optname = SO_OOBINLINE; 2818 goto int_case; 2819 case TARGET_SO_NO_CHECK: 2820 optname = SO_NO_CHECK; 2821 goto int_case; 2822 case TARGET_SO_PRIORITY: 2823 optname = SO_PRIORITY; 2824 goto int_case; 2825 #ifdef SO_BSDCOMPAT 2826 case TARGET_SO_BSDCOMPAT: 2827 optname = SO_BSDCOMPAT; 2828 goto int_case; 2829 #endif 2830 case TARGET_SO_PASSCRED: 2831 optname = SO_PASSCRED; 2832 goto int_case; 2833 case TARGET_SO_TIMESTAMP: 2834 optname = SO_TIMESTAMP; 2835 goto int_case; 2836 case TARGET_SO_RCVLOWAT: 2837 optname = SO_RCVLOWAT; 2838 goto int_case; 2839 case TARGET_SO_ACCEPTCONN: 2840 optname = SO_ACCEPTCONN; 2841 goto int_case; 2842 case TARGET_SO_PROTOCOL: 2843 optname = SO_PROTOCOL; 2844 goto int_case; 2845 case TARGET_SO_DOMAIN: 2846 optname = SO_DOMAIN; 2847 goto int_case; 2848 default: 2849 goto int_case; 2850 } 2851 break; 2852 case SOL_TCP: 2853 case SOL_UDP: 2854 /* TCP and UDP options all take an 'int' value. 
*/ 2855 int_case: 2856 if (get_user_u32(len, optlen)) 2857 return -TARGET_EFAULT; 2858 if (len < 0) 2859 return -TARGET_EINVAL; 2860 lv = sizeof(lv); 2861 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2862 if (ret < 0) 2863 return ret; 2864 if (optname == SO_TYPE) { 2865 val = host_to_target_sock_type(val); 2866 } 2867 if (len > lv) 2868 len = lv; 2869 if (len == 4) { 2870 if (put_user_u32(val, optval_addr)) 2871 return -TARGET_EFAULT; 2872 } else { 2873 if (put_user_u8(val, optval_addr)) 2874 return -TARGET_EFAULT; 2875 } 2876 if (put_user_u32(len, optlen)) 2877 return -TARGET_EFAULT; 2878 break; 2879 case SOL_IP: 2880 switch(optname) { 2881 case IP_TOS: 2882 case IP_TTL: 2883 case IP_HDRINCL: 2884 case IP_ROUTER_ALERT: 2885 case IP_RECVOPTS: 2886 case IP_RETOPTS: 2887 case IP_PKTINFO: 2888 case IP_MTU_DISCOVER: 2889 case IP_RECVERR: 2890 case IP_RECVTOS: 2891 #ifdef IP_FREEBIND 2892 case IP_FREEBIND: 2893 #endif 2894 case IP_MULTICAST_TTL: 2895 case IP_MULTICAST_LOOP: 2896 if (get_user_u32(len, optlen)) 2897 return -TARGET_EFAULT; 2898 if (len < 0) 2899 return -TARGET_EINVAL; 2900 lv = sizeof(lv); 2901 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2902 if (ret < 0) 2903 return ret; 2904 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2905 len = 1; 2906 if (put_user_u32(len, optlen) 2907 || put_user_u8(val, optval_addr)) 2908 return -TARGET_EFAULT; 2909 } else { 2910 if (len > sizeof(int)) 2911 len = sizeof(int); 2912 if (put_user_u32(len, optlen) 2913 || put_user_u32(val, optval_addr)) 2914 return -TARGET_EFAULT; 2915 } 2916 break; 2917 default: 2918 ret = -TARGET_ENOPROTOOPT; 2919 break; 2920 } 2921 break; 2922 case SOL_IPV6: 2923 switch (optname) { 2924 case IPV6_MTU_DISCOVER: 2925 case IPV6_MTU: 2926 case IPV6_V6ONLY: 2927 case IPV6_RECVPKTINFO: 2928 case IPV6_UNICAST_HOPS: 2929 case IPV6_MULTICAST_HOPS: 2930 case IPV6_MULTICAST_LOOP: 2931 case IPV6_RECVERR: 2932 case IPV6_RECVHOPLIMIT: 2933 case IPV6_2292HOPLIMIT: 2934 case IPV6_CHECKSUM: 2935 case IPV6_ADDRFORM: 2936 case IPV6_2292PKTINFO: 2937 case IPV6_RECVTCLASS: 2938 case IPV6_RECVRTHDR: 2939 case IPV6_2292RTHDR: 2940 case IPV6_RECVHOPOPTS: 2941 case IPV6_2292HOPOPTS: 2942 case IPV6_RECVDSTOPTS: 2943 case IPV6_2292DSTOPTS: 2944 case IPV6_TCLASS: 2945 case IPV6_ADDR_PREFERENCES: 2946 #ifdef IPV6_RECVPATHMTU 2947 case IPV6_RECVPATHMTU: 2948 #endif 2949 #ifdef IPV6_TRANSPARENT 2950 case IPV6_TRANSPARENT: 2951 #endif 2952 #ifdef IPV6_FREEBIND 2953 case IPV6_FREEBIND: 2954 #endif 2955 #ifdef IPV6_RECVORIGDSTADDR 2956 case IPV6_RECVORIGDSTADDR: 2957 #endif 2958 if (get_user_u32(len, optlen)) 2959 return -TARGET_EFAULT; 2960 if (len < 0) 2961 return -TARGET_EINVAL; 2962 lv = sizeof(lv); 2963 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2964 if (ret < 0) 2965 return ret; 2966 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2967 len = 1; 2968 if (put_user_u32(len, optlen) 2969 || put_user_u8(val, optval_addr)) 2970 return -TARGET_EFAULT; 2971 } else { 2972 if (len > sizeof(int)) 2973 len = sizeof(int); 2974 if (put_user_u32(len, optlen) 2975 || put_user_u32(val, optval_addr)) 2976 return -TARGET_EFAULT; 2977 } 2978 break; 2979 default: 2980 ret = -TARGET_ENOPROTOOPT; 2981 break; 2982 } 2983 break; 2984 #ifdef SOL_NETLINK 2985 case SOL_NETLINK: 2986 switch (optname) { 2987 case NETLINK_PKTINFO: 2988 case NETLINK_BROADCAST_ERROR: 2989 case NETLINK_NO_ENOBUFS: 2990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2991 case NETLINK_LISTEN_ALL_NSID: 2992 case 
NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* Swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                /* Don't leak the locked buffer on the error path. */
                unlock_user(results, optval_addr, 0);
                return -TARGET_EFAULT;
            }
            /* Copy the byte-swapped results back to the guest. */
            unlock_user(results, optval_addr, lv);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Each shift is split in two because shifting by a full register
     * width (e.g. by 64 when the type is 64 bits wide) is undefined
     * behaviour in C. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}

static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.
*/ 3115 max_len = 0x7fffffff & TARGET_PAGE_MASK; 3116 total_len = 0; 3117 3118 for (i = 0; i < count; i++) { 3119 abi_ulong base = tswapal(target_vec[i].iov_base); 3120 abi_long len = tswapal(target_vec[i].iov_len); 3121 3122 if (len < 0) { 3123 err = EINVAL; 3124 goto fail; 3125 } else if (len == 0) { 3126 /* Zero length pointer is ignored. */ 3127 vec[i].iov_base = 0; 3128 } else { 3129 vec[i].iov_base = lock_user(type, base, len, copy); 3130 /* If the first buffer pointer is bad, this is a fault. But 3131 * subsequent bad buffers will result in a partial write; this 3132 * is realized by filling the vector with null pointers and 3133 * zero lengths. */ 3134 if (!vec[i].iov_base) { 3135 if (i == 0) { 3136 err = EFAULT; 3137 goto fail; 3138 } else { 3139 bad_address = true; 3140 } 3141 } 3142 if (bad_address) { 3143 len = 0; 3144 } 3145 if (len > max_len - total_len) { 3146 len = max_len - total_len; 3147 } 3148 } 3149 vec[i].iov_len = len; 3150 total_len += len; 3151 } 3152 3153 unlock_user(target_vec, target_addr, 0); 3154 return vec; 3155 3156 fail: 3157 while (--i >= 0) { 3158 if (tswapal(target_vec[i].iov_len) > 0) { 3159 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 3160 } 3161 } 3162 unlock_user(target_vec, target_addr, 0); 3163 fail2: 3164 g_free(vec); 3165 errno = err; 3166 return NULL; 3167 } 3168 3169 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 3170 abi_ulong count, int copy) 3171 { 3172 struct target_iovec *target_vec; 3173 int i; 3174 3175 target_vec = lock_user(VERIFY_READ, target_addr, 3176 count * sizeof(struct target_iovec), 1); 3177 if (target_vec) { 3178 for (i = 0; i < count; i++) { 3179 abi_ulong base = tswapal(target_vec[i].iov_base); 3180 abi_long len = tswapal(target_vec[i].iov_len); 3181 if (len < 0) { 3182 break; 3183 } 3184 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 3185 } 3186 unlock_user(target_vec, target_addr, 0); 3187 } 3188 3189 g_free(vec); 3190 } 3191 3192 static inline int target_to_host_sock_type(int *type) 3193 { 3194 int host_type = 0; 3195 int target_type = *type; 3196 3197 switch (target_type & TARGET_SOCK_TYPE_MASK) { 3198 case TARGET_SOCK_DGRAM: 3199 host_type = SOCK_DGRAM; 3200 break; 3201 case TARGET_SOCK_STREAM: 3202 host_type = SOCK_STREAM; 3203 break; 3204 default: 3205 host_type = target_type & TARGET_SOCK_TYPE_MASK; 3206 break; 3207 } 3208 if (target_type & TARGET_SOCK_CLOEXEC) { 3209 #if defined(SOCK_CLOEXEC) 3210 host_type |= SOCK_CLOEXEC; 3211 #else 3212 return -TARGET_EINVAL; 3213 #endif 3214 } 3215 if (target_type & TARGET_SOCK_NONBLOCK) { 3216 #if defined(SOCK_NONBLOCK) 3217 host_type |= SOCK_NONBLOCK; 3218 #elif !defined(O_NONBLOCK) 3219 return -TARGET_EINVAL; 3220 #endif 3221 } 3222 *type = host_type; 3223 return 0; 3224 } 3225 3226 /* Try to emulate socket type flags after socket creation. */ 3227 static int sock_flags_fixup(int fd, int target_type) 3228 { 3229 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 3230 if (target_type & TARGET_SOCK_NONBLOCK) { 3231 int flags = fcntl(fd, F_GETFL); 3232 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 3233 close(fd); 3234 return -TARGET_EINVAL; 3235 } 3236 } 3237 #endif 3238 return fd; 3239 } 3240 3241 /* do_socket() Must return target values and target errnos. 
*/
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Handle an obsolete case: if the socket type is
             * SOCK_PACKET, bind by name.
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
3356 */ 3357 msg.msg_name = (void *)-1; 3358 } else if (ret) { 3359 goto out2; 3360 } 3361 } else { 3362 msg.msg_name = NULL; 3363 msg.msg_namelen = 0; 3364 } 3365 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 3366 msg.msg_control = alloca(msg.msg_controllen); 3367 memset(msg.msg_control, 0, msg.msg_controllen); 3368 3369 msg.msg_flags = tswap32(msgp->msg_flags); 3370 3371 count = tswapal(msgp->msg_iovlen); 3372 target_vec = tswapal(msgp->msg_iov); 3373 3374 if (count > IOV_MAX) { 3375 /* sendrcvmsg returns a different errno for this condition than 3376 * readv/writev, so we must catch it here before lock_iovec() does. 3377 */ 3378 ret = -TARGET_EMSGSIZE; 3379 goto out2; 3380 } 3381 3382 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 3383 target_vec, count, send); 3384 if (vec == NULL) { 3385 ret = -host_to_target_errno(errno); 3386 goto out2; 3387 } 3388 msg.msg_iovlen = count; 3389 msg.msg_iov = vec; 3390 3391 if (send) { 3392 if (fd_trans_target_to_host_data(fd)) { 3393 void *host_msg; 3394 3395 host_msg = g_malloc(msg.msg_iov->iov_len); 3396 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len); 3397 ret = fd_trans_target_to_host_data(fd)(host_msg, 3398 msg.msg_iov->iov_len); 3399 if (ret >= 0) { 3400 msg.msg_iov->iov_base = host_msg; 3401 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3402 } 3403 g_free(host_msg); 3404 } else { 3405 ret = target_to_host_cmsg(&msg, msgp); 3406 if (ret == 0) { 3407 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3408 } 3409 } 3410 } else { 3411 ret = get_errno(safe_recvmsg(fd, &msg, flags)); 3412 if (!is_error(ret)) { 3413 len = ret; 3414 if (fd_trans_host_to_target_data(fd)) { 3415 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, 3416 MIN(msg.msg_iov->iov_len, len)); 3417 } else { 3418 ret = host_to_target_cmsg(msgp, &msg); 3419 } 3420 if (!is_error(ret)) { 3421 msgp->msg_namelen = tswap32(msg.msg_namelen); 3422 msgp->msg_flags = tswap32(msg.msg_flags); 3423 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) { 3424 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 3425 msg.msg_name, msg.msg_namelen); 3426 if (ret) { 3427 goto out; 3428 } 3429 } 3430 3431 ret = len; 3432 } 3433 } 3434 } 3435 3436 out: 3437 unlock_iovec(vec, target_vec, count, !send); 3438 out2: 3439 return ret; 3440 } 3441 3442 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 3443 int flags, int send) 3444 { 3445 abi_long ret; 3446 struct target_msghdr *msgp; 3447 3448 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 3449 msgp, 3450 target_msg, 3451 send ? 1 : 0)) { 3452 return -TARGET_EFAULT; 3453 } 3454 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 3455 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 3456 return ret; 3457 } 3458 3459 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 3460 * so it might not have this *mmsg-specific flag either. 
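 * When the host headers lack it, the Linux value (0x10000) is defined
 * just below.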
3461 */ 3462 #ifndef MSG_WAITFORONE 3463 #define MSG_WAITFORONE 0x10000 3464 #endif 3465 3466 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 3467 unsigned int vlen, unsigned int flags, 3468 int send) 3469 { 3470 struct target_mmsghdr *mmsgp; 3471 abi_long ret = 0; 3472 int i; 3473 3474 if (vlen > UIO_MAXIOV) { 3475 vlen = UIO_MAXIOV; 3476 } 3477 3478 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 3479 if (!mmsgp) { 3480 return -TARGET_EFAULT; 3481 } 3482 3483 for (i = 0; i < vlen; i++) { 3484 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 3485 if (is_error(ret)) { 3486 break; 3487 } 3488 mmsgp[i].msg_len = tswap32(ret); 3489 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 3490 if (flags & MSG_WAITFORONE) { 3491 flags |= MSG_DONTWAIT; 3492 } 3493 } 3494 3495 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 3496 3497 /* Return number of datagrams sent if we sent any at all; 3498 * otherwise return the error. 3499 */ 3500 if (i) { 3501 return i; 3502 } 3503 return ret; 3504 } 3505 3506 /* do_accept4() Must return target values and target errnos. */ 3507 static abi_long do_accept4(int fd, abi_ulong target_addr, 3508 abi_ulong target_addrlen_addr, int flags) 3509 { 3510 socklen_t addrlen, ret_addrlen; 3511 void *addr; 3512 abi_long ret; 3513 int host_flags; 3514 3515 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3516 3517 if (target_addr == 0) { 3518 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3519 } 3520 3521 /* linux returns EFAULT if addrlen pointer is invalid */ 3522 if (get_user_u32(addrlen, target_addrlen_addr)) 3523 return -TARGET_EFAULT; 3524 3525 if ((int)addrlen < 0) { 3526 return -TARGET_EINVAL; 3527 } 3528 3529 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3530 return -TARGET_EFAULT; 3531 } 3532 3533 addr = alloca(addrlen); 3534 3535 ret_addrlen = addrlen; 3536 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags)); 3537 if (!is_error(ret)) { 3538 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3539 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3540 ret = -TARGET_EFAULT; 3541 } 3542 } 3543 return ret; 3544 } 3545 3546 /* do_getpeername() Must return target values and target errnos. */ 3547 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3548 abi_ulong target_addrlen_addr) 3549 { 3550 socklen_t addrlen, ret_addrlen; 3551 void *addr; 3552 abi_long ret; 3553 3554 if (get_user_u32(addrlen, target_addrlen_addr)) 3555 return -TARGET_EFAULT; 3556 3557 if ((int)addrlen < 0) { 3558 return -TARGET_EINVAL; 3559 } 3560 3561 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3562 return -TARGET_EFAULT; 3563 } 3564 3565 addr = alloca(addrlen); 3566 3567 ret_addrlen = addrlen; 3568 ret = get_errno(getpeername(fd, addr, &ret_addrlen)); 3569 if (!is_error(ret)) { 3570 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3571 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3572 ret = -TARGET_EFAULT; 3573 } 3574 } 3575 return ret; 3576 } 3577 3578 /* do_getsockname() Must return target values and target errnos. 
*/ 3579 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3580 abi_ulong target_addrlen_addr) 3581 { 3582 socklen_t addrlen, ret_addrlen; 3583 void *addr; 3584 abi_long ret; 3585 3586 if (get_user_u32(addrlen, target_addrlen_addr)) 3587 return -TARGET_EFAULT; 3588 3589 if ((int)addrlen < 0) { 3590 return -TARGET_EINVAL; 3591 } 3592 3593 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3594 return -TARGET_EFAULT; 3595 } 3596 3597 addr = alloca(addrlen); 3598 3599 ret_addrlen = addrlen; 3600 ret = get_errno(getsockname(fd, addr, &ret_addrlen)); 3601 if (!is_error(ret)) { 3602 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3603 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3604 ret = -TARGET_EFAULT; 3605 } 3606 } 3607 return ret; 3608 } 3609 3610 /* do_socketpair() Must return target values and target errnos. */ 3611 static abi_long do_socketpair(int domain, int type, int protocol, 3612 abi_ulong target_tab_addr) 3613 { 3614 int tab[2]; 3615 abi_long ret; 3616 3617 target_to_host_sock_type(&type); 3618 3619 ret = get_errno(socketpair(domain, type, protocol, tab)); 3620 if (!is_error(ret)) { 3621 if (put_user_s32(tab[0], target_tab_addr) 3622 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3623 ret = -TARGET_EFAULT; 3624 } 3625 return ret; 3626 } 3627 3628 /* do_sendto() Must return target values and target errnos. */ 3629 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3630 abi_ulong target_addr, socklen_t addrlen) 3631 { 3632 void *addr; 3633 void *host_msg; 3634 void *copy_msg = NULL; 3635 abi_long ret; 3636 3637 if ((int)addrlen < 0) { 3638 return -TARGET_EINVAL; 3639 } 3640 3641 host_msg = lock_user(VERIFY_READ, msg, len, 1); 3642 if (!host_msg) 3643 return -TARGET_EFAULT; 3644 if (fd_trans_target_to_host_data(fd)) { 3645 copy_msg = host_msg; 3646 host_msg = g_malloc(len); 3647 memcpy(host_msg, copy_msg, len); 3648 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 3649 if (ret < 0) { 3650 goto fail; 3651 } 3652 } 3653 if (target_addr) { 3654 addr = alloca(addrlen+1); 3655 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 3656 if (ret) { 3657 goto fail; 3658 } 3659 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 3660 } else { 3661 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 3662 } 3663 fail: 3664 if (copy_msg) { 3665 g_free(host_msg); 3666 host_msg = copy_msg; 3667 } 3668 unlock_user(host_msg, msg, 0); 3669 return ret; 3670 } 3671 3672 /* do_recvfrom() Must return target values and target errnos. */ 3673 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 3674 abi_ulong target_addr, 3675 abi_ulong target_addrlen) 3676 { 3677 socklen_t addrlen, ret_addrlen; 3678 void *addr; 3679 void *host_msg; 3680 abi_long ret; 3681 3682 if (!msg) { 3683 host_msg = NULL; 3684 } else { 3685 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 3686 if (!host_msg) { 3687 return -TARGET_EFAULT; 3688 } 3689 } 3690 if (target_addr) { 3691 if (get_user_u32(addrlen, target_addrlen)) { 3692 ret = -TARGET_EFAULT; 3693 goto fail; 3694 } 3695 if ((int)addrlen < 0) { 3696 ret = -TARGET_EINVAL; 3697 goto fail; 3698 } 3699 addr = alloca(addrlen); 3700 ret_addrlen = addrlen; 3701 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 3702 addr, &ret_addrlen)); 3703 } else { 3704 addr = NULL; /* To keep compiler quiet. */ 3705 addrlen = 0; /* To keep compiler quiet. 
*/ 3706 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 3707 } 3708 if (!is_error(ret)) { 3709 if (fd_trans_host_to_target_data(fd)) { 3710 abi_long trans; 3711 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len)); 3712 if (is_error(trans)) { 3713 ret = trans; 3714 goto fail; 3715 } 3716 } 3717 if (target_addr) { 3718 host_to_target_sockaddr(target_addr, addr, 3719 MIN(addrlen, ret_addrlen)); 3720 if (put_user_u32(ret_addrlen, target_addrlen)) { 3721 ret = -TARGET_EFAULT; 3722 goto fail; 3723 } 3724 } 3725 unlock_user(host_msg, msg, len); 3726 } else { 3727 fail: 3728 unlock_user(host_msg, msg, 0); 3729 } 3730 return ret; 3731 } 3732 3733 #ifdef TARGET_NR_socketcall 3734 /* do_socketcall() must return target values and target errnos. */ 3735 static abi_long do_socketcall(int num, abi_ulong vptr) 3736 { 3737 static const unsigned nargs[] = { /* number of arguments per operation */ 3738 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 3739 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 3740 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 3741 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 3742 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 3743 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 3744 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 3745 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 3746 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 3747 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 3748 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 3749 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 3750 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 3751 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3752 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3753 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 3754 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 3755 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 3756 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 3757 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 3758 }; 3759 abi_long a[6]; /* max 6 args */ 3760 unsigned i; 3761 3762 /* check the range of the first argument num */ 3763 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 3764 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 3765 return -TARGET_EINVAL; 3766 } 3767 /* ensure we have space for args */ 3768 if (nargs[num] > ARRAY_SIZE(a)) { 3769 return -TARGET_EINVAL; 3770 } 3771 /* collect the arguments in a[] according to nargs[] */ 3772 for (i = 0; i < nargs[num]; ++i) { 3773 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 3774 return -TARGET_EFAULT; 3775 } 3776 } 3777 /* now when we have the args, invoke the appropriate underlying function */ 3778 switch (num) { 3779 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 3780 return do_socket(a[0], a[1], a[2]); 3781 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 3782 return do_bind(a[0], a[1], a[2]); 3783 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 3784 return do_connect(a[0], a[1], a[2]); 3785 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 3786 return get_errno(listen(a[0], a[1])); 3787 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 3788 return do_accept4(a[0], a[1], a[2], 0); 3789 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 3790 return do_getsockname(a[0], a[1], a[2]); 3791 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 3792 return do_getpeername(a[0], a[1], a[2]); 3793 case TARGET_SYS_SOCKETPAIR: /* 
domain, type, protocol, tab */ 3794 return do_socketpair(a[0], a[1], a[2], a[3]); 3795 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 3796 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 3797 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 3798 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 3799 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 3800 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 3801 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 3802 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 3803 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 3804 return get_errno(shutdown(a[0], a[1])); 3805 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 3806 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 3807 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 3808 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 3809 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 3810 return do_sendrecvmsg(a[0], a[1], a[2], 1); 3811 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 3812 return do_sendrecvmsg(a[0], a[1], a[2], 0); 3813 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 3814 return do_accept4(a[0], a[1], a[2], a[3]); 3815 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 3816 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 3817 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 3818 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 3819 default: 3820 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num); 3821 return -TARGET_EINVAL; 3822 } 3823 } 3824 #endif 3825 3826 #define N_SHM_REGIONS 32 3827 3828 static struct shm_region { 3829 abi_ulong start; 3830 abi_ulong size; 3831 bool in_use; 3832 } shm_regions[N_SHM_REGIONS]; 3833 3834 #ifndef TARGET_SEMID64_DS 3835 /* asm-generic version of this struct */ 3836 struct target_semid64_ds 3837 { 3838 struct target_ipc_perm sem_perm; 3839 abi_ulong sem_otime; 3840 #if TARGET_ABI_BITS == 32 3841 abi_ulong __unused1; 3842 #endif 3843 abi_ulong sem_ctime; 3844 #if TARGET_ABI_BITS == 32 3845 abi_ulong __unused2; 3846 #endif 3847 abi_ulong sem_nsems; 3848 abi_ulong __unused3; 3849 abi_ulong __unused4; 3850 }; 3851 #endif 3852 3853 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 3854 abi_ulong target_addr) 3855 { 3856 struct target_ipc_perm *target_ip; 3857 struct target_semid64_ds *target_sd; 3858 3859 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3860 return -TARGET_EFAULT; 3861 target_ip = &(target_sd->sem_perm); 3862 host_ip->__key = tswap32(target_ip->__key); 3863 host_ip->uid = tswap32(target_ip->uid); 3864 host_ip->gid = tswap32(target_ip->gid); 3865 host_ip->cuid = tswap32(target_ip->cuid); 3866 host_ip->cgid = tswap32(target_ip->cgid); 3867 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3868 host_ip->mode = tswap32(target_ip->mode); 3869 #else 3870 host_ip->mode = tswap16(target_ip->mode); 3871 #endif 3872 #if defined(TARGET_PPC) 3873 host_ip->__seq = tswap32(target_ip->__seq); 3874 #else 3875 host_ip->__seq = tswap16(target_ip->__seq); 3876 #endif 3877 unlock_user_struct(target_sd, target_addr, 0); 3878 return 0; 3879 } 3880 3881 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 3882 struct ipc_perm *host_ip) 3883 { 3884 struct target_ipc_perm *target_ip; 3885 struct target_semid64_ds *target_sd; 3886 3887 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3888 return -TARGET_EFAULT; 3889 
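    /* sem_perm is the first member of the target's semid64_ds layout,
     * so locking the whole structure above also covers the ipc_perm
     * fields accessed here. */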
target_ip = &(target_sd->sem_perm); 3890 target_ip->__key = tswap32(host_ip->__key); 3891 target_ip->uid = tswap32(host_ip->uid); 3892 target_ip->gid = tswap32(host_ip->gid); 3893 target_ip->cuid = tswap32(host_ip->cuid); 3894 target_ip->cgid = tswap32(host_ip->cgid); 3895 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3896 target_ip->mode = tswap32(host_ip->mode); 3897 #else 3898 target_ip->mode = tswap16(host_ip->mode); 3899 #endif 3900 #if defined(TARGET_PPC) 3901 target_ip->__seq = tswap32(host_ip->__seq); 3902 #else 3903 target_ip->__seq = tswap16(host_ip->__seq); 3904 #endif 3905 unlock_user_struct(target_sd, target_addr, 1); 3906 return 0; 3907 } 3908 3909 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 3910 abi_ulong target_addr) 3911 { 3912 struct target_semid64_ds *target_sd; 3913 3914 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3915 return -TARGET_EFAULT; 3916 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 3917 return -TARGET_EFAULT; 3918 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 3919 host_sd->sem_otime = tswapal(target_sd->sem_otime); 3920 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 3921 unlock_user_struct(target_sd, target_addr, 0); 3922 return 0; 3923 } 3924 3925 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 3926 struct semid_ds *host_sd) 3927 { 3928 struct target_semid64_ds *target_sd; 3929 3930 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3931 return -TARGET_EFAULT; 3932 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 3933 return -TARGET_EFAULT; 3934 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 3935 target_sd->sem_otime = tswapal(host_sd->sem_otime); 3936 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 3937 unlock_user_struct(target_sd, target_addr, 1); 3938 return 0; 3939 } 3940 3941 struct target_seminfo { 3942 int semmap; 3943 int semmni; 3944 int semmns; 3945 int semmnu; 3946 int semmsl; 3947 int semopm; 3948 int semume; 3949 int semusz; 3950 int semvmx; 3951 int semaem; 3952 }; 3953 3954 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 3955 struct seminfo *host_seminfo) 3956 { 3957 struct target_seminfo *target_seminfo; 3958 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 3959 return -TARGET_EFAULT; 3960 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 3961 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 3962 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 3963 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 3964 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 3965 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 3966 __put_user(host_seminfo->semume, &target_seminfo->semume); 3967 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 3968 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 3969 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 3970 unlock_user_struct(target_seminfo, target_addr, 1); 3971 return 0; 3972 } 3973 3974 union semun { 3975 int val; 3976 struct semid_ds *buf; 3977 unsigned short *array; 3978 struct seminfo *__buf; 3979 }; 3980 3981 union target_semun { 3982 int val; 3983 abi_ulong buf; 3984 abi_ulong array; 3985 abi_ulong __buf; 3986 }; 3987 3988 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 3989 abi_ulong target_addr) 3990 { 3991 int nsems; 3992 unsigned short *array; 3993 union semun semun; 3994 struct 
semid_ds semid_ds; 3995 int i, ret; 3996 3997 semun.buf = &semid_ds; 3998 3999 ret = semctl(semid, 0, IPC_STAT, semun); 4000 if (ret == -1) 4001 return get_errno(ret); 4002 4003 nsems = semid_ds.sem_nsems; 4004 4005 *host_array = g_try_new(unsigned short, nsems); 4006 if (!*host_array) { 4007 return -TARGET_ENOMEM; 4008 } 4009 array = lock_user(VERIFY_READ, target_addr, 4010 nsems*sizeof(unsigned short), 1); 4011 if (!array) { 4012 g_free(*host_array); 4013 return -TARGET_EFAULT; 4014 } 4015 4016 for(i=0; i<nsems; i++) { 4017 __get_user((*host_array)[i], &array[i]); 4018 } 4019 unlock_user(array, target_addr, 0); 4020 4021 return 0; 4022 } 4023 4024 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 4025 unsigned short **host_array) 4026 { 4027 int nsems; 4028 unsigned short *array; 4029 union semun semun; 4030 struct semid_ds semid_ds; 4031 int i, ret; 4032 4033 semun.buf = &semid_ds; 4034 4035 ret = semctl(semid, 0, IPC_STAT, semun); 4036 if (ret == -1) 4037 return get_errno(ret); 4038 4039 nsems = semid_ds.sem_nsems; 4040 4041 array = lock_user(VERIFY_WRITE, target_addr, 4042 nsems*sizeof(unsigned short), 0); 4043 if (!array) 4044 return -TARGET_EFAULT; 4045 4046 for(i=0; i<nsems; i++) { 4047 __put_user((*host_array)[i], &array[i]); 4048 } 4049 g_free(*host_array); 4050 unlock_user(array, target_addr, 1); 4051 4052 return 0; 4053 } 4054 4055 static inline abi_long do_semctl(int semid, int semnum, int cmd, 4056 abi_ulong target_arg) 4057 { 4058 union target_semun target_su = { .buf = target_arg }; 4059 union semun arg; 4060 struct semid_ds dsarg; 4061 unsigned short *array = NULL; 4062 struct seminfo seminfo; 4063 abi_long ret = -TARGET_EINVAL; 4064 abi_long err; 4065 cmd &= 0xff; 4066 4067 switch( cmd ) { 4068 case GETVAL: 4069 case SETVAL: 4070 /* In 64 bit cross-endian situations, we will erroneously pick up 4071 * the wrong half of the union for the "val" element. To rectify 4072 * this, the entire 8-byte structure is byteswapped, followed by 4073 * a swap of the 4 byte val field. In other cases, the data is 4074 * already in proper host byte order. 
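 * (The 4-byte val field occupies only one half of the 8-byte union, and
 * which half the guest stored it in depends on the guest's endianness,
 * hence the two swaps.)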
*/
4075 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4076 target_su.buf = tswapal(target_su.buf);
4077 arg.val = tswap32(target_su.val);
4078 } else {
4079 arg.val = target_su.val;
4080 }
4081 ret = get_errno(semctl(semid, semnum, cmd, arg));
4082 break;
4083 case GETALL:
4084 case SETALL:
4085 err = target_to_host_semarray(semid, &array, target_su.array);
4086 if (err)
4087 return err;
4088 arg.array = array;
4089 ret = get_errno(semctl(semid, semnum, cmd, arg));
4090 err = host_to_target_semarray(semid, target_su.array, &array);
4091 if (err)
4092 return err;
4093 break;
4094 case IPC_STAT:
4095 case IPC_SET:
4096 case SEM_STAT:
4097 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4098 if (err)
4099 return err;
4100 arg.buf = &dsarg;
4101 ret = get_errno(semctl(semid, semnum, cmd, arg));
4102 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4103 if (err)
4104 return err;
4105 break;
4106 case IPC_INFO:
4107 case SEM_INFO:
4108 arg.__buf = &seminfo;
4109 ret = get_errno(semctl(semid, semnum, cmd, arg));
4110 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4111 if (err)
4112 return err;
4113 break;
4114 case IPC_RMID:
4115 case GETPID:
4116 case GETNCNT:
4117 case GETZCNT:
4118 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4119 break;
4120 }
4121
4122 return ret;
4123 }
4124
4125 struct target_sembuf {
4126 unsigned short sem_num;
4127 short sem_op;
4128 short sem_flg;
4129 };
4130
4131 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4132 abi_ulong target_addr,
4133 unsigned nsops)
4134 {
4135 struct target_sembuf *target_sembuf;
4136 int i;
4137
4138 target_sembuf = lock_user(VERIFY_READ, target_addr,
4139 nsops*sizeof(struct target_sembuf), 1);
4140 if (!target_sembuf)
4141 return -TARGET_EFAULT;
4142
4143 for(i=0; i<nsops; i++) {
4144 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4145 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4146 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4147 }
4148
4149 unlock_user(target_sembuf, target_addr, 0);
4150
4151 return 0;
4152 }
4153
4154 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4156
4157 /*
4158 * This macro is required to handle the s390 variant, which passes the
4159 * arguments in a different order than the default.
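 * Concretely, as the definitions below show, the generic variant passes
 * (nsops, 0, sops, timeout) while s390 passes (nsops, timeout, sops).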
4160 */ 4161 #ifdef __s390x__ 4162 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4163 (__nsops), (__timeout), (__sops) 4164 #else 4165 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4166 (__nsops), 0, (__sops), (__timeout) 4167 #endif 4168 4169 static inline abi_long do_semtimedop(int semid, 4170 abi_long ptr, 4171 unsigned nsops, 4172 abi_long timeout, bool time64) 4173 { 4174 struct sembuf *sops; 4175 struct timespec ts, *pts = NULL; 4176 abi_long ret; 4177 4178 if (timeout) { 4179 pts = &ts; 4180 if (time64) { 4181 if (target_to_host_timespec64(pts, timeout)) { 4182 return -TARGET_EFAULT; 4183 } 4184 } else { 4185 if (target_to_host_timespec(pts, timeout)) { 4186 return -TARGET_EFAULT; 4187 } 4188 } 4189 } 4190 4191 if (nsops > TARGET_SEMOPM) { 4192 return -TARGET_E2BIG; 4193 } 4194 4195 sops = g_new(struct sembuf, nsops); 4196 4197 if (target_to_host_sembuf(sops, ptr, nsops)) { 4198 g_free(sops); 4199 return -TARGET_EFAULT; 4200 } 4201 4202 ret = -TARGET_ENOSYS; 4203 #ifdef __NR_semtimedop 4204 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts)); 4205 #endif 4206 #ifdef __NR_ipc 4207 if (ret == -TARGET_ENOSYS) { 4208 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, 4209 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))); 4210 } 4211 #endif 4212 g_free(sops); 4213 return ret; 4214 } 4215 #endif 4216 4217 struct target_msqid_ds 4218 { 4219 struct target_ipc_perm msg_perm; 4220 abi_ulong msg_stime; 4221 #if TARGET_ABI_BITS == 32 4222 abi_ulong __unused1; 4223 #endif 4224 abi_ulong msg_rtime; 4225 #if TARGET_ABI_BITS == 32 4226 abi_ulong __unused2; 4227 #endif 4228 abi_ulong msg_ctime; 4229 #if TARGET_ABI_BITS == 32 4230 abi_ulong __unused3; 4231 #endif 4232 abi_ulong __msg_cbytes; 4233 abi_ulong msg_qnum; 4234 abi_ulong msg_qbytes; 4235 abi_ulong msg_lspid; 4236 abi_ulong msg_lrpid; 4237 abi_ulong __unused4; 4238 abi_ulong __unused5; 4239 }; 4240 4241 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4242 abi_ulong target_addr) 4243 { 4244 struct target_msqid_ds *target_md; 4245 4246 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4247 return -TARGET_EFAULT; 4248 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4249 return -TARGET_EFAULT; 4250 host_md->msg_stime = tswapal(target_md->msg_stime); 4251 host_md->msg_rtime = tswapal(target_md->msg_rtime); 4252 host_md->msg_ctime = tswapal(target_md->msg_ctime); 4253 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 4254 host_md->msg_qnum = tswapal(target_md->msg_qnum); 4255 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 4256 host_md->msg_lspid = tswapal(target_md->msg_lspid); 4257 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 4258 unlock_user_struct(target_md, target_addr, 0); 4259 return 0; 4260 } 4261 4262 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 4263 struct msqid_ds *host_md) 4264 { 4265 struct target_msqid_ds *target_md; 4266 4267 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 4268 return -TARGET_EFAULT; 4269 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 4270 return -TARGET_EFAULT; 4271 target_md->msg_stime = tswapal(host_md->msg_stime); 4272 target_md->msg_rtime = tswapal(host_md->msg_rtime); 4273 target_md->msg_ctime = tswapal(host_md->msg_ctime); 4274 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 4275 target_md->msg_qnum = tswapal(host_md->msg_qnum); 4276 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 4277 target_md->msg_lspid = tswapal(host_md->msg_lspid); 
4278 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4279 unlock_user_struct(target_md, target_addr, 1);
4280 return 0;
4281 }
4282
4283 struct target_msginfo {
4284 int msgpool;
4285 int msgmap;
4286 int msgmax;
4287 int msgmnb;
4288 int msgmni;
4289 int msgssz;
4290 int msgtql;
4291 unsigned short int msgseg;
4292 };
4293
4294 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4295 struct msginfo *host_msginfo)
4296 {
4297 struct target_msginfo *target_msginfo;
4298 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4299 return -TARGET_EFAULT;
4300 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4301 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4302 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4303 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4304 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4305 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4306 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4307 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4308 unlock_user_struct(target_msginfo, target_addr, 1);
4309 return 0;
4310 }
4311
4312 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4313 {
4314 struct msqid_ds dsarg;
4315 struct msginfo msginfo;
4316 abi_long ret = -TARGET_EINVAL;
4317
4318 cmd &= 0xff;
4319
4320 switch (cmd) {
4321 case IPC_STAT:
4322 case IPC_SET:
4323 case MSG_STAT:
4324 if (target_to_host_msqid_ds(&dsarg,ptr))
4325 return -TARGET_EFAULT;
4326 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4327 if (host_to_target_msqid_ds(ptr,&dsarg))
4328 return -TARGET_EFAULT;
4329 break;
4330 case IPC_RMID:
4331 ret = get_errno(msgctl(msgid, cmd, NULL));
4332 break;
4333 case IPC_INFO:
4334 case MSG_INFO:
4335 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4336 if (host_to_target_msginfo(ptr, &msginfo))
4337 return -TARGET_EFAULT;
4338 break;
4339 }
4340
4341 return ret;
4342 }
4343
4344 struct target_msgbuf {
4345 abi_long mtype;
4346 char mtext[1];
4347 };
4348
4349 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4350 ssize_t msgsz, int msgflg)
4351 {
4352 struct target_msgbuf *target_mb;
4353 struct msgbuf *host_mb;
4354 abi_long ret = 0;
4355
4356 if (msgsz < 0) {
4357 return -TARGET_EINVAL;
4358 }
4359
4360 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4361 return -TARGET_EFAULT;
4362 host_mb = g_try_malloc(msgsz + sizeof(long));
4363 if (!host_mb) {
4364 unlock_user_struct(target_mb, msgp, 0);
4365 return -TARGET_ENOMEM;
4366 }
4367 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4368 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4369 ret = -TARGET_ENOSYS;
4370 #ifdef __NR_msgsnd
4371 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4372 #endif
4373 #ifdef __NR_ipc
4374 if (ret == -TARGET_ENOSYS) {
4375 #ifdef __s390x__
4376 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4377 host_mb));
4378 #else
4379 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4380 host_mb, 0));
4381 #endif
4382 }
4383 #endif
4384 g_free(host_mb);
4385 unlock_user_struct(target_mb, msgp, 0);
4386
4387 return ret;
4388 }
4389
4390 #ifdef __NR_ipc
4391 #if defined(__sparc__)
4392 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
4393 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4394 #elif defined(__s390x__)
4395 /* The s390 sys_ipc variant has only five parameters.
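 * so the (msgp, msgtyp) kludge array is passed without the trailing 0
 * that the generic variant appends as a sixth argument.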
*/ 4396 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4397 ((long int[]){(long int)__msgp, __msgtyp}) 4398 #else 4399 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4400 ((long int[]){(long int)__msgp, __msgtyp}), 0 4401 #endif 4402 #endif 4403 4404 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 4405 ssize_t msgsz, abi_long msgtyp, 4406 int msgflg) 4407 { 4408 struct target_msgbuf *target_mb; 4409 char *target_mtext; 4410 struct msgbuf *host_mb; 4411 abi_long ret = 0; 4412 4413 if (msgsz < 0) { 4414 return -TARGET_EINVAL; 4415 } 4416 4417 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 4418 return -TARGET_EFAULT; 4419 4420 host_mb = g_try_malloc(msgsz + sizeof(long)); 4421 if (!host_mb) { 4422 ret = -TARGET_ENOMEM; 4423 goto end; 4424 } 4425 ret = -TARGET_ENOSYS; 4426 #ifdef __NR_msgrcv 4427 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 4428 #endif 4429 #ifdef __NR_ipc 4430 if (ret == -TARGET_ENOSYS) { 4431 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, 4432 msgflg, MSGRCV_ARGS(host_mb, msgtyp))); 4433 } 4434 #endif 4435 4436 if (ret > 0) { 4437 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 4438 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 4439 if (!target_mtext) { 4440 ret = -TARGET_EFAULT; 4441 goto end; 4442 } 4443 memcpy(target_mb->mtext, host_mb->mtext, ret); 4444 unlock_user(target_mtext, target_mtext_addr, ret); 4445 } 4446 4447 target_mb->mtype = tswapal(host_mb->mtype); 4448 4449 end: 4450 if (target_mb) 4451 unlock_user_struct(target_mb, msgp, 1); 4452 g_free(host_mb); 4453 return ret; 4454 } 4455 4456 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 4457 abi_ulong target_addr) 4458 { 4459 struct target_shmid_ds *target_sd; 4460 4461 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4462 return -TARGET_EFAULT; 4463 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 4464 return -TARGET_EFAULT; 4465 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4466 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4467 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4468 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4469 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4470 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4471 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4472 unlock_user_struct(target_sd, target_addr, 0); 4473 return 0; 4474 } 4475 4476 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4477 struct shmid_ds *host_sd) 4478 { 4479 struct target_shmid_ds *target_sd; 4480 4481 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4482 return -TARGET_EFAULT; 4483 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4484 return -TARGET_EFAULT; 4485 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4486 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4487 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4488 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4489 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4490 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4491 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4492 unlock_user_struct(target_sd, target_addr, 1); 4493 return 0; 4494 } 4495 4496 struct target_shminfo { 4497 abi_ulong shmmax; 4498 abi_ulong shmmin; 4499 abi_ulong shmmni; 4500 abi_ulong shmseg; 4501 abi_ulong shmall; 4502 }; 4503 4504 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4505 
struct shminfo *host_shminfo) 4506 { 4507 struct target_shminfo *target_shminfo; 4508 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4509 return -TARGET_EFAULT; 4510 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4511 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4512 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4513 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 4514 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4515 unlock_user_struct(target_shminfo, target_addr, 1); 4516 return 0; 4517 } 4518 4519 struct target_shm_info { 4520 int used_ids; 4521 abi_ulong shm_tot; 4522 abi_ulong shm_rss; 4523 abi_ulong shm_swp; 4524 abi_ulong swap_attempts; 4525 abi_ulong swap_successes; 4526 }; 4527 4528 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4529 struct shm_info *host_shm_info) 4530 { 4531 struct target_shm_info *target_shm_info; 4532 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4533 return -TARGET_EFAULT; 4534 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4535 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4536 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4537 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4538 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4539 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4540 unlock_user_struct(target_shm_info, target_addr, 1); 4541 return 0; 4542 } 4543 4544 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4545 { 4546 struct shmid_ds dsarg; 4547 struct shminfo shminfo; 4548 struct shm_info shm_info; 4549 abi_long ret = -TARGET_EINVAL; 4550 4551 cmd &= 0xff; 4552 4553 switch(cmd) { 4554 case IPC_STAT: 4555 case IPC_SET: 4556 case SHM_STAT: 4557 if (target_to_host_shmid_ds(&dsarg, buf)) 4558 return -TARGET_EFAULT; 4559 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4560 if (host_to_target_shmid_ds(buf, &dsarg)) 4561 return -TARGET_EFAULT; 4562 break; 4563 case IPC_INFO: 4564 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4565 if (host_to_target_shminfo(buf, &shminfo)) 4566 return -TARGET_EFAULT; 4567 break; 4568 case SHM_INFO: 4569 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4570 if (host_to_target_shm_info(buf, &shm_info)) 4571 return -TARGET_EFAULT; 4572 break; 4573 case IPC_RMID: 4574 case SHM_LOCK: 4575 case SHM_UNLOCK: 4576 ret = get_errno(shmctl(shmid, cmd, NULL)); 4577 break; 4578 } 4579 4580 return ret; 4581 } 4582 4583 #ifndef TARGET_FORCE_SHMLBA 4584 /* For most architectures, SHMLBA is the same as the page size; 4585 * some architectures have larger values, in which case they should 4586 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4587 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4588 * and defining its own value for SHMLBA. 4589 * 4590 * The kernel also permits SHMLBA to be set by the architecture to a 4591 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4592 * this means that addresses are rounded to the large size if 4593 * SHM_RND is set but addresses not aligned to that size are not rejected 4594 * as long as they are at least page-aligned. Since the only architecture 4595 * which uses this is ia64 this code doesn't provide for that oddity. 
4596 */ 4597 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4598 { 4599 return TARGET_PAGE_SIZE; 4600 } 4601 #endif 4602 4603 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4604 int shmid, abi_ulong shmaddr, int shmflg) 4605 { 4606 CPUState *cpu = env_cpu(cpu_env); 4607 abi_long raddr; 4608 void *host_raddr; 4609 struct shmid_ds shm_info; 4610 int i,ret; 4611 abi_ulong shmlba; 4612 4613 /* shmat pointers are always untagged */ 4614 4615 /* find out the length of the shared memory segment */ 4616 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4617 if (is_error(ret)) { 4618 /* can't get length, bail out */ 4619 return ret; 4620 } 4621 4622 shmlba = target_shmlba(cpu_env); 4623 4624 if (shmaddr & (shmlba - 1)) { 4625 if (shmflg & SHM_RND) { 4626 shmaddr &= ~(shmlba - 1); 4627 } else { 4628 return -TARGET_EINVAL; 4629 } 4630 } 4631 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) { 4632 return -TARGET_EINVAL; 4633 } 4634 4635 mmap_lock(); 4636 4637 /* 4638 * We're mapping shared memory, so ensure we generate code for parallel 4639 * execution and flush old translations. This will work up to the level 4640 * supported by the host -- anything that requires EXCP_ATOMIC will not 4641 * be atomic with respect to an external process. 4642 */ 4643 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 4644 cpu->tcg_cflags |= CF_PARALLEL; 4645 tb_flush(cpu); 4646 } 4647 4648 if (shmaddr) 4649 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); 4650 else { 4651 abi_ulong mmap_start; 4652 4653 /* In order to use the host shmat, we need to honor host SHMLBA. */ 4654 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba)); 4655 4656 if (mmap_start == -1) { 4657 errno = ENOMEM; 4658 host_raddr = (void *)-1; 4659 } else 4660 host_raddr = shmat(shmid, g2h_untagged(mmap_start), 4661 shmflg | SHM_REMAP); 4662 } 4663 4664 if (host_raddr == (void *)-1) { 4665 mmap_unlock(); 4666 return get_errno((long)host_raddr); 4667 } 4668 raddr=h2g((unsigned long)host_raddr); 4669 4670 page_set_flags(raddr, raddr + shm_info.shm_segsz, 4671 PAGE_VALID | PAGE_RESET | PAGE_READ | 4672 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); 4673 4674 for (i = 0; i < N_SHM_REGIONS; i++) { 4675 if (!shm_regions[i].in_use) { 4676 shm_regions[i].in_use = true; 4677 shm_regions[i].start = raddr; 4678 shm_regions[i].size = shm_info.shm_segsz; 4679 break; 4680 } 4681 } 4682 4683 mmap_unlock(); 4684 return raddr; 4685 4686 } 4687 4688 static inline abi_long do_shmdt(abi_ulong shmaddr) 4689 { 4690 int i; 4691 abi_long rv; 4692 4693 /* shmdt pointers are always untagged */ 4694 4695 mmap_lock(); 4696 4697 for (i = 0; i < N_SHM_REGIONS; ++i) { 4698 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4699 shm_regions[i].in_use = false; 4700 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4701 break; 4702 } 4703 } 4704 rv = get_errno(shmdt(g2h_untagged(shmaddr))); 4705 4706 mmap_unlock(); 4707 4708 return rv; 4709 } 4710 4711 #ifdef TARGET_NR_ipc 4712 /* ??? This only works with linear mappings. */ 4713 /* do_ipc() must return target values and target errnos. 
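 * Like do_socketcall() above, this demultiplexes a single guest entry
 * point: the low 16 bits of 'call' select the IPC operation and the
 * high 16 bits carry a version number.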
*/ 4714 static abi_long do_ipc(CPUArchState *cpu_env, 4715 unsigned int call, abi_long first, 4716 abi_long second, abi_long third, 4717 abi_long ptr, abi_long fifth) 4718 { 4719 int version; 4720 abi_long ret = 0; 4721 4722 version = call >> 16; 4723 call &= 0xffff; 4724 4725 switch (call) { 4726 case IPCOP_semop: 4727 ret = do_semtimedop(first, ptr, second, 0, false); 4728 break; 4729 case IPCOP_semtimedop: 4730 /* 4731 * The s390 sys_ipc variant has only five parameters instead of six 4732 * (as for default variant) and the only difference is the handling of 4733 * SEMTIMEDOP where on s390 the third parameter is used as a pointer 4734 * to a struct timespec where the generic variant uses fifth parameter. 4735 */ 4736 #if defined(TARGET_S390X) 4737 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64); 4738 #else 4739 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64); 4740 #endif 4741 break; 4742 4743 case IPCOP_semget: 4744 ret = get_errno(semget(first, second, third)); 4745 break; 4746 4747 case IPCOP_semctl: { 4748 /* The semun argument to semctl is passed by value, so dereference the 4749 * ptr argument. */ 4750 abi_ulong atptr; 4751 get_user_ual(atptr, ptr); 4752 ret = do_semctl(first, second, third, atptr); 4753 break; 4754 } 4755 4756 case IPCOP_msgget: 4757 ret = get_errno(msgget(first, second)); 4758 break; 4759 4760 case IPCOP_msgsnd: 4761 ret = do_msgsnd(first, ptr, second, third); 4762 break; 4763 4764 case IPCOP_msgctl: 4765 ret = do_msgctl(first, second, ptr); 4766 break; 4767 4768 case IPCOP_msgrcv: 4769 switch (version) { 4770 case 0: 4771 { 4772 struct target_ipc_kludge { 4773 abi_long msgp; 4774 abi_long msgtyp; 4775 } *tmp; 4776 4777 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 4778 ret = -TARGET_EFAULT; 4779 break; 4780 } 4781 4782 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 4783 4784 unlock_user_struct(tmp, ptr, 0); 4785 break; 4786 } 4787 default: 4788 ret = do_msgrcv(first, ptr, second, fifth, third); 4789 } 4790 break; 4791 4792 case IPCOP_shmat: 4793 switch (version) { 4794 default: 4795 { 4796 abi_ulong raddr; 4797 raddr = do_shmat(cpu_env, first, ptr, second); 4798 if (is_error(raddr)) 4799 return get_errno(raddr); 4800 if (put_user_ual(raddr, third)) 4801 return -TARGET_EFAULT; 4802 break; 4803 } 4804 case 1: 4805 ret = -TARGET_EINVAL; 4806 break; 4807 } 4808 break; 4809 case IPCOP_shmdt: 4810 ret = do_shmdt(ptr); 4811 break; 4812 4813 case IPCOP_shmget: 4814 /* IPC_* flag values are the same on all linux platforms */ 4815 ret = get_errno(shmget(first, second, third)); 4816 break; 4817 4818 /* IPC_* and SHM_* command values are the same on all linux platforms */ 4819 case IPCOP_shmctl: 4820 ret = do_shmctl(first, second, ptr); 4821 break; 4822 default: 4823 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n", 4824 call, version); 4825 ret = -TARGET_ENOSYS; 4826 break; 4827 } 4828 return ret; 4829 } 4830 #endif 4831 4832 /* kernel structure types definitions */ 4833 4834 #define STRUCT(name, ...) STRUCT_ ## name, 4835 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 4836 enum { 4837 #include "syscall_types.h" 4838 STRUCT_MAX 4839 }; 4840 #undef STRUCT 4841 #undef STRUCT_SPECIAL 4842 4843 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 4844 #define STRUCT_SPECIAL(name) 4845 #include "syscall_types.h" 4846 #undef STRUCT 4847 #undef STRUCT_SPECIAL 4848 4849 #define MAX_STRUCT_SIZE 4096 4850 4851 #ifdef CONFIG_FIEMAP 4852 /* So fiemap access checks don't overflow on 32 bit systems. 4853 * This is very slightly smaller than the limit imposed by 4854 * the underlying kernel. 4855 */ 4856 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 4857 / sizeof(struct fiemap_extent)) 4858 4859 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 4860 int fd, int cmd, abi_long arg) 4861 { 4862 /* The parameter for this ioctl is a struct fiemap followed 4863 * by an array of struct fiemap_extent whose size is set 4864 * in fiemap->fm_extent_count. The array is filled in by the 4865 * ioctl. 4866 */ 4867 int target_size_in, target_size_out; 4868 struct fiemap *fm; 4869 const argtype *arg_type = ie->arg_type; 4870 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 4871 void *argptr, *p; 4872 abi_long ret; 4873 int i, extent_size = thunk_type_size(extent_arg_type, 0); 4874 uint32_t outbufsz; 4875 int free_fm = 0; 4876 4877 assert(arg_type[0] == TYPE_PTR); 4878 assert(ie->access == IOC_RW); 4879 arg_type++; 4880 target_size_in = thunk_type_size(arg_type, 0); 4881 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 4882 if (!argptr) { 4883 return -TARGET_EFAULT; 4884 } 4885 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4886 unlock_user(argptr, arg, 0); 4887 fm = (struct fiemap *)buf_temp; 4888 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 4889 return -TARGET_EINVAL; 4890 } 4891 4892 outbufsz = sizeof (*fm) + 4893 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 4894 4895 if (outbufsz > MAX_STRUCT_SIZE) { 4896 /* We can't fit all the extents into the fixed size buffer. 4897 * Allocate one that is large enough and use it instead. 
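 * (The FIEMAP_MAX_EXTENTS check above guarantees that this size
 * computation fits in the 32-bit outbufsz without overflowing.)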
4898 */
4899 fm = g_try_malloc(outbufsz);
4900 if (!fm) {
4901 return -TARGET_ENOMEM;
4902 }
4903 memcpy(fm, buf_temp, sizeof(struct fiemap));
4904 free_fm = 1;
4905 }
4906 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4907 if (!is_error(ret)) {
4908 target_size_out = target_size_in;
4909 /* An extent_count of 0 means we were only counting the extents,
4910 * so there are no structs to copy.
4911 */
4912 if (fm->fm_extent_count != 0) {
4913 target_size_out += fm->fm_mapped_extents * extent_size;
4914 }
4915 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4916 if (!argptr) {
4917 ret = -TARGET_EFAULT;
4918 } else {
4919 /* Convert the struct fiemap */
4920 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4921 if (fm->fm_extent_count != 0) {
4922 p = argptr + target_size_in;
4923 /* ...and then all the struct fiemap_extents */
4924 for (i = 0; i < fm->fm_mapped_extents; i++) {
4925 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4926 THUNK_TARGET);
4927 p += extent_size;
4928 }
4929 }
4930 unlock_user(argptr, arg, target_size_out);
4931 }
4932 }
4933 if (free_fm) {
4934 g_free(fm);
4935 }
4936 return ret;
4937 }
4938 #endif
4939
4940 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4941 int fd, int cmd, abi_long arg)
4942 {
4943 const argtype *arg_type = ie->arg_type;
4944 int target_size;
4945 void *argptr;
4946 int ret;
4947 struct ifconf *host_ifconf;
4948 uint32_t outbufsz;
4949 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4950 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4951 int target_ifreq_size;
4952 int nb_ifreq;
4953 int free_buf = 0;
4954 int i;
4955 int target_ifc_len;
4956 abi_long target_ifc_buf;
4957 int host_ifc_len;
4958 char *host_ifc_buf;
4959
4960 assert(arg_type[0] == TYPE_PTR);
4961 assert(ie->access == IOC_RW);
4962
4963 arg_type++;
4964 target_size = thunk_type_size(arg_type, 0);
4965
4966 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4967 if (!argptr)
4968 return -TARGET_EFAULT;
4969 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4970 unlock_user(argptr, arg, 0);
4971
4972 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4973 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4974 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4975
4976 if (target_ifc_buf != 0) {
4977 target_ifc_len = host_ifconf->ifc_len;
4978 nb_ifreq = target_ifc_len / target_ifreq_size;
4979 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4980
4981 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4982 if (outbufsz > MAX_STRUCT_SIZE) {
4983 /*
4984 * We can't fit all the ifreq entries into the fixed size buffer.
4985 * Allocate one that is large enough and use it instead.
4986 */ 4987 host_ifconf = malloc(outbufsz); 4988 if (!host_ifconf) { 4989 return -TARGET_ENOMEM; 4990 } 4991 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 4992 free_buf = 1; 4993 } 4994 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf); 4995 4996 host_ifconf->ifc_len = host_ifc_len; 4997 } else { 4998 host_ifc_buf = NULL; 4999 } 5000 host_ifconf->ifc_buf = host_ifc_buf; 5001 5002 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 5003 if (!is_error(ret)) { 5004 /* convert host ifc_len to target ifc_len */ 5005 5006 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 5007 target_ifc_len = nb_ifreq * target_ifreq_size; 5008 host_ifconf->ifc_len = target_ifc_len; 5009 5010 /* restore target ifc_buf */ 5011 5012 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 5013 5014 /* copy struct ifconf to target user */ 5015 5016 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5017 if (!argptr) 5018 return -TARGET_EFAULT; 5019 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 5020 unlock_user(argptr, arg, target_size); 5021 5022 if (target_ifc_buf != 0) { 5023 /* copy ifreq[] to target user */ 5024 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 5025 for (i = 0; i < nb_ifreq ; i++) { 5026 thunk_convert(argptr + i * target_ifreq_size, 5027 host_ifc_buf + i * sizeof(struct ifreq), 5028 ifreq_arg_type, THUNK_TARGET); 5029 } 5030 unlock_user(argptr, target_ifc_buf, target_ifc_len); 5031 } 5032 } 5033 5034 if (free_buf) { 5035 free(host_ifconf); 5036 } 5037 5038 return ret; 5039 } 5040 5041 #if defined(CONFIG_USBFS) 5042 #if HOST_LONG_BITS > 64 5043 #error USBDEVFS thunks do not support >64 bit hosts yet. 5044 #endif 5045 struct live_urb { 5046 uint64_t target_urb_adr; 5047 uint64_t target_buf_adr; 5048 char *target_buf_ptr; 5049 struct usbdevfs_urb host_urb; 5050 }; 5051 5052 static GHashTable *usbdevfs_urb_hashtable(void) 5053 { 5054 static GHashTable *urb_hashtable; 5055 5056 if (!urb_hashtable) { 5057 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal); 5058 } 5059 return urb_hashtable; 5060 } 5061 5062 static void urb_hashtable_insert(struct live_urb *urb) 5063 { 5064 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5065 g_hash_table_insert(urb_hashtable, urb, urb); 5066 } 5067 5068 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr) 5069 { 5070 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5071 return g_hash_table_lookup(urb_hashtable, &target_urb_adr); 5072 } 5073 5074 static void urb_hashtable_remove(struct live_urb *urb) 5075 { 5076 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5077 g_hash_table_remove(urb_hashtable, urb); 5078 } 5079 5080 static abi_long 5081 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp, 5082 int fd, int cmd, abi_long arg) 5083 { 5084 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) }; 5085 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 }; 5086 struct live_urb *lurb; 5087 void *argptr; 5088 uint64_t hurb; 5089 int target_size; 5090 uintptr_t target_urb_adr; 5091 abi_long ret; 5092 5093 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET); 5094 5095 memset(buf_temp, 0, sizeof(uint64_t)); 5096 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5097 if (is_error(ret)) { 5098 return ret; 5099 } 5100 5101 memcpy(&hurb, buf_temp, sizeof(uint64_t)); 5102 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb)); 5103 if (!lurb->target_urb_adr) { 5104 return -TARGET_EFAULT; 5105 } 5106 
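/* The URB has completed: drop it from the tracking table, release the
 * guest data buffer, and copy the completed urb back out to the guest. */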
urb_hashtable_remove(lurb); 5107 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 5108 lurb->host_urb.buffer_length); 5109 lurb->target_buf_ptr = NULL; 5110 5111 /* restore the guest buffer pointer */ 5112 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr; 5113 5114 /* update the guest urb struct */ 5115 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0); 5116 if (!argptr) { 5117 g_free(lurb); 5118 return -TARGET_EFAULT; 5119 } 5120 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET); 5121 unlock_user(argptr, lurb->target_urb_adr, target_size); 5122 5123 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET); 5124 /* write back the urb handle */ 5125 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5126 if (!argptr) { 5127 g_free(lurb); 5128 return -TARGET_EFAULT; 5129 } 5130 5131 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */ 5132 target_urb_adr = lurb->target_urb_adr; 5133 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET); 5134 unlock_user(argptr, arg, target_size); 5135 5136 g_free(lurb); 5137 return ret; 5138 } 5139 5140 static abi_long 5141 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie, 5142 uint8_t *buf_temp __attribute__((unused)), 5143 int fd, int cmd, abi_long arg) 5144 { 5145 struct live_urb *lurb; 5146 5147 /* map target address back to host URB with metadata. */ 5148 lurb = urb_hashtable_lookup(arg); 5149 if (!lurb) { 5150 return -TARGET_EFAULT; 5151 } 5152 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5153 } 5154 5155 static abi_long 5156 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp, 5157 int fd, int cmd, abi_long arg) 5158 { 5159 const argtype *arg_type = ie->arg_type; 5160 int target_size; 5161 abi_long ret; 5162 void *argptr; 5163 int rw_dir; 5164 struct live_urb *lurb; 5165 5166 /* 5167 * each submitted URB needs to map to a unique ID for the 5168 * kernel, and that unique ID needs to be a pointer to 5169 * host memory. hence, we need to malloc for each URB. 5170 * isochronous transfers have a variable length struct. 5171 */ 5172 arg_type++; 5173 target_size = thunk_type_size(arg_type, THUNK_TARGET); 5174 5175 /* construct host copy of urb and metadata */ 5176 lurb = g_try_malloc0(sizeof(struct live_urb)); 5177 if (!lurb) { 5178 return -TARGET_ENOMEM; 5179 } 5180 5181 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5182 if (!argptr) { 5183 g_free(lurb); 5184 return -TARGET_EFAULT; 5185 } 5186 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST); 5187 unlock_user(argptr, arg, 0); 5188 5189 lurb->target_urb_adr = arg; 5190 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer; 5191 5192 /* buffer space used depends on endpoint type so lock the entire buffer */ 5193 /* control type urbs should check the buffer contents for true direction */ 5194 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? 
VERIFY_WRITE : VERIFY_READ; 5195 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr, 5196 lurb->host_urb.buffer_length, 1); 5197 if (lurb->target_buf_ptr == NULL) { 5198 g_free(lurb); 5199 return -TARGET_EFAULT; 5200 } 5201 5202 /* update buffer pointer in host copy */ 5203 lurb->host_urb.buffer = lurb->target_buf_ptr; 5204 5205 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5206 if (is_error(ret)) { 5207 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0); 5208 g_free(lurb); 5209 } else { 5210 urb_hashtable_insert(lurb); 5211 } 5212 5213 return ret; 5214 } 5215 #endif /* CONFIG_USBFS */ 5216 5217 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5218 int cmd, abi_long arg) 5219 { 5220 void *argptr; 5221 struct dm_ioctl *host_dm; 5222 abi_long guest_data; 5223 uint32_t guest_data_size; 5224 int target_size; 5225 const argtype *arg_type = ie->arg_type; 5226 abi_long ret; 5227 void *big_buf = NULL; 5228 char *host_data; 5229 5230 arg_type++; 5231 target_size = thunk_type_size(arg_type, 0); 5232 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5233 if (!argptr) { 5234 ret = -TARGET_EFAULT; 5235 goto out; 5236 } 5237 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5238 unlock_user(argptr, arg, 0); 5239 5240 /* buf_temp is too small, so fetch things into a bigger buffer */ 5241 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5242 memcpy(big_buf, buf_temp, target_size); 5243 buf_temp = big_buf; 5244 host_dm = big_buf; 5245 5246 guest_data = arg + host_dm->data_start; 5247 if ((guest_data - arg) < 0) { 5248 ret = -TARGET_EINVAL; 5249 goto out; 5250 } 5251 guest_data_size = host_dm->data_size - host_dm->data_start; 5252 host_data = (char*)host_dm + host_dm->data_start; 5253 5254 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5255 if (!argptr) { 5256 ret = -TARGET_EFAULT; 5257 goto out; 5258 } 5259 5260 switch (ie->host_cmd) { 5261 case DM_REMOVE_ALL: 5262 case DM_LIST_DEVICES: 5263 case DM_DEV_CREATE: 5264 case DM_DEV_REMOVE: 5265 case DM_DEV_SUSPEND: 5266 case DM_DEV_STATUS: 5267 case DM_DEV_WAIT: 5268 case DM_TABLE_STATUS: 5269 case DM_TABLE_CLEAR: 5270 case DM_TABLE_DEPS: 5271 case DM_LIST_VERSIONS: 5272 /* no input data */ 5273 break; 5274 case DM_DEV_RENAME: 5275 case DM_DEV_SET_GEOMETRY: 5276 /* data contains only strings */ 5277 memcpy(host_data, argptr, guest_data_size); 5278 break; 5279 case DM_TARGET_MSG: 5280 memcpy(host_data, argptr, guest_data_size); 5281 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5282 break; 5283 case DM_TABLE_LOAD: 5284 { 5285 void *gspec = argptr; 5286 void *cur_data = host_data; 5287 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5288 int spec_size = thunk_type_size(arg_type, 0); 5289 int i; 5290 5291 for (i = 0; i < host_dm->target_count; i++) { 5292 struct dm_target_spec *spec = cur_data; 5293 uint32_t next; 5294 int slen; 5295 5296 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5297 slen = strlen((char*)gspec + spec_size) + 1; 5298 next = spec->next; 5299 spec->next = sizeof(*spec) + slen; 5300 strcpy((char*)&spec[1], gspec + spec_size); 5301 gspec += next; 5302 cur_data += spec->next; 5303 } 5304 break; 5305 } 5306 default: 5307 ret = -TARGET_EINVAL; 5308 unlock_user(argptr, guest_data, 0); 5309 goto out; 5310 } 5311 unlock_user(argptr, guest_data, 0); 5312 5313 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5314 if (!is_error(ret)) { 5315 guest_data = arg + host_dm->data_start; 5316 guest_data_size = 
host_dm->data_size - host_dm->data_start; 5317 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5318 switch (ie->host_cmd) { 5319 case DM_REMOVE_ALL: 5320 case DM_DEV_CREATE: 5321 case DM_DEV_REMOVE: 5322 case DM_DEV_RENAME: 5323 case DM_DEV_SUSPEND: 5324 case DM_DEV_STATUS: 5325 case DM_TABLE_LOAD: 5326 case DM_TABLE_CLEAR: 5327 case DM_TARGET_MSG: 5328 case DM_DEV_SET_GEOMETRY: 5329 /* no return data */ 5330 break; 5331 case DM_LIST_DEVICES: 5332 { 5333 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5334 uint32_t remaining_data = guest_data_size; 5335 void *cur_data = argptr; 5336 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5337 int nl_size = 12; /* can't use thunk_size due to alignment */ 5338 5339 while (1) { 5340 uint32_t next = nl->next; 5341 if (next) { 5342 nl->next = nl_size + (strlen(nl->name) + 1); 5343 } 5344 if (remaining_data < nl->next) { 5345 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5346 break; 5347 } 5348 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5349 strcpy(cur_data + nl_size, nl->name); 5350 cur_data += nl->next; 5351 remaining_data -= nl->next; 5352 if (!next) { 5353 break; 5354 } 5355 nl = (void*)nl + next; 5356 } 5357 break; 5358 } 5359 case DM_DEV_WAIT: 5360 case DM_TABLE_STATUS: 5361 { 5362 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5363 void *cur_data = argptr; 5364 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5365 int spec_size = thunk_type_size(arg_type, 0); 5366 int i; 5367 5368 for (i = 0; i < host_dm->target_count; i++) { 5369 uint32_t next = spec->next; 5370 int slen = strlen((char*)&spec[1]) + 1; 5371 spec->next = (cur_data - argptr) + spec_size + slen; 5372 if (guest_data_size < spec->next) { 5373 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5374 break; 5375 } 5376 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5377 strcpy(cur_data + spec_size, (char*)&spec[1]); 5378 cur_data = argptr + spec->next; 5379 spec = (void*)host_dm + host_dm->data_start + next; 5380 } 5381 break; 5382 } 5383 case DM_TABLE_DEPS: 5384 { 5385 void *hdata = (void*)host_dm + host_dm->data_start; 5386 int count = *(uint32_t*)hdata; 5387 uint64_t *hdev = hdata + 8; 5388 uint64_t *gdev = argptr + 8; 5389 int i; 5390 5391 *(uint32_t*)argptr = tswap32(count); 5392 for (i = 0; i < count; i++) { 5393 *gdev = tswap64(*hdev); 5394 gdev++; 5395 hdev++; 5396 } 5397 break; 5398 } 5399 case DM_LIST_VERSIONS: 5400 { 5401 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5402 uint32_t remaining_data = guest_data_size; 5403 void *cur_data = argptr; 5404 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5405 int vers_size = thunk_type_size(arg_type, 0); 5406 5407 while (1) { 5408 uint32_t next = vers->next; 5409 if (next) { 5410 vers->next = vers_size + (strlen(vers->name) + 1); 5411 } 5412 if (remaining_data < vers->next) { 5413 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5414 break; 5415 } 5416 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5417 strcpy(cur_data + vers_size, vers->name); 5418 cur_data += vers->next; 5419 remaining_data -= vers->next; 5420 if (!next) { 5421 break; 5422 } 5423 vers = (void*)vers + next; 5424 } 5425 break; 5426 } 5427 default: 5428 unlock_user(argptr, guest_data, 0); 5429 ret = -TARGET_EINVAL; 5430 goto out; 5431 } 5432 unlock_user(argptr, guest_data, guest_data_size); 5433 5434 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5435 if (!argptr) { 5436 ret = -TARGET_EFAULT; 5437 goto out; 5438 } 5439 
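/* Finally, convert the (possibly updated) dm_ioctl header itself
 * back to the guest layout. */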
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5440 unlock_user(argptr, arg, target_size); 5441 } 5442 out: 5443 g_free(big_buf); 5444 return ret; 5445 } 5446 5447 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5448 int cmd, abi_long arg) 5449 { 5450 void *argptr; 5451 int target_size; 5452 const argtype *arg_type = ie->arg_type; 5453 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5454 abi_long ret; 5455 5456 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5457 struct blkpg_partition host_part; 5458 5459 /* Read and convert blkpg */ 5460 arg_type++; 5461 target_size = thunk_type_size(arg_type, 0); 5462 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5463 if (!argptr) { 5464 ret = -TARGET_EFAULT; 5465 goto out; 5466 } 5467 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5468 unlock_user(argptr, arg, 0); 5469 5470 switch (host_blkpg->op) { 5471 case BLKPG_ADD_PARTITION: 5472 case BLKPG_DEL_PARTITION: 5473 /* payload is struct blkpg_partition */ 5474 break; 5475 default: 5476 /* Unknown opcode */ 5477 ret = -TARGET_EINVAL; 5478 goto out; 5479 } 5480 5481 /* Read and convert blkpg->data */ 5482 arg = (abi_long)(uintptr_t)host_blkpg->data; 5483 target_size = thunk_type_size(part_arg_type, 0); 5484 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5485 if (!argptr) { 5486 ret = -TARGET_EFAULT; 5487 goto out; 5488 } 5489 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5490 unlock_user(argptr, arg, 0); 5491 5492 /* Swizzle the data pointer to our local copy and call! */ 5493 host_blkpg->data = &host_part; 5494 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5495 5496 out: 5497 return ret; 5498 } 5499 5500 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5501 int fd, int cmd, abi_long arg) 5502 { 5503 const argtype *arg_type = ie->arg_type; 5504 const StructEntry *se; 5505 const argtype *field_types; 5506 const int *dst_offsets, *src_offsets; 5507 int target_size; 5508 void *argptr; 5509 abi_ulong *target_rt_dev_ptr = NULL; 5510 unsigned long *host_rt_dev_ptr = NULL; 5511 abi_long ret; 5512 int i; 5513 5514 assert(ie->access == IOC_W); 5515 assert(*arg_type == TYPE_PTR); 5516 arg_type++; 5517 assert(*arg_type == TYPE_STRUCT); 5518 target_size = thunk_type_size(arg_type, 0); 5519 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5520 if (!argptr) { 5521 return -TARGET_EFAULT; 5522 } 5523 arg_type++; 5524 assert(*arg_type == (int)STRUCT_rtentry); 5525 se = struct_entries + *arg_type++; 5526 assert(se->convert[0] == NULL); 5527 /* convert struct here to be able to catch rt_dev string */ 5528 field_types = se->field_types; 5529 dst_offsets = se->field_offsets[THUNK_HOST]; 5530 src_offsets = se->field_offsets[THUNK_TARGET]; 5531 for (i = 0; i < se->nb_fields; i++) { 5532 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5533 assert(*field_types == TYPE_PTRVOID); 5534 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5535 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5536 if (*target_rt_dev_ptr != 0) { 5537 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5538 tswapal(*target_rt_dev_ptr)); 5539 if (!*host_rt_dev_ptr) { 5540 unlock_user(argptr, arg, 0); 5541 return -TARGET_EFAULT; 5542 } 5543 } else { 5544 *host_rt_dev_ptr = 0; 5545 } 5546 field_types++; 5547 continue; 5548 } 5549 field_types = thunk_convert(buf_temp + dst_offsets[i], 5550 argptr + src_offsets[i], 5551 field_types, THUNK_HOST); 5552 } 5553 unlock_user(argptr, arg, 
0); 5554 5555 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5556 5557 assert(host_rt_dev_ptr != NULL); 5558 assert(target_rt_dev_ptr != NULL); 5559 if (*host_rt_dev_ptr != 0) { 5560 unlock_user((void *)*host_rt_dev_ptr, 5561 *target_rt_dev_ptr, 0); 5562 } 5563 return ret; 5564 } 5565 5566 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5567 int fd, int cmd, abi_long arg) 5568 { 5569 int sig = target_to_host_signal(arg); 5570 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5571 } 5572 5573 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp, 5574 int fd, int cmd, abi_long arg) 5575 { 5576 struct timeval tv; 5577 abi_long ret; 5578 5579 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv)); 5580 if (is_error(ret)) { 5581 return ret; 5582 } 5583 5584 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) { 5585 if (copy_to_user_timeval(arg, &tv)) { 5586 return -TARGET_EFAULT; 5587 } 5588 } else { 5589 if (copy_to_user_timeval64(arg, &tv)) { 5590 return -TARGET_EFAULT; 5591 } 5592 } 5593 5594 return ret; 5595 } 5596 5597 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp, 5598 int fd, int cmd, abi_long arg) 5599 { 5600 struct timespec ts; 5601 abi_long ret; 5602 5603 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts)); 5604 if (is_error(ret)) { 5605 return ret; 5606 } 5607 5608 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) { 5609 if (host_to_target_timespec(arg, &ts)) { 5610 return -TARGET_EFAULT; 5611 } 5612 } else{ 5613 if (host_to_target_timespec64(arg, &ts)) { 5614 return -TARGET_EFAULT; 5615 } 5616 } 5617 5618 return ret; 5619 } 5620 5621 #ifdef TIOCGPTPEER 5622 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp, 5623 int fd, int cmd, abi_long arg) 5624 { 5625 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl); 5626 return get_errno(safe_ioctl(fd, ie->host_cmd, flags)); 5627 } 5628 #endif 5629 5630 #ifdef HAVE_DRM_H 5631 5632 static void unlock_drm_version(struct drm_version *host_ver, 5633 struct target_drm_version *target_ver, 5634 bool copy) 5635 { 5636 unlock_user(host_ver->name, target_ver->name, 5637 copy ? host_ver->name_len : 0); 5638 unlock_user(host_ver->date, target_ver->date, 5639 copy ? host_ver->date_len : 0); 5640 unlock_user(host_ver->desc, target_ver->desc, 5641 copy ? 
host_ver->desc_len : 0); 5642 } 5643 5644 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver, 5645 struct target_drm_version *target_ver) 5646 { 5647 memset(host_ver, 0, sizeof(*host_ver)); 5648 5649 __get_user(host_ver->name_len, &target_ver->name_len); 5650 if (host_ver->name_len) { 5651 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name, 5652 target_ver->name_len, 0); 5653 if (!host_ver->name) { 5654 return -EFAULT; 5655 } 5656 } 5657 5658 __get_user(host_ver->date_len, &target_ver->date_len); 5659 if (host_ver->date_len) { 5660 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date, 5661 target_ver->date_len, 0); 5662 if (!host_ver->date) { 5663 goto err; 5664 } 5665 } 5666 5667 __get_user(host_ver->desc_len, &target_ver->desc_len); 5668 if (host_ver->desc_len) { 5669 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc, 5670 target_ver->desc_len, 0); 5671 if (!host_ver->desc) { 5672 goto err; 5673 } 5674 } 5675 5676 return 0; 5677 err: 5678 unlock_drm_version(host_ver, target_ver, false); 5679 return -EFAULT; 5680 } 5681 5682 static inline void host_to_target_drmversion( 5683 struct target_drm_version *target_ver, 5684 struct drm_version *host_ver) 5685 { 5686 __put_user(host_ver->version_major, &target_ver->version_major); 5687 __put_user(host_ver->version_minor, &target_ver->version_minor); 5688 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel); 5689 __put_user(host_ver->name_len, &target_ver->name_len); 5690 __put_user(host_ver->date_len, &target_ver->date_len); 5691 __put_user(host_ver->desc_len, &target_ver->desc_len); 5692 unlock_drm_version(host_ver, target_ver, true); 5693 } 5694 5695 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp, 5696 int fd, int cmd, abi_long arg) 5697 { 5698 struct drm_version *ver; 5699 struct target_drm_version *target_ver; 5700 abi_long ret; 5701 5702 switch (ie->host_cmd) { 5703 case DRM_IOCTL_VERSION: 5704 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) { 5705 return -TARGET_EFAULT; 5706 } 5707 ver = (struct drm_version *)buf_temp; 5708 ret = target_to_host_drmversion(ver, target_ver); 5709 if (!is_error(ret)) { 5710 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver)); 5711 if (is_error(ret)) { 5712 unlock_drm_version(ver, target_ver, false); 5713 } else { 5714 host_to_target_drmversion(target_ver, ver); 5715 } 5716 } 5717 unlock_user_struct(target_ver, arg, 0); 5718 return ret; 5719 } 5720 return -TARGET_ENOSYS; 5721 } 5722 5723 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie, 5724 struct drm_i915_getparam *gparam, 5725 int fd, abi_long arg) 5726 { 5727 abi_long ret; 5728 int value; 5729 struct target_drm_i915_getparam *target_gparam; 5730 5731 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) { 5732 return -TARGET_EFAULT; 5733 } 5734 5735 __get_user(gparam->param, &target_gparam->param); 5736 gparam->value = &value; 5737 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam)); 5738 put_user_s32(value, target_gparam->value); 5739 5740 unlock_user_struct(target_gparam, arg, 0); 5741 return ret; 5742 } 5743 5744 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp, 5745 int fd, int cmd, abi_long arg) 5746 { 5747 switch (ie->host_cmd) { 5748 case DRM_IOCTL_I915_GETPARAM: 5749 return do_ioctl_drm_i915_getparam(ie, 5750 (struct drm_i915_getparam *)buf_temp, 5751 fd, arg); 5752 default: 5753 return -TARGET_ENOSYS; 5754 } 5755 } 5756 5757 #endif 5758 5759 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, 
uint8_t *buf_temp, 5760 int fd, int cmd, abi_long arg) 5761 { 5762 struct tun_filter *filter = (struct tun_filter *)buf_temp; 5763 struct tun_filter *target_filter; 5764 char *target_addr; 5765 5766 assert(ie->access == IOC_W); 5767 5768 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1); 5769 if (!target_filter) { 5770 return -TARGET_EFAULT; 5771 } 5772 filter->flags = tswap16(target_filter->flags); 5773 filter->count = tswap16(target_filter->count); 5774 unlock_user(target_filter, arg, 0); 5775 5776 if (filter->count) { 5777 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN > 5778 MAX_STRUCT_SIZE) { 5779 return -TARGET_EFAULT; 5780 } 5781 5782 target_addr = lock_user(VERIFY_READ, 5783 arg + offsetof(struct tun_filter, addr), 5784 filter->count * ETH_ALEN, 1); 5785 if (!target_addr) { 5786 return -TARGET_EFAULT; 5787 } 5788 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN); 5789 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0); 5790 } 5791 5792 return get_errno(safe_ioctl(fd, ie->host_cmd, filter)); 5793 } 5794 5795 IOCTLEntry ioctl_entries[] = { 5796 #define IOCTL(cmd, access, ...) \ 5797 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5798 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5799 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5800 #define IOCTL_IGNORE(cmd) \ 5801 { TARGET_ ## cmd, 0, #cmd }, 5802 #include "ioctls.h" 5803 { 0, 0, }, 5804 }; 5805 5806 /* ??? Implement proper locking for ioctls. */ 5807 /* do_ioctl() Must return target values and target errnos. */ 5808 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5809 { 5810 const IOCTLEntry *ie; 5811 const argtype *arg_type; 5812 abi_long ret; 5813 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5814 int target_size; 5815 void *argptr; 5816 5817 ie = ioctl_entries; 5818 for(;;) { 5819 if (ie->target_cmd == 0) { 5820 qemu_log_mask( 5821 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5822 return -TARGET_ENOSYS; 5823 } 5824 if (ie->target_cmd == cmd) 5825 break; 5826 ie++; 5827 } 5828 arg_type = ie->arg_type; 5829 if (ie->do_ioctl) { 5830 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5831 } else if (!ie->host_cmd) { 5832 /* Some architectures define BSD ioctls in their headers 5833 that are not implemented in Linux. 
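Such entries are registered with IOCTL_IGNORE, which leaves host_cmd
zero, so they are reported as unimplemented here.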
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}

static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
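/*
 * Added note on the table format (my reading, not upstream text): each
 * bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }. Single-bit flags repeat the same value for mask and
 * bits; multi-bit fields list one row per legal value. For example,
 * the two NLDLY rows in oflag_tbl above mean: if the target value
 * masked with TARGET_NLDLY equals TARGET_NL1, set NL1 within the host
 * NLDLY field, and symmetrically in the other direction via
 * host_to_target_bitmask().
 */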
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
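/*
 * Added note (assumption based on the surrounding code): registering
 * this StructEntry as a STRUCT_SPECIAL thunk type (via the
 * thunk_register_struct_direct() call emitted from syscall_init())
 * routes termios-carrying ioctls through the two hand-written
 * converters above instead of field-by-field thunking. The .convert
 * array is ordered { host_to_target, target_to_host }.
 */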
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK has been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};

/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
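/*
 * Worked example of the descriptor packing above (added commentary,
 * assuming read_exec_only = 0 and seg_not_present = 0): for
 * base_addr = 0x12345678 and limit = 0xfffff with seg_32bit = 1 and
 * limit_in_pages = 1,
 *
 *     entry_1 = (0x5678 << 16) | 0xffff = 0x5678ffff
 *     entry_2 = 0x12000000 | 0x34 | 0xf0000 | (1 << 9) | (1 << 15)
 *               | (1 << 22) | (1 << 23) | 0x7000 = 0x12cff234
 *
 * which is the split base/limit layout of an x86 segment descriptor;
 * the 0x7000 constant supplies S = 1 and DPL = 3, while the present
 * bit comes from (seg_not_present ^ 1) << 15.
 */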
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}

abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000


static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
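/*
 * Added sketch of the thread-creation handshake (my reading, not
 * upstream text):
 *
 *   parent (do_fork)                 child (clone_func)
 *   ----------------                 ------------------
 *   lock clone_lock
 *   lock info.mutex
 *   pthread_create()  ............>  register with rcu/tcg, set tid
 *   cond_wait(info.cond)  <........  cond_broadcast(info.cond)
 *   read info.tid
 *   unlock clone_lock ............>  lock+unlock clone_lock passes
 *                                    cpu_loop(env)
 *
 * The child's lock/unlock of clone_lock is purely a barrier: it delays
 * cpu_loop() until the parent has finished the setup it performs while
 * holding clone_lock in do_fork() below.
 */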
/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
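/*
 * Added context for the translation below (not upstream text): the
 * guest's F_GETLK/F_SETLK/F_SETLKW are mapped onto the host's 64-bit
 * F_*LK64 commands, so one host struct flock64 can back both the
 * 32-bit and 64-bit guest flock layouts; e.g. a 32-bit guest F_SETLK
 * becomes a host F_SETLK64 operating on a flock64 filled in by
 * copy_from_user_flock().
 */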
/* warning: doesn't handle Linux-specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
     * are not supported by the kernel. The glibc fcntl call actually
     * adjusts them to 5, 6 and 7 before making the syscall(). Since we
     * make the syscall directly, adjust to what the kernel expects.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
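/*
 * Added note (my reading, not upstream text): FLOCK_TRANSTBL expands
 * the same switch body twice with opposite TRANSTBL_CONVERT
 * definitions, yielding a pair of inverse mappings from a single list
 * of lock types:
 *     target_to_host_flock(TARGET_F_RDLCK) == F_RDLCK
 *     host_to_target_flock(F_RDLCK)        == TARGET_F_RDLCK
 * Keeping one list prevents the two directions from drifting apart.
 */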
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* don't leak the locked struct on the error path */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* don't leak the locked struct on the error path */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        /* don't leak the locked struct on the error path */
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
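/*
 * Added summary (not upstream text): do_fcntl() below handles three
 * argument shapes: commands taking the guest's 32-bit struct flock,
 * commands taking struct flock64 (including the OFD lock commands),
 * and scalar commands whose argument passes straight through, after
 * bitmask or signal-number translation where needed (F_SETFL,
 * F_SETSIG, ...).
 */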
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
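/*
 * Concrete illustration (added, not upstream text): glibc's setuid()
 * broadcasts the credential change to every thread in the process,
 * so calling it here would also change the credentials of QEMU's own
 * helper threads. The raw sys_setuid() wrapper below touches only the
 * calling thread, which is the semantic the guest binary expects from
 * the bare syscall.
 */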
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* We patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field. */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
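/*
 * Added worked example (not upstream text): on ABIs where a 64-bit
 * syscall argument must start in an even register pair (the
 * regpairs_aligned() case), a guest ftruncate64(fd, offset) arrives as
 * (arg1 = fd, arg2 = padding, arg3/arg4 = the two offset halves)
 * rather than having the halves in arg2/arg3, which is why the helpers
 * above shift arg3/arg4 down before recombining them with
 * target_offset64().
 */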
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
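/*
 * Added note on the futex plumbing below (my summary, not upstream
 * text): the timeout is the only ABI-sensitive argument. On 64-bit
 * hosts __NR_futex already takes a 64-bit timespec; on 32-bit hosts
 * the _time64 variant is preferred when libc's timespec has a 64-bit
 * tv_sec, with the classic __NR_futex as the fallback. The same
 * selection logic appears twice, once for the raw call and once for
 * the signal-safe variant.
 */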
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* The host always has a 64-bit time_t and defines no _time64 variant. */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  In any case they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.
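       In practice the kernel defines them in an arch-independent
       uapi header (e.g. FUTEX_WAIT is 0 and FUTEX_WAKE is 1 on every
       Linux port), so no translation table is needed here.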
*/ 7691 #ifdef FUTEX_CMD_MASK 7692 base_op = op & FUTEX_CMD_MASK; 7693 #else 7694 base_op = op; 7695 #endif 7696 switch (base_op) { 7697 case FUTEX_WAIT: 7698 case FUTEX_WAIT_BITSET: 7699 if (timeout) { 7700 pts = &ts; 7701 if (target_to_host_timespec64(pts, timeout)) { 7702 return -TARGET_EFAULT; 7703 } 7704 } else { 7705 pts = NULL; 7706 } 7707 return do_safe_futex(g2h(cpu, uaddr), op, 7708 tswap32(val), pts, NULL, val3); 7709 case FUTEX_WAKE: 7710 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); 7711 case FUTEX_FD: 7712 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0); 7713 case FUTEX_REQUEUE: 7714 case FUTEX_CMP_REQUEUE: 7715 case FUTEX_WAKE_OP: 7716 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7717 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7718 But the prototype takes a `struct timespec *'; insert casts 7719 to satisfy the compiler. We do not need to tswap TIMEOUT 7720 since it's not compared to guest memory. */ 7721 pts = (struct timespec *)(uintptr_t) timeout; 7722 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2), 7723 (base_op == FUTEX_CMP_REQUEUE 7724 ? tswap32(val3) : val3)); 7725 default: 7726 return -TARGET_ENOSYS; 7727 } 7728 } 7729 #endif 7730 7731 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7732 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7733 abi_long handle, abi_long mount_id, 7734 abi_long flags) 7735 { 7736 struct file_handle *target_fh; 7737 struct file_handle *fh; 7738 int mid = 0; 7739 abi_long ret; 7740 char *name; 7741 unsigned int size, total_size; 7742 7743 if (get_user_s32(size, handle)) { 7744 return -TARGET_EFAULT; 7745 } 7746 7747 name = lock_user_string(pathname); 7748 if (!name) { 7749 return -TARGET_EFAULT; 7750 } 7751 7752 total_size = sizeof(struct file_handle) + size; 7753 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7754 if (!target_fh) { 7755 unlock_user(name, pathname, 0); 7756 return -TARGET_EFAULT; 7757 } 7758 7759 fh = g_malloc0(total_size); 7760 fh->handle_bytes = size; 7761 7762 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7763 unlock_user(name, pathname, 0); 7764 7765 /* man name_to_handle_at(2): 7766 * Other than the use of the handle_bytes field, the caller should treat 7767 * the file_handle structure as an opaque data type 7768 */ 7769 7770 memcpy(target_fh, fh, total_size); 7771 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7772 target_fh->handle_type = tswap32(fh->handle_type); 7773 g_free(fh); 7774 unlock_user(target_fh, handle, total_size); 7775 7776 if (put_user_s32(mid, mount_id)) { 7777 return -TARGET_EFAULT; 7778 } 7779 7780 return ret; 7781 7782 } 7783 #endif 7784 7785 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7786 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7787 abi_long flags) 7788 { 7789 struct file_handle *target_fh; 7790 struct file_handle *fh; 7791 unsigned int size, total_size; 7792 abi_long ret; 7793 7794 if (get_user_s32(size, handle)) { 7795 return -TARGET_EFAULT; 7796 } 7797 7798 total_size = sizeof(struct file_handle) + size; 7799 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7800 if (!target_fh) { 7801 return -TARGET_EFAULT; 7802 } 7803 7804 fh = g_memdup(target_fh, total_size); 7805 fh->handle_bytes = size; 7806 fh->handle_type = tswap32(target_fh->handle_type); 7807 7808 ret = get_errno(open_by_handle_at(mount_fd, fh, 7809 target_to_host_bitmask(flags, 
fcntl_flags_tbl))); 7810 7811 g_free(fh); 7812 7813 unlock_user(target_fh, handle, total_size); 7814 7815 return ret; 7816 } 7817 #endif 7818 7819 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7820 7821 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7822 { 7823 int host_flags; 7824 target_sigset_t *target_mask; 7825 sigset_t host_mask; 7826 abi_long ret; 7827 7828 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) { 7829 return -TARGET_EINVAL; 7830 } 7831 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7832 return -TARGET_EFAULT; 7833 } 7834 7835 target_to_host_sigset(&host_mask, target_mask); 7836 7837 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7838 7839 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7840 if (ret >= 0) { 7841 fd_trans_register(ret, &target_signalfd_trans); 7842 } 7843 7844 unlock_user_struct(target_mask, mask, 0); 7845 7846 return ret; 7847 } 7848 #endif 7849 7850 /* Map host to target signal numbers for the wait family of syscalls. 7851 Assume all other status bits are the same. */ 7852 int host_to_target_waitstatus(int status) 7853 { 7854 if (WIFSIGNALED(status)) { 7855 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7856 } 7857 if (WIFSTOPPED(status)) { 7858 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7859 | (status & 0xff); 7860 } 7861 return status; 7862 } 7863 7864 static int open_self_cmdline(void *cpu_env, int fd) 7865 { 7866 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7867 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7868 int i; 7869 7870 for (i = 0; i < bprm->argc; i++) { 7871 size_t len = strlen(bprm->argv[i]) + 1; 7872 7873 if (write(fd, bprm->argv[i], len) != len) { 7874 return -1; 7875 } 7876 } 7877 7878 return 0; 7879 } 7880 7881 static int open_self_maps(void *cpu_env, int fd) 7882 { 7883 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7884 TaskState *ts = cpu->opaque; 7885 GSList *map_info = read_self_maps(); 7886 GSList *s; 7887 int count; 7888 7889 for (s = map_info; s; s = g_slist_next(s)) { 7890 MapInfo *e = (MapInfo *) s->data; 7891 7892 if (h2g_valid(e->start)) { 7893 unsigned long min = e->start; 7894 unsigned long max = e->end; 7895 int flags = page_get_flags(h2g(min)); 7896 const char *path; 7897 7898 max = h2g_valid(max - 1) ? 7899 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1; 7900 7901 if (page_check_range(h2g(min), max - min, flags) == -1) { 7902 continue; 7903 } 7904 7905 if (h2g(min) == ts->info->stack_limit) { 7906 path = "[stack]"; 7907 } else { 7908 path = e->path; 7909 } 7910 7911 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr 7912 " %c%c%c%c %08" PRIx64 " %s %"PRId64, 7913 h2g(min), h2g(max - 1) + 1, 7914 (flags & PAGE_READ) ? 'r' : '-', 7915 (flags & PAGE_WRITE_ORG) ? 'w' : '-', 7916 (flags & PAGE_EXEC) ? 'x' : '-', 7917 e->is_priv ? 'p' : '-', 7918 (uint64_t) e->offset, e->dev, e->inode); 7919 if (path) { 7920 dprintf(fd, "%*s%s\n", 73 - count, "", path); 7921 } else { 7922 dprintf(fd, "\n"); 7923 } 7924 } 7925 } 7926 7927 free_self_maps(map_info); 7928 7929 #ifdef TARGET_VSYSCALL_PAGE 7930 /* 7931 * We only support execution from the vsyscall page. 7932 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3. 
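     * That makes the page execute-only; the "--xp" permission string
     * emitted below matches what such a kernel shows in
     * /proc/self/maps for [vsyscall].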
7933 */ 7934 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx 7935 " --xp 00000000 00:00 0", 7936 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE); 7937 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]"); 7938 #endif 7939 7940 return 0; 7941 } 7942 7943 static int open_self_stat(void *cpu_env, int fd) 7944 { 7945 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7946 TaskState *ts = cpu->opaque; 7947 g_autoptr(GString) buf = g_string_new(NULL); 7948 int i; 7949 7950 for (i = 0; i < 44; i++) { 7951 if (i == 0) { 7952 /* pid */ 7953 g_string_printf(buf, FMT_pid " ", getpid()); 7954 } else if (i == 1) { 7955 /* app name */ 7956 gchar *bin = g_strrstr(ts->bprm->argv[0], "/"); 7957 bin = bin ? bin + 1 : ts->bprm->argv[0]; 7958 g_string_printf(buf, "(%.15s) ", bin); 7959 } else if (i == 27) { 7960 /* stack bottom */ 7961 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack); 7962 } else { 7963 /* for the rest, there is MasterCard */ 7964 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' '); 7965 } 7966 7967 if (write(fd, buf->str, buf->len) != buf->len) { 7968 return -1; 7969 } 7970 } 7971 7972 return 0; 7973 } 7974 7975 static int open_self_auxv(void *cpu_env, int fd) 7976 { 7977 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7978 TaskState *ts = cpu->opaque; 7979 abi_ulong auxv = ts->info->saved_auxv; 7980 abi_ulong len = ts->info->auxv_len; 7981 char *ptr; 7982 7983 /* 7984 * Auxiliary vector is stored in target process stack. 7985 * read in whole auxv vector and copy it to file 7986 */ 7987 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7988 if (ptr != NULL) { 7989 while (len > 0) { 7990 ssize_t r; 7991 r = write(fd, ptr, len); 7992 if (r <= 0) { 7993 break; 7994 } 7995 len -= r; 7996 ptr += r; 7997 } 7998 lseek(fd, 0, SEEK_SET); 7999 unlock_user(ptr, auxv, len); 8000 } 8001 8002 return 0; 8003 } 8004 8005 static int is_proc_myself(const char *filename, const char *entry) 8006 { 8007 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 8008 filename += strlen("/proc/"); 8009 if (!strncmp(filename, "self/", strlen("self/"))) { 8010 filename += strlen("self/"); 8011 } else if (*filename >= '1' && *filename <= '9') { 8012 char myself[80]; 8013 snprintf(myself, sizeof(myself), "%d/", getpid()); 8014 if (!strncmp(filename, myself, strlen(myself))) { 8015 filename += strlen(myself); 8016 } else { 8017 return 0; 8018 } 8019 } else { 8020 return 0; 8021 } 8022 if (!strcmp(filename, entry)) { 8023 return 1; 8024 } 8025 } 8026 return 0; 8027 } 8028 8029 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \ 8030 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) 8031 static int is_proc(const char *filename, const char *entry) 8032 { 8033 return strcmp(filename, entry) == 0; 8034 } 8035 #endif 8036 8037 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 8038 static int open_net_route(void *cpu_env, int fd) 8039 { 8040 FILE *fp; 8041 char *line = NULL; 8042 size_t len = 0; 8043 ssize_t read; 8044 8045 fp = fopen("/proc/net/route", "r"); 8046 if (fp == NULL) { 8047 return -1; 8048 } 8049 8050 /* read header */ 8051 8052 read = getline(&line, &len, fp); 8053 dprintf(fd, "%s", line); 8054 8055 /* read routes */ 8056 8057 while ((read = getline(&line, &len, fp)) != -1) { 8058 char iface[16]; 8059 uint32_t dest, gw, mask; 8060 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 8061 int fields; 8062 8063 fields = sscanf(line, 8064 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 8065 iface, &dest, &gw, 
&flags, &refcnt, &use, &metric, 8066 &mask, &mtu, &window, &irtt); 8067 if (fields != 11) { 8068 continue; 8069 } 8070 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 8071 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 8072 metric, tswap32(mask), mtu, window, irtt); 8073 } 8074 8075 free(line); 8076 fclose(fp); 8077 8078 return 0; 8079 } 8080 #endif 8081 8082 #if defined(TARGET_SPARC) 8083 static int open_cpuinfo(void *cpu_env, int fd) 8084 { 8085 dprintf(fd, "type\t\t: sun4u\n"); 8086 return 0; 8087 } 8088 #endif 8089 8090 #if defined(TARGET_HPPA) 8091 static int open_cpuinfo(void *cpu_env, int fd) 8092 { 8093 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n"); 8094 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n"); 8095 dprintf(fd, "capabilities\t: os32\n"); 8096 dprintf(fd, "model\t\t: 9000/778/B160L\n"); 8097 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n"); 8098 return 0; 8099 } 8100 #endif 8101 8102 #if defined(TARGET_M68K) 8103 static int open_hardware(void *cpu_env, int fd) 8104 { 8105 dprintf(fd, "Model:\t\tqemu-m68k\n"); 8106 return 0; 8107 } 8108 #endif 8109 8110 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 8111 { 8112 struct fake_open { 8113 const char *filename; 8114 int (*fill)(void *cpu_env, int fd); 8115 int (*cmp)(const char *s1, const char *s2); 8116 }; 8117 const struct fake_open *fake_open; 8118 static const struct fake_open fakes[] = { 8119 { "maps", open_self_maps, is_proc_myself }, 8120 { "stat", open_self_stat, is_proc_myself }, 8121 { "auxv", open_self_auxv, is_proc_myself }, 8122 { "cmdline", open_self_cmdline, is_proc_myself }, 8123 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 8124 { "/proc/net/route", open_net_route, is_proc }, 8125 #endif 8126 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) 8127 { "/proc/cpuinfo", open_cpuinfo, is_proc }, 8128 #endif 8129 #if defined(TARGET_M68K) 8130 { "/proc/hardware", open_hardware, is_proc }, 8131 #endif 8132 { NULL, NULL, NULL } 8133 }; 8134 8135 if (is_proc_myself(pathname, "exe")) { 8136 int execfd = qemu_getauxval(AT_EXECFD); 8137 return execfd ? 
execfd : safe_openat(dirfd, exec_path, flags, mode); 8138 } 8139 8140 for (fake_open = fakes; fake_open->filename; fake_open++) { 8141 if (fake_open->cmp(pathname, fake_open->filename)) { 8142 break; 8143 } 8144 } 8145 8146 if (fake_open->filename) { 8147 const char *tmpdir; 8148 char filename[PATH_MAX]; 8149 int fd, r; 8150 8151 /* create temporary file to map stat to */ 8152 tmpdir = getenv("TMPDIR"); 8153 if (!tmpdir) 8154 tmpdir = "/tmp"; 8155 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 8156 fd = mkstemp(filename); 8157 if (fd < 0) { 8158 return fd; 8159 } 8160 unlink(filename); 8161 8162 if ((r = fake_open->fill(cpu_env, fd))) { 8163 int e = errno; 8164 close(fd); 8165 errno = e; 8166 return r; 8167 } 8168 lseek(fd, 0, SEEK_SET); 8169 8170 return fd; 8171 } 8172 8173 return safe_openat(dirfd, path(pathname), flags, mode); 8174 } 8175 8176 #define TIMER_MAGIC 0x0caf0000 8177 #define TIMER_MAGIC_MASK 0xffff0000 8178 8179 /* Convert QEMU provided timer ID back to internal 16bit index format */ 8180 static target_timer_t get_timer_id(abi_long arg) 8181 { 8182 target_timer_t timerid = arg; 8183 8184 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 8185 return -TARGET_EINVAL; 8186 } 8187 8188 timerid &= 0xffff; 8189 8190 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 8191 return -TARGET_EINVAL; 8192 } 8193 8194 return timerid; 8195 } 8196 8197 static int target_to_host_cpu_mask(unsigned long *host_mask, 8198 size_t host_size, 8199 abi_ulong target_addr, 8200 size_t target_size) 8201 { 8202 unsigned target_bits = sizeof(abi_ulong) * 8; 8203 unsigned host_bits = sizeof(*host_mask) * 8; 8204 abi_ulong *target_mask; 8205 unsigned i, j; 8206 8207 assert(host_size >= target_size); 8208 8209 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1); 8210 if (!target_mask) { 8211 return -TARGET_EFAULT; 8212 } 8213 memset(host_mask, 0, host_size); 8214 8215 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8216 unsigned bit = i * target_bits; 8217 abi_ulong val; 8218 8219 __get_user(val, &target_mask[i]); 8220 for (j = 0; j < target_bits; j++, bit++) { 8221 if (val & (1UL << j)) { 8222 host_mask[bit / host_bits] |= 1UL << (bit % host_bits); 8223 } 8224 } 8225 } 8226 8227 unlock_user(target_mask, target_addr, 0); 8228 return 0; 8229 } 8230 8231 static int host_to_target_cpu_mask(const unsigned long *host_mask, 8232 size_t host_size, 8233 abi_ulong target_addr, 8234 size_t target_size) 8235 { 8236 unsigned target_bits = sizeof(abi_ulong) * 8; 8237 unsigned host_bits = sizeof(*host_mask) * 8; 8238 abi_ulong *target_mask; 8239 unsigned i, j; 8240 8241 assert(host_size >= target_size); 8242 8243 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0); 8244 if (!target_mask) { 8245 return -TARGET_EFAULT; 8246 } 8247 8248 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8249 unsigned bit = i * target_bits; 8250 abi_ulong val = 0; 8251 8252 for (j = 0; j < target_bits; j++, bit++) { 8253 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 8254 val |= 1UL << j; 8255 } 8256 } 8257 __put_user(val, &target_mask[i]); 8258 } 8259 8260 unlock_user(target_mask, target_addr, target_size); 8261 return 0; 8262 } 8263 8264 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root) 8265 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old) 8266 #endif 8267 8268 /* This is an internal helper for do_syscall so that it is easier 8269 * to have a single return point, so that actions, such as logging 8270 * of syscall results, can be 
performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However, in threaded applications it is used for thread
           termination, and _exit_group is used for application
           termination.
           Do thread termination if we have more than one thread.  */

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }
            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;

#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
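        /*
         * Clear any fd translator left over from an earlier file
         * descriptor that happened to have the same number.
         */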
8384 fd_trans_unregister(ret); 8385 unlock_user(p, arg2, 0); 8386 return ret; 8387 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8388 case TARGET_NR_name_to_handle_at: 8389 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8390 return ret; 8391 #endif 8392 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8393 case TARGET_NR_open_by_handle_at: 8394 ret = do_open_by_handle_at(arg1, arg2, arg3); 8395 fd_trans_unregister(ret); 8396 return ret; 8397 #endif 8398 case TARGET_NR_close: 8399 fd_trans_unregister(arg1); 8400 return get_errno(close(arg1)); 8401 8402 case TARGET_NR_brk: 8403 return do_brk(arg1); 8404 #ifdef TARGET_NR_fork 8405 case TARGET_NR_fork: 8406 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8407 #endif 8408 #ifdef TARGET_NR_waitpid 8409 case TARGET_NR_waitpid: 8410 { 8411 int status; 8412 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8413 if (!is_error(ret) && arg2 && ret 8414 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8415 return -TARGET_EFAULT; 8416 } 8417 return ret; 8418 #endif 8419 #ifdef TARGET_NR_waitid 8420 case TARGET_NR_waitid: 8421 { 8422 siginfo_t info; 8423 info.si_pid = 0; 8424 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8425 if (!is_error(ret) && arg3 && info.si_pid != 0) { 8426 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8427 return -TARGET_EFAULT; 8428 host_to_target_siginfo(p, &info); 8429 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8430 } 8431 } 8432 return ret; 8433 #endif 8434 #ifdef TARGET_NR_creat /* not on alpha */ 8435 case TARGET_NR_creat: 8436 if (!(p = lock_user_string(arg1))) 8437 return -TARGET_EFAULT; 8438 ret = get_errno(creat(p, arg2)); 8439 fd_trans_unregister(ret); 8440 unlock_user(p, arg1, 0); 8441 return ret; 8442 #endif 8443 #ifdef TARGET_NR_link 8444 case TARGET_NR_link: 8445 { 8446 void * p2; 8447 p = lock_user_string(arg1); 8448 p2 = lock_user_string(arg2); 8449 if (!p || !p2) 8450 ret = -TARGET_EFAULT; 8451 else 8452 ret = get_errno(link(p, p2)); 8453 unlock_user(p2, arg2, 0); 8454 unlock_user(p, arg1, 0); 8455 } 8456 return ret; 8457 #endif 8458 #if defined(TARGET_NR_linkat) 8459 case TARGET_NR_linkat: 8460 { 8461 void * p2 = NULL; 8462 if (!arg2 || !arg4) 8463 return -TARGET_EFAULT; 8464 p = lock_user_string(arg2); 8465 p2 = lock_user_string(arg4); 8466 if (!p || !p2) 8467 ret = -TARGET_EFAULT; 8468 else 8469 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8470 unlock_user(p, arg2, 0); 8471 unlock_user(p2, arg4, 0); 8472 } 8473 return ret; 8474 #endif 8475 #ifdef TARGET_NR_unlink 8476 case TARGET_NR_unlink: 8477 if (!(p = lock_user_string(arg1))) 8478 return -TARGET_EFAULT; 8479 ret = get_errno(unlink(p)); 8480 unlock_user(p, arg1, 0); 8481 return ret; 8482 #endif 8483 #if defined(TARGET_NR_unlinkat) 8484 case TARGET_NR_unlinkat: 8485 if (!(p = lock_user_string(arg2))) 8486 return -TARGET_EFAULT; 8487 ret = get_errno(unlinkat(arg1, p, arg3)); 8488 unlock_user(p, arg2, 0); 8489 return ret; 8490 #endif 8491 case TARGET_NR_execve: 8492 { 8493 char **argp, **envp; 8494 int argc, envc; 8495 abi_ulong gp; 8496 abi_ulong guest_argp; 8497 abi_ulong guest_envp; 8498 abi_ulong addr; 8499 char **q; 8500 int total_size = 0; 8501 8502 argc = 0; 8503 guest_argp = arg2; 8504 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8505 if (get_user_ual(addr, gp)) 8506 return -TARGET_EFAULT; 8507 if (!addr) 8508 break; 8509 argc++; 8510 } 8511 envc = 0; 8512 guest_envp = arg3; 8513 for (gp = guest_envp; 
gp; gp += sizeof(abi_ulong)) { 8514 if (get_user_ual(addr, gp)) 8515 return -TARGET_EFAULT; 8516 if (!addr) 8517 break; 8518 envc++; 8519 } 8520 8521 argp = g_new0(char *, argc + 1); 8522 envp = g_new0(char *, envc + 1); 8523 8524 for (gp = guest_argp, q = argp; gp; 8525 gp += sizeof(abi_ulong), q++) { 8526 if (get_user_ual(addr, gp)) 8527 goto execve_efault; 8528 if (!addr) 8529 break; 8530 if (!(*q = lock_user_string(addr))) 8531 goto execve_efault; 8532 total_size += strlen(*q) + 1; 8533 } 8534 *q = NULL; 8535 8536 for (gp = guest_envp, q = envp; gp; 8537 gp += sizeof(abi_ulong), q++) { 8538 if (get_user_ual(addr, gp)) 8539 goto execve_efault; 8540 if (!addr) 8541 break; 8542 if (!(*q = lock_user_string(addr))) 8543 goto execve_efault; 8544 total_size += strlen(*q) + 1; 8545 } 8546 *q = NULL; 8547 8548 if (!(p = lock_user_string(arg1))) 8549 goto execve_efault; 8550 /* Although execve() is not an interruptible syscall it is 8551 * a special case where we must use the safe_syscall wrapper: 8552 * if we allow a signal to happen before we make the host 8553 * syscall then we will 'lose' it, because at the point of 8554 * execve the process leaves QEMU's control. So we use the 8555 * safe syscall wrapper to ensure that we either take the 8556 * signal as a guest signal, or else it does not happen 8557 * before the execve completes and makes it the other 8558 * program's problem. 8559 */ 8560 ret = get_errno(safe_execve(p, argp, envp)); 8561 unlock_user(p, arg1, 0); 8562 8563 goto execve_end; 8564 8565 execve_efault: 8566 ret = -TARGET_EFAULT; 8567 8568 execve_end: 8569 for (gp = guest_argp, q = argp; *q; 8570 gp += sizeof(abi_ulong), q++) { 8571 if (get_user_ual(addr, gp) 8572 || !addr) 8573 break; 8574 unlock_user(*q, addr, 0); 8575 } 8576 for (gp = guest_envp, q = envp; *q; 8577 gp += sizeof(abi_ulong), q++) { 8578 if (get_user_ual(addr, gp) 8579 || !addr) 8580 break; 8581 unlock_user(*q, addr, 0); 8582 } 8583 8584 g_free(argp); 8585 g_free(envp); 8586 } 8587 return ret; 8588 case TARGET_NR_chdir: 8589 if (!(p = lock_user_string(arg1))) 8590 return -TARGET_EFAULT; 8591 ret = get_errno(chdir(p)); 8592 unlock_user(p, arg1, 0); 8593 return ret; 8594 #ifdef TARGET_NR_time 8595 case TARGET_NR_time: 8596 { 8597 time_t host_time; 8598 ret = get_errno(time(&host_time)); 8599 if (!is_error(ret) 8600 && arg1 8601 && put_user_sal(host_time, arg1)) 8602 return -TARGET_EFAULT; 8603 } 8604 return ret; 8605 #endif 8606 #ifdef TARGET_NR_mknod 8607 case TARGET_NR_mknod: 8608 if (!(p = lock_user_string(arg1))) 8609 return -TARGET_EFAULT; 8610 ret = get_errno(mknod(p, arg2, arg3)); 8611 unlock_user(p, arg1, 0); 8612 return ret; 8613 #endif 8614 #if defined(TARGET_NR_mknodat) 8615 case TARGET_NR_mknodat: 8616 if (!(p = lock_user_string(arg2))) 8617 return -TARGET_EFAULT; 8618 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8619 unlock_user(p, arg2, 0); 8620 return ret; 8621 #endif 8622 #ifdef TARGET_NR_chmod 8623 case TARGET_NR_chmod: 8624 if (!(p = lock_user_string(arg1))) 8625 return -TARGET_EFAULT; 8626 ret = get_errno(chmod(p, arg2)); 8627 unlock_user(p, arg1, 0); 8628 return ret; 8629 #endif 8630 #ifdef TARGET_NR_lseek 8631 case TARGET_NR_lseek: 8632 return get_errno(lseek(arg1, arg2, arg3)); 8633 #endif 8634 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8635 /* Alpha specific */ 8636 case TARGET_NR_getxpid: 8637 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8638 return get_errno(getpid()); 8639 #endif 8640 #ifdef TARGET_NR_getpid 8641 case TARGET_NR_getpid: 8642 return 
get_errno(getpid()); 8643 #endif 8644 case TARGET_NR_mount: 8645 { 8646 /* need to look at the data field */ 8647 void *p2, *p3; 8648 8649 if (arg1) { 8650 p = lock_user_string(arg1); 8651 if (!p) { 8652 return -TARGET_EFAULT; 8653 } 8654 } else { 8655 p = NULL; 8656 } 8657 8658 p2 = lock_user_string(arg2); 8659 if (!p2) { 8660 if (arg1) { 8661 unlock_user(p, arg1, 0); 8662 } 8663 return -TARGET_EFAULT; 8664 } 8665 8666 if (arg3) { 8667 p3 = lock_user_string(arg3); 8668 if (!p3) { 8669 if (arg1) { 8670 unlock_user(p, arg1, 0); 8671 } 8672 unlock_user(p2, arg2, 0); 8673 return -TARGET_EFAULT; 8674 } 8675 } else { 8676 p3 = NULL; 8677 } 8678 8679 /* FIXME - arg5 should be locked, but it isn't clear how to 8680 * do that since it's not guaranteed to be a NULL-terminated 8681 * string. 8682 */ 8683 if (!arg5) { 8684 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8685 } else { 8686 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5)); 8687 } 8688 ret = get_errno(ret); 8689 8690 if (arg1) { 8691 unlock_user(p, arg1, 0); 8692 } 8693 unlock_user(p2, arg2, 0); 8694 if (arg3) { 8695 unlock_user(p3, arg3, 0); 8696 } 8697 } 8698 return ret; 8699 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 8700 #if defined(TARGET_NR_umount) 8701 case TARGET_NR_umount: 8702 #endif 8703 #if defined(TARGET_NR_oldumount) 8704 case TARGET_NR_oldumount: 8705 #endif 8706 if (!(p = lock_user_string(arg1))) 8707 return -TARGET_EFAULT; 8708 ret = get_errno(umount(p)); 8709 unlock_user(p, arg1, 0); 8710 return ret; 8711 #endif 8712 #ifdef TARGET_NR_stime /* not on alpha */ 8713 case TARGET_NR_stime: 8714 { 8715 struct timespec ts; 8716 ts.tv_nsec = 0; 8717 if (get_user_sal(ts.tv_sec, arg1)) { 8718 return -TARGET_EFAULT; 8719 } 8720 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 8721 } 8722 #endif 8723 #ifdef TARGET_NR_alarm /* not on alpha */ 8724 case TARGET_NR_alarm: 8725 return alarm(arg1); 8726 #endif 8727 #ifdef TARGET_NR_pause /* not on alpha */ 8728 case TARGET_NR_pause: 8729 if (!block_signals()) { 8730 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8731 } 8732 return -TARGET_EINTR; 8733 #endif 8734 #ifdef TARGET_NR_utime 8735 case TARGET_NR_utime: 8736 { 8737 struct utimbuf tbuf, *host_tbuf; 8738 struct target_utimbuf *target_tbuf; 8739 if (arg2) { 8740 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8741 return -TARGET_EFAULT; 8742 tbuf.actime = tswapal(target_tbuf->actime); 8743 tbuf.modtime = tswapal(target_tbuf->modtime); 8744 unlock_user_struct(target_tbuf, arg2, 0); 8745 host_tbuf = &tbuf; 8746 } else { 8747 host_tbuf = NULL; 8748 } 8749 if (!(p = lock_user_string(arg1))) 8750 return -TARGET_EFAULT; 8751 ret = get_errno(utime(p, host_tbuf)); 8752 unlock_user(p, arg1, 0); 8753 } 8754 return ret; 8755 #endif 8756 #ifdef TARGET_NR_utimes 8757 case TARGET_NR_utimes: 8758 { 8759 struct timeval *tvp, tv[2]; 8760 if (arg2) { 8761 if (copy_from_user_timeval(&tv[0], arg2) 8762 || copy_from_user_timeval(&tv[1], 8763 arg2 + sizeof(struct target_timeval))) 8764 return -TARGET_EFAULT; 8765 tvp = tv; 8766 } else { 8767 tvp = NULL; 8768 } 8769 if (!(p = lock_user_string(arg1))) 8770 return -TARGET_EFAULT; 8771 ret = get_errno(utimes(p, tvp)); 8772 unlock_user(p, arg1, 0); 8773 } 8774 return ret; 8775 #endif 8776 #if defined(TARGET_NR_futimesat) 8777 case TARGET_NR_futimesat: 8778 { 8779 struct timeval *tvp, tv[2]; 8780 if (arg3) { 8781 if (copy_from_user_timeval(&tv[0], arg3) 8782 || copy_from_user_timeval(&tv[1], 8783 arg3 + sizeof(struct target_timeval))) 8784 return 
-TARGET_EFAULT; 8785 tvp = tv; 8786 } else { 8787 tvp = NULL; 8788 } 8789 if (!(p = lock_user_string(arg2))) { 8790 return -TARGET_EFAULT; 8791 } 8792 ret = get_errno(futimesat(arg1, path(p), tvp)); 8793 unlock_user(p, arg2, 0); 8794 } 8795 return ret; 8796 #endif 8797 #ifdef TARGET_NR_access 8798 case TARGET_NR_access: 8799 if (!(p = lock_user_string(arg1))) { 8800 return -TARGET_EFAULT; 8801 } 8802 ret = get_errno(access(path(p), arg2)); 8803 unlock_user(p, arg1, 0); 8804 return ret; 8805 #endif 8806 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8807 case TARGET_NR_faccessat: 8808 if (!(p = lock_user_string(arg2))) { 8809 return -TARGET_EFAULT; 8810 } 8811 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8812 unlock_user(p, arg2, 0); 8813 return ret; 8814 #endif 8815 #ifdef TARGET_NR_nice /* not on alpha */ 8816 case TARGET_NR_nice: 8817 return get_errno(nice(arg1)); 8818 #endif 8819 case TARGET_NR_sync: 8820 sync(); 8821 return 0; 8822 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8823 case TARGET_NR_syncfs: 8824 return get_errno(syncfs(arg1)); 8825 #endif 8826 case TARGET_NR_kill: 8827 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8828 #ifdef TARGET_NR_rename 8829 case TARGET_NR_rename: 8830 { 8831 void *p2; 8832 p = lock_user_string(arg1); 8833 p2 = lock_user_string(arg2); 8834 if (!p || !p2) 8835 ret = -TARGET_EFAULT; 8836 else 8837 ret = get_errno(rename(p, p2)); 8838 unlock_user(p2, arg2, 0); 8839 unlock_user(p, arg1, 0); 8840 } 8841 return ret; 8842 #endif 8843 #if defined(TARGET_NR_renameat) 8844 case TARGET_NR_renameat: 8845 { 8846 void *p2; 8847 p = lock_user_string(arg2); 8848 p2 = lock_user_string(arg4); 8849 if (!p || !p2) 8850 ret = -TARGET_EFAULT; 8851 else 8852 ret = get_errno(renameat(arg1, p, arg3, p2)); 8853 unlock_user(p2, arg4, 0); 8854 unlock_user(p, arg2, 0); 8855 } 8856 return ret; 8857 #endif 8858 #if defined(TARGET_NR_renameat2) 8859 case TARGET_NR_renameat2: 8860 { 8861 void *p2; 8862 p = lock_user_string(arg2); 8863 p2 = lock_user_string(arg4); 8864 if (!p || !p2) { 8865 ret = -TARGET_EFAULT; 8866 } else { 8867 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 8868 } 8869 unlock_user(p2, arg4, 0); 8870 unlock_user(p, arg2, 0); 8871 } 8872 return ret; 8873 #endif 8874 #ifdef TARGET_NR_mkdir 8875 case TARGET_NR_mkdir: 8876 if (!(p = lock_user_string(arg1))) 8877 return -TARGET_EFAULT; 8878 ret = get_errno(mkdir(p, arg2)); 8879 unlock_user(p, arg1, 0); 8880 return ret; 8881 #endif 8882 #if defined(TARGET_NR_mkdirat) 8883 case TARGET_NR_mkdirat: 8884 if (!(p = lock_user_string(arg2))) 8885 return -TARGET_EFAULT; 8886 ret = get_errno(mkdirat(arg1, p, arg3)); 8887 unlock_user(p, arg2, 0); 8888 return ret; 8889 #endif 8890 #ifdef TARGET_NR_rmdir 8891 case TARGET_NR_rmdir: 8892 if (!(p = lock_user_string(arg1))) 8893 return -TARGET_EFAULT; 8894 ret = get_errno(rmdir(p)); 8895 unlock_user(p, arg1, 0); 8896 return ret; 8897 #endif 8898 case TARGET_NR_dup: 8899 ret = get_errno(dup(arg1)); 8900 if (ret >= 0) { 8901 fd_trans_dup(arg1, ret); 8902 } 8903 return ret; 8904 #ifdef TARGET_NR_pipe 8905 case TARGET_NR_pipe: 8906 return do_pipe(cpu_env, arg1, 0, 0); 8907 #endif 8908 #ifdef TARGET_NR_pipe2 8909 case TARGET_NR_pipe2: 8910 return do_pipe(cpu_env, arg1, 8911 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8912 #endif 8913 case TARGET_NR_times: 8914 { 8915 struct target_tms *tmsp; 8916 struct tms tms; 8917 ret = get_errno(times(&tms)); 8918 if (arg1) { 8919 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 
0); 8920 if (!tmsp) 8921 return -TARGET_EFAULT; 8922 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8923 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8924 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8925 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8926 } 8927 if (!is_error(ret)) 8928 ret = host_to_target_clock_t(ret); 8929 } 8930 return ret; 8931 case TARGET_NR_acct: 8932 if (arg1 == 0) { 8933 ret = get_errno(acct(NULL)); 8934 } else { 8935 if (!(p = lock_user_string(arg1))) { 8936 return -TARGET_EFAULT; 8937 } 8938 ret = get_errno(acct(path(p))); 8939 unlock_user(p, arg1, 0); 8940 } 8941 return ret; 8942 #ifdef TARGET_NR_umount2 8943 case TARGET_NR_umount2: 8944 if (!(p = lock_user_string(arg1))) 8945 return -TARGET_EFAULT; 8946 ret = get_errno(umount2(p, arg2)); 8947 unlock_user(p, arg1, 0); 8948 return ret; 8949 #endif 8950 case TARGET_NR_ioctl: 8951 return do_ioctl(arg1, arg2, arg3); 8952 #ifdef TARGET_NR_fcntl 8953 case TARGET_NR_fcntl: 8954 return do_fcntl(arg1, arg2, arg3); 8955 #endif 8956 case TARGET_NR_setpgid: 8957 return get_errno(setpgid(arg1, arg2)); 8958 case TARGET_NR_umask: 8959 return get_errno(umask(arg1)); 8960 case TARGET_NR_chroot: 8961 if (!(p = lock_user_string(arg1))) 8962 return -TARGET_EFAULT; 8963 ret = get_errno(chroot(p)); 8964 unlock_user(p, arg1, 0); 8965 return ret; 8966 #ifdef TARGET_NR_dup2 8967 case TARGET_NR_dup2: 8968 ret = get_errno(dup2(arg1, arg2)); 8969 if (ret >= 0) { 8970 fd_trans_dup(arg1, arg2); 8971 } 8972 return ret; 8973 #endif 8974 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8975 case TARGET_NR_dup3: 8976 { 8977 int host_flags; 8978 8979 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 8980 return -EINVAL; 8981 } 8982 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 8983 ret = get_errno(dup3(arg1, arg2, host_flags)); 8984 if (ret >= 0) { 8985 fd_trans_dup(arg1, arg2); 8986 } 8987 return ret; 8988 } 8989 #endif 8990 #ifdef TARGET_NR_getppid /* not on alpha */ 8991 case TARGET_NR_getppid: 8992 return get_errno(getppid()); 8993 #endif 8994 #ifdef TARGET_NR_getpgrp 8995 case TARGET_NR_getpgrp: 8996 return get_errno(getpgrp()); 8997 #endif 8998 case TARGET_NR_setsid: 8999 return get_errno(setsid()); 9000 #ifdef TARGET_NR_sigaction 9001 case TARGET_NR_sigaction: 9002 { 9003 #if defined(TARGET_MIPS) 9004 struct target_sigaction act, oact, *pact, *old_act; 9005 9006 if (arg2) { 9007 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9008 return -TARGET_EFAULT; 9009 act._sa_handler = old_act->_sa_handler; 9010 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 9011 act.sa_flags = old_act->sa_flags; 9012 unlock_user_struct(old_act, arg2, 0); 9013 pact = &act; 9014 } else { 9015 pact = NULL; 9016 } 9017 9018 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9019 9020 if (!is_error(ret) && arg3) { 9021 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9022 return -TARGET_EFAULT; 9023 old_act->_sa_handler = oact._sa_handler; 9024 old_act->sa_flags = oact.sa_flags; 9025 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 9026 old_act->sa_mask.sig[1] = 0; 9027 old_act->sa_mask.sig[2] = 0; 9028 old_act->sa_mask.sig[3] = 0; 9029 unlock_user_struct(old_act, arg3, 1); 9030 } 9031 #else 9032 struct target_old_sigaction *old_act; 9033 struct target_sigaction act, oact, *pact; 9034 if (arg2) { 9035 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9036 return -TARGET_EFAULT; 9037 act._sa_handler = old_act->_sa_handler; 9038 
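            /*
             * The old-style sigaction carries only a single-word
             * signal mask; expand it into a full target sigset.
             */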
target_siginitset(&act.sa_mask, old_act->sa_mask); 9039 act.sa_flags = old_act->sa_flags; 9040 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9041 act.sa_restorer = old_act->sa_restorer; 9042 #endif 9043 unlock_user_struct(old_act, arg2, 0); 9044 pact = &act; 9045 } else { 9046 pact = NULL; 9047 } 9048 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9049 if (!is_error(ret) && arg3) { 9050 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9051 return -TARGET_EFAULT; 9052 old_act->_sa_handler = oact._sa_handler; 9053 old_act->sa_mask = oact.sa_mask.sig[0]; 9054 old_act->sa_flags = oact.sa_flags; 9055 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9056 old_act->sa_restorer = oact.sa_restorer; 9057 #endif 9058 unlock_user_struct(old_act, arg3, 1); 9059 } 9060 #endif 9061 } 9062 return ret; 9063 #endif 9064 case TARGET_NR_rt_sigaction: 9065 { 9066 /* 9067 * For Alpha and SPARC this is a 5 argument syscall, with 9068 * a 'restorer' parameter which must be copied into the 9069 * sa_restorer field of the sigaction struct. 9070 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 9071 * and arg5 is the sigsetsize. 9072 */ 9073 #if defined(TARGET_ALPHA) 9074 target_ulong sigsetsize = arg4; 9075 target_ulong restorer = arg5; 9076 #elif defined(TARGET_SPARC) 9077 target_ulong restorer = arg4; 9078 target_ulong sigsetsize = arg5; 9079 #else 9080 target_ulong sigsetsize = arg4; 9081 target_ulong restorer = 0; 9082 #endif 9083 struct target_sigaction *act = NULL; 9084 struct target_sigaction *oact = NULL; 9085 9086 if (sigsetsize != sizeof(target_sigset_t)) { 9087 return -TARGET_EINVAL; 9088 } 9089 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9090 return -TARGET_EFAULT; 9091 } 9092 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9093 ret = -TARGET_EFAULT; 9094 } else { 9095 ret = get_errno(do_sigaction(arg1, act, oact, restorer)); 9096 if (oact) { 9097 unlock_user_struct(oact, arg3, 1); 9098 } 9099 } 9100 if (act) { 9101 unlock_user_struct(act, arg2, 0); 9102 } 9103 } 9104 return ret; 9105 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9106 case TARGET_NR_sgetmask: 9107 { 9108 sigset_t cur_set; 9109 abi_ulong target_set; 9110 ret = do_sigprocmask(0, NULL, &cur_set); 9111 if (!ret) { 9112 host_to_target_old_sigset(&target_set, &cur_set); 9113 ret = target_set; 9114 } 9115 } 9116 return ret; 9117 #endif 9118 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9119 case TARGET_NR_ssetmask: 9120 { 9121 sigset_t set, oset; 9122 abi_ulong target_set = arg1; 9123 target_to_host_old_sigset(&set, &target_set); 9124 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9125 if (!ret) { 9126 host_to_target_old_sigset(&target_set, &oset); 9127 ret = target_set; 9128 } 9129 } 9130 return ret; 9131 #endif 9132 #ifdef TARGET_NR_sigprocmask 9133 case TARGET_NR_sigprocmask: 9134 { 9135 #if defined(TARGET_ALPHA) 9136 sigset_t set, oldset; 9137 abi_ulong mask; 9138 int how; 9139 9140 switch (arg1) { 9141 case TARGET_SIG_BLOCK: 9142 how = SIG_BLOCK; 9143 break; 9144 case TARGET_SIG_UNBLOCK: 9145 how = SIG_UNBLOCK; 9146 break; 9147 case TARGET_SIG_SETMASK: 9148 how = SIG_SETMASK; 9149 break; 9150 default: 9151 return -TARGET_EINVAL; 9152 } 9153 mask = arg2; 9154 target_to_host_old_sigset(&set, &mask); 9155 9156 ret = do_sigprocmask(how, &set, &oldset); 9157 if (!is_error(ret)) { 9158 host_to_target_old_sigset(&mask, &oldset); 9159 ret = mask; 9160 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 9161 } 9162 #else 9163 sigset_t set, oldset, *set_ptr; 9164 int how; 9165 9166 if (arg2) { 9167 switch (arg1) 
{ 9168 case TARGET_SIG_BLOCK: 9169 how = SIG_BLOCK; 9170 break; 9171 case TARGET_SIG_UNBLOCK: 9172 how = SIG_UNBLOCK; 9173 break; 9174 case TARGET_SIG_SETMASK: 9175 how = SIG_SETMASK; 9176 break; 9177 default: 9178 return -TARGET_EINVAL; 9179 } 9180 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9181 return -TARGET_EFAULT; 9182 target_to_host_old_sigset(&set, p); 9183 unlock_user(p, arg2, 0); 9184 set_ptr = &set; 9185 } else { 9186 how = 0; 9187 set_ptr = NULL; 9188 } 9189 ret = do_sigprocmask(how, set_ptr, &oldset); 9190 if (!is_error(ret) && arg3) { 9191 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9192 return -TARGET_EFAULT; 9193 host_to_target_old_sigset(p, &oldset); 9194 unlock_user(p, arg3, sizeof(target_sigset_t)); 9195 } 9196 #endif 9197 } 9198 return ret; 9199 #endif 9200 case TARGET_NR_rt_sigprocmask: 9201 { 9202 int how = arg1; 9203 sigset_t set, oldset, *set_ptr; 9204 9205 if (arg4 != sizeof(target_sigset_t)) { 9206 return -TARGET_EINVAL; 9207 } 9208 9209 if (arg2) { 9210 switch(how) { 9211 case TARGET_SIG_BLOCK: 9212 how = SIG_BLOCK; 9213 break; 9214 case TARGET_SIG_UNBLOCK: 9215 how = SIG_UNBLOCK; 9216 break; 9217 case TARGET_SIG_SETMASK: 9218 how = SIG_SETMASK; 9219 break; 9220 default: 9221 return -TARGET_EINVAL; 9222 } 9223 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9224 return -TARGET_EFAULT; 9225 target_to_host_sigset(&set, p); 9226 unlock_user(p, arg2, 0); 9227 set_ptr = &set; 9228 } else { 9229 how = 0; 9230 set_ptr = NULL; 9231 } 9232 ret = do_sigprocmask(how, set_ptr, &oldset); 9233 if (!is_error(ret) && arg3) { 9234 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9235 return -TARGET_EFAULT; 9236 host_to_target_sigset(p, &oldset); 9237 unlock_user(p, arg3, sizeof(target_sigset_t)); 9238 } 9239 } 9240 return ret; 9241 #ifdef TARGET_NR_sigpending 9242 case TARGET_NR_sigpending: 9243 { 9244 sigset_t set; 9245 ret = get_errno(sigpending(&set)); 9246 if (!is_error(ret)) { 9247 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9248 return -TARGET_EFAULT; 9249 host_to_target_old_sigset(p, &set); 9250 unlock_user(p, arg1, sizeof(target_sigset_t)); 9251 } 9252 } 9253 return ret; 9254 #endif 9255 case TARGET_NR_rt_sigpending: 9256 { 9257 sigset_t set; 9258 9259 /* Yes, this check is >, not != like most. We follow the kernel's 9260 * logic and it does it like this because it implements 9261 * NR_sigpending through the same code path, and in that case 9262 * the old_sigset_t is smaller in size. 
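         * A guest passing a smaller (old-style) sigset size is
         * therefore accepted here as well.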
9263 */ 9264 if (arg2 > sizeof(target_sigset_t)) { 9265 return -TARGET_EINVAL; 9266 } 9267 9268 ret = get_errno(sigpending(&set)); 9269 if (!is_error(ret)) { 9270 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9271 return -TARGET_EFAULT; 9272 host_to_target_sigset(p, &set); 9273 unlock_user(p, arg1, sizeof(target_sigset_t)); 9274 } 9275 } 9276 return ret; 9277 #ifdef TARGET_NR_sigsuspend 9278 case TARGET_NR_sigsuspend: 9279 { 9280 TaskState *ts = cpu->opaque; 9281 #if defined(TARGET_ALPHA) 9282 abi_ulong mask = arg1; 9283 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 9284 #else 9285 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9286 return -TARGET_EFAULT; 9287 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 9288 unlock_user(p, arg1, 0); 9289 #endif 9290 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9291 SIGSET_T_SIZE)); 9292 if (ret != -TARGET_ERESTARTSYS) { 9293 ts->in_sigsuspend = 1; 9294 } 9295 } 9296 return ret; 9297 #endif 9298 case TARGET_NR_rt_sigsuspend: 9299 { 9300 TaskState *ts = cpu->opaque; 9301 9302 if (arg2 != sizeof(target_sigset_t)) { 9303 return -TARGET_EINVAL; 9304 } 9305 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9306 return -TARGET_EFAULT; 9307 target_to_host_sigset(&ts->sigsuspend_mask, p); 9308 unlock_user(p, arg1, 0); 9309 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9310 SIGSET_T_SIZE)); 9311 if (ret != -TARGET_ERESTARTSYS) { 9312 ts->in_sigsuspend = 1; 9313 } 9314 } 9315 return ret; 9316 #ifdef TARGET_NR_rt_sigtimedwait 9317 case TARGET_NR_rt_sigtimedwait: 9318 { 9319 sigset_t set; 9320 struct timespec uts, *puts; 9321 siginfo_t uinfo; 9322 9323 if (arg4 != sizeof(target_sigset_t)) { 9324 return -TARGET_EINVAL; 9325 } 9326 9327 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9328 return -TARGET_EFAULT; 9329 target_to_host_sigset(&set, p); 9330 unlock_user(p, arg1, 0); 9331 if (arg3) { 9332 puts = &uts; 9333 if (target_to_host_timespec(puts, arg3)) { 9334 return -TARGET_EFAULT; 9335 } 9336 } else { 9337 puts = NULL; 9338 } 9339 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9340 SIGSET_T_SIZE)); 9341 if (!is_error(ret)) { 9342 if (arg2) { 9343 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9344 0); 9345 if (!p) { 9346 return -TARGET_EFAULT; 9347 } 9348 host_to_target_siginfo(p, &uinfo); 9349 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9350 } 9351 ret = host_to_target_signal(ret); 9352 } 9353 } 9354 return ret; 9355 #endif 9356 #ifdef TARGET_NR_rt_sigtimedwait_time64 9357 case TARGET_NR_rt_sigtimedwait_time64: 9358 { 9359 sigset_t set; 9360 struct timespec uts, *puts; 9361 siginfo_t uinfo; 9362 9363 if (arg4 != sizeof(target_sigset_t)) { 9364 return -TARGET_EINVAL; 9365 } 9366 9367 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 9368 if (!p) { 9369 return -TARGET_EFAULT; 9370 } 9371 target_to_host_sigset(&set, p); 9372 unlock_user(p, arg1, 0); 9373 if (arg3) { 9374 puts = &uts; 9375 if (target_to_host_timespec64(puts, arg3)) { 9376 return -TARGET_EFAULT; 9377 } 9378 } else { 9379 puts = NULL; 9380 } 9381 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9382 SIGSET_T_SIZE)); 9383 if (!is_error(ret)) { 9384 if (arg2) { 9385 p = lock_user(VERIFY_WRITE, arg2, 9386 sizeof(target_siginfo_t), 0); 9387 if (!p) { 9388 return -TARGET_EFAULT; 9389 } 9390 host_to_target_siginfo(p, &uinfo); 9391 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9392 } 9393 ret = host_to_target_signal(ret); 9394 } 9395 } 
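        /*
         * ret is now either a negative target errno or the number of
         * the delivered signal, already in target numbering.
         */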
9396 return ret; 9397 #endif 9398 case TARGET_NR_rt_sigqueueinfo: 9399 { 9400 siginfo_t uinfo; 9401 9402 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9403 if (!p) { 9404 return -TARGET_EFAULT; 9405 } 9406 target_to_host_siginfo(&uinfo, p); 9407 unlock_user(p, arg3, 0); 9408 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9409 } 9410 return ret; 9411 case TARGET_NR_rt_tgsigqueueinfo: 9412 { 9413 siginfo_t uinfo; 9414 9415 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9416 if (!p) { 9417 return -TARGET_EFAULT; 9418 } 9419 target_to_host_siginfo(&uinfo, p); 9420 unlock_user(p, arg4, 0); 9421 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9422 } 9423 return ret; 9424 #ifdef TARGET_NR_sigreturn 9425 case TARGET_NR_sigreturn: 9426 if (block_signals()) { 9427 return -TARGET_ERESTARTSYS; 9428 } 9429 return do_sigreturn(cpu_env); 9430 #endif 9431 case TARGET_NR_rt_sigreturn: 9432 if (block_signals()) { 9433 return -TARGET_ERESTARTSYS; 9434 } 9435 return do_rt_sigreturn(cpu_env); 9436 case TARGET_NR_sethostname: 9437 if (!(p = lock_user_string(arg1))) 9438 return -TARGET_EFAULT; 9439 ret = get_errno(sethostname(p, arg2)); 9440 unlock_user(p, arg1, 0); 9441 return ret; 9442 #ifdef TARGET_NR_setrlimit 9443 case TARGET_NR_setrlimit: 9444 { 9445 int resource = target_to_host_resource(arg1); 9446 struct target_rlimit *target_rlim; 9447 struct rlimit rlim; 9448 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9449 return -TARGET_EFAULT; 9450 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9451 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9452 unlock_user_struct(target_rlim, arg2, 0); 9453 /* 9454 * If we just passed through resource limit settings for memory then 9455 * they would also apply to QEMU's own allocations, and QEMU will 9456 * crash or hang or die if its allocations fail. Ideally we would 9457 * track the guest allocations in QEMU and apply the limits ourselves. 9458 * For now, just tell the guest the call succeeded but don't actually 9459 * limit anything. 
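         * Note that a subsequent getrlimit will still report the host
         * value, so the suppression is visible to the guest, but that
         * is preferable to QEMU itself running out of memory.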
9460 */ 9461 if (resource != RLIMIT_AS && 9462 resource != RLIMIT_DATA && 9463 resource != RLIMIT_STACK) { 9464 return get_errno(setrlimit(resource, &rlim)); 9465 } else { 9466 return 0; 9467 } 9468 } 9469 #endif 9470 #ifdef TARGET_NR_getrlimit 9471 case TARGET_NR_getrlimit: 9472 { 9473 int resource = target_to_host_resource(arg1); 9474 struct target_rlimit *target_rlim; 9475 struct rlimit rlim; 9476 9477 ret = get_errno(getrlimit(resource, &rlim)); 9478 if (!is_error(ret)) { 9479 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9480 return -TARGET_EFAULT; 9481 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9482 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9483 unlock_user_struct(target_rlim, arg2, 1); 9484 } 9485 } 9486 return ret; 9487 #endif 9488 case TARGET_NR_getrusage: 9489 { 9490 struct rusage rusage; 9491 ret = get_errno(getrusage(arg1, &rusage)); 9492 if (!is_error(ret)) { 9493 ret = host_to_target_rusage(arg2, &rusage); 9494 } 9495 } 9496 return ret; 9497 #if defined(TARGET_NR_gettimeofday) 9498 case TARGET_NR_gettimeofday: 9499 { 9500 struct timeval tv; 9501 struct timezone tz; 9502 9503 ret = get_errno(gettimeofday(&tv, &tz)); 9504 if (!is_error(ret)) { 9505 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 9506 return -TARGET_EFAULT; 9507 } 9508 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 9509 return -TARGET_EFAULT; 9510 } 9511 } 9512 } 9513 return ret; 9514 #endif 9515 #if defined(TARGET_NR_settimeofday) 9516 case TARGET_NR_settimeofday: 9517 { 9518 struct timeval tv, *ptv = NULL; 9519 struct timezone tz, *ptz = NULL; 9520 9521 if (arg1) { 9522 if (copy_from_user_timeval(&tv, arg1)) { 9523 return -TARGET_EFAULT; 9524 } 9525 ptv = &tv; 9526 } 9527 9528 if (arg2) { 9529 if (copy_from_user_timezone(&tz, arg2)) { 9530 return -TARGET_EFAULT; 9531 } 9532 ptz = &tz; 9533 } 9534 9535 return get_errno(settimeofday(ptv, ptz)); 9536 } 9537 #endif 9538 #if defined(TARGET_NR_select) 9539 case TARGET_NR_select: 9540 #if defined(TARGET_WANT_NI_OLD_SELECT) 9541 /* some architectures used to have old_select here 9542 * but now ENOSYS it. 
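         * We mirror that by failing the syscall outright instead of
         * emulating the legacy calling convention.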
9543 */ 9544 ret = -TARGET_ENOSYS; 9545 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9546 ret = do_old_select(arg1); 9547 #else 9548 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9549 #endif 9550 return ret; 9551 #endif 9552 #ifdef TARGET_NR_pselect6 9553 case TARGET_NR_pselect6: 9554 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 9555 #endif 9556 #ifdef TARGET_NR_pselect6_time64 9557 case TARGET_NR_pselect6_time64: 9558 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 9559 #endif 9560 #ifdef TARGET_NR_symlink 9561 case TARGET_NR_symlink: 9562 { 9563 void *p2; 9564 p = lock_user_string(arg1); 9565 p2 = lock_user_string(arg2); 9566 if (!p || !p2) 9567 ret = -TARGET_EFAULT; 9568 else 9569 ret = get_errno(symlink(p, p2)); 9570 unlock_user(p2, arg2, 0); 9571 unlock_user(p, arg1, 0); 9572 } 9573 return ret; 9574 #endif 9575 #if defined(TARGET_NR_symlinkat) 9576 case TARGET_NR_symlinkat: 9577 { 9578 void *p2; 9579 p = lock_user_string(arg1); 9580 p2 = lock_user_string(arg3); 9581 if (!p || !p2) 9582 ret = -TARGET_EFAULT; 9583 else 9584 ret = get_errno(symlinkat(p, arg2, p2)); 9585 unlock_user(p2, arg3, 0); 9586 unlock_user(p, arg1, 0); 9587 } 9588 return ret; 9589 #endif 9590 #ifdef TARGET_NR_readlink 9591 case TARGET_NR_readlink: 9592 { 9593 void *p2; 9594 p = lock_user_string(arg1); 9595 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9596 if (!p || !p2) { 9597 ret = -TARGET_EFAULT; 9598 } else if (!arg3) { 9599 /* Short circuit this for the magic exe check. */ 9600 ret = -TARGET_EINVAL; 9601 } else if (is_proc_myself((const char *)p, "exe")) { 9602 char real[PATH_MAX], *temp; 9603 temp = realpath(exec_path, real); 9604 /* Return value is # of bytes that we wrote to the buffer. */ 9605 if (temp == NULL) { 9606 ret = get_errno(-1); 9607 } else { 9608 /* Don't worry about sign mismatch as earlier mapping 9609 * logic would have thrown a bad address error. */ 9610 ret = MIN(strlen(real), arg3); 9611 /* We cannot NUL terminate the string. */ 9612 memcpy(p2, real, ret); 9613 } 9614 } else { 9615 ret = get_errno(readlink(path(p), p2, arg3)); 9616 } 9617 unlock_user(p2, arg2, ret); 9618 unlock_user(p, arg1, 0); 9619 } 9620 return ret; 9621 #endif 9622 #if defined(TARGET_NR_readlinkat) 9623 case TARGET_NR_readlinkat: 9624 { 9625 void *p2; 9626 p = lock_user_string(arg2); 9627 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9628 if (!p || !p2) { 9629 ret = -TARGET_EFAULT; 9630 } else if (is_proc_myself((const char *)p, "exe")) { 9631 char real[PATH_MAX], *temp; 9632 temp = realpath(exec_path, real); 9633 ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; 9634 snprintf((char *)p2, arg4, "%s", real); 9635 } else { 9636 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9637 } 9638 unlock_user(p2, arg3, ret); 9639 unlock_user(p, arg2, 0); 9640 } 9641 return ret; 9642 #endif 9643 #ifdef TARGET_NR_swapon 9644 case TARGET_NR_swapon: 9645 if (!(p = lock_user_string(arg1))) 9646 return -TARGET_EFAULT; 9647 ret = get_errno(swapon(p, arg2)); 9648 unlock_user(p, arg1, 0); 9649 return ret; 9650 #endif 9651 case TARGET_NR_reboot: 9652 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9653 /* arg4 must be ignored in all other cases */ 9654 p = lock_user_string(arg4); 9655 if (!p) { 9656 return -TARGET_EFAULT; 9657 } 9658 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9659 unlock_user(p, arg4, 0); 9660 } else { 9661 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9662 } 9663 return ret; 9664 #ifdef TARGET_NR_mmap 9665 case TARGET_NR_mmap: 9666 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9667 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9668 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9669 || defined(TARGET_S390X) 9670 { 9671 abi_ulong *v; 9672 abi_ulong v1, v2, v3, v4, v5, v6; 9673 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9674 return -TARGET_EFAULT; 9675 v1 = tswapal(v[0]); 9676 v2 = tswapal(v[1]); 9677 v3 = tswapal(v[2]); 9678 v4 = tswapal(v[3]); 9679 v5 = tswapal(v[4]); 9680 v6 = tswapal(v[5]); 9681 unlock_user(v, arg1, 0); 9682 ret = get_errno(target_mmap(v1, v2, v3, 9683 target_to_host_bitmask(v4, mmap_flags_tbl), 9684 v5, v6)); 9685 } 9686 #else 9687 /* mmap pointers are always untagged */ 9688 ret = get_errno(target_mmap(arg1, arg2, arg3, 9689 target_to_host_bitmask(arg4, mmap_flags_tbl), 9690 arg5, 9691 arg6)); 9692 #endif 9693 return ret; 9694 #endif 9695 #ifdef TARGET_NR_mmap2 9696 case TARGET_NR_mmap2: 9697 #ifndef MMAP_SHIFT 9698 #define MMAP_SHIFT 12 9699 #endif 9700 ret = target_mmap(arg1, arg2, arg3, 9701 target_to_host_bitmask(arg4, mmap_flags_tbl), 9702 arg5, arg6 << MMAP_SHIFT); 9703 return get_errno(ret); 9704 #endif 9705 case TARGET_NR_munmap: 9706 arg1 = cpu_untagged_addr(cpu, arg1); 9707 return get_errno(target_munmap(arg1, arg2)); 9708 case TARGET_NR_mprotect: 9709 arg1 = cpu_untagged_addr(cpu, arg1); 9710 { 9711 TaskState *ts = cpu->opaque; 9712 /* Special hack to detect libc making the stack executable. */ 9713 if ((arg3 & PROT_GROWSDOWN) 9714 && arg1 >= ts->info->stack_limit 9715 && arg1 <= ts->info->start_stack) { 9716 arg3 &= ~PROT_GROWSDOWN; 9717 arg2 = arg2 + arg1 - ts->info->stack_limit; 9718 arg1 = ts->info->stack_limit; 9719 } 9720 } 9721 return get_errno(target_mprotect(arg1, arg2, arg3)); 9722 #ifdef TARGET_NR_mremap 9723 case TARGET_NR_mremap: 9724 arg1 = cpu_untagged_addr(cpu, arg1); 9725 /* mremap new_addr (arg5) is always untagged */ 9726 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9727 #endif 9728 /* ??? msync/mlock/munlock are broken for softmmu. 
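 * They pass g2h() host addresses straight to the host kernel, which
 * matches the guest's view of memory only in user-mode emulation.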
*/ 9729 #ifdef TARGET_NR_msync 9730 case TARGET_NR_msync: 9731 return get_errno(msync(g2h(cpu, arg1), arg2, arg3)); 9732 #endif 9733 #ifdef TARGET_NR_mlock 9734 case TARGET_NR_mlock: 9735 return get_errno(mlock(g2h(cpu, arg1), arg2)); 9736 #endif 9737 #ifdef TARGET_NR_munlock 9738 case TARGET_NR_munlock: 9739 return get_errno(munlock(g2h(cpu, arg1), arg2)); 9740 #endif 9741 #ifdef TARGET_NR_mlockall 9742 case TARGET_NR_mlockall: 9743 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9744 #endif 9745 #ifdef TARGET_NR_munlockall 9746 case TARGET_NR_munlockall: 9747 return get_errno(munlockall()); 9748 #endif 9749 #ifdef TARGET_NR_truncate 9750 case TARGET_NR_truncate: 9751 if (!(p = lock_user_string(arg1))) 9752 return -TARGET_EFAULT; 9753 ret = get_errno(truncate(p, arg2)); 9754 unlock_user(p, arg1, 0); 9755 return ret; 9756 #endif 9757 #ifdef TARGET_NR_ftruncate 9758 case TARGET_NR_ftruncate: 9759 return get_errno(ftruncate(arg1, arg2)); 9760 #endif 9761 case TARGET_NR_fchmod: 9762 return get_errno(fchmod(arg1, arg2)); 9763 #if defined(TARGET_NR_fchmodat) 9764 case TARGET_NR_fchmodat: 9765 if (!(p = lock_user_string(arg2))) 9766 return -TARGET_EFAULT; 9767 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9768 unlock_user(p, arg2, 0); 9769 return ret; 9770 #endif 9771 case TARGET_NR_getpriority: 9772 /* Note that negative values are valid for getpriority, so we must 9773 differentiate based on errno settings. */ 9774 errno = 0; 9775 ret = getpriority(arg1, arg2); 9776 if (ret == -1 && errno != 0) { 9777 return -host_to_target_errno(errno); 9778 } 9779 #ifdef TARGET_ALPHA 9780 /* Return value is the unbiased priority. Signal no error. */ 9781 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9782 #else 9783 /* Return value is a biased priority to avoid negative numbers. 
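 * The kernel applies the same bias: 20 - nice maps the nice range
 * [-20, 19] onto [1, 40].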
*/ 9784 ret = 20 - ret; 9785 #endif 9786 return ret; 9787 case TARGET_NR_setpriority: 9788 return get_errno(setpriority(arg1, arg2, arg3)); 9789 #ifdef TARGET_NR_statfs 9790 case TARGET_NR_statfs: 9791 if (!(p = lock_user_string(arg1))) { 9792 return -TARGET_EFAULT; 9793 } 9794 ret = get_errno(statfs(path(p), &stfs)); 9795 unlock_user(p, arg1, 0); 9796 convert_statfs: 9797 if (!is_error(ret)) { 9798 struct target_statfs *target_stfs; 9799 9800 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9801 return -TARGET_EFAULT; 9802 __put_user(stfs.f_type, &target_stfs->f_type); 9803 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9804 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9805 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9806 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9807 __put_user(stfs.f_files, &target_stfs->f_files); 9808 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9809 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9810 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9811 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9812 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9813 #ifdef _STATFS_F_FLAGS 9814 __put_user(stfs.f_flags, &target_stfs->f_flags); 9815 #else 9816 __put_user(0, &target_stfs->f_flags); 9817 #endif 9818 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9819 unlock_user_struct(target_stfs, arg2, 1); 9820 } 9821 return ret; 9822 #endif 9823 #ifdef TARGET_NR_fstatfs 9824 case TARGET_NR_fstatfs: 9825 ret = get_errno(fstatfs(arg1, &stfs)); 9826 goto convert_statfs; 9827 #endif 9828 #ifdef TARGET_NR_statfs64 9829 case TARGET_NR_statfs64: 9830 if (!(p = lock_user_string(arg1))) { 9831 return -TARGET_EFAULT; 9832 } 9833 ret = get_errno(statfs(path(p), &stfs)); 9834 unlock_user(p, arg1, 0); 9835 convert_statfs64: 9836 if (!is_error(ret)) { 9837 struct target_statfs64 *target_stfs; 9838 9839 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9840 return -TARGET_EFAULT; 9841 __put_user(stfs.f_type, &target_stfs->f_type); 9842 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9843 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9844 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9845 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9846 __put_user(stfs.f_files, &target_stfs->f_files); 9847 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9848 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9849 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9850 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9851 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9852 #ifdef _STATFS_F_FLAGS 9853 __put_user(stfs.f_flags, &target_stfs->f_flags); 9854 #else 9855 __put_user(0, &target_stfs->f_flags); 9856 #endif 9857 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9858 unlock_user_struct(target_stfs, arg3, 1); 9859 } 9860 return ret; 9861 case TARGET_NR_fstatfs64: 9862 ret = get_errno(fstatfs(arg1, &stfs)); 9863 goto convert_statfs64; 9864 #endif 9865 #ifdef TARGET_NR_socketcall 9866 case TARGET_NR_socketcall: 9867 return do_socketcall(arg1, arg2); 9868 #endif 9869 #ifdef TARGET_NR_accept 9870 case TARGET_NR_accept: 9871 return do_accept4(arg1, arg2, arg3, 0); 9872 #endif 9873 #ifdef TARGET_NR_accept4 9874 case TARGET_NR_accept4: 9875 return do_accept4(arg1, arg2, arg3, arg4); 9876 #endif 9877 #ifdef TARGET_NR_bind 9878 case TARGET_NR_bind: 9879 return do_bind(arg1, arg2, arg3); 9880 #endif 9881 #ifdef TARGET_NR_connect 9882 case TARGET_NR_connect: 
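        /* The socket-family syscalls here are dispatched to do_*() helpers
         * that translate target sockaddr/msghdr layouts into the host's. */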
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /* syslog(type, bufp, len): the length is the third argument;
             * arg2 is the buffer pointer. */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, len));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
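            /* Any other SYSLOG_ACTION_* value is invalid; fail with EINVAL,
             * as the kernel does. */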
default: 9993 return -TARGET_EINVAL; 9994 } 9995 } 9996 break; 9997 #endif 9998 case TARGET_NR_setitimer: 9999 { 10000 struct itimerval value, ovalue, *pvalue; 10001 10002 if (arg2) { 10003 pvalue = &value; 10004 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 10005 || copy_from_user_timeval(&pvalue->it_value, 10006 arg2 + sizeof(struct target_timeval))) 10007 return -TARGET_EFAULT; 10008 } else { 10009 pvalue = NULL; 10010 } 10011 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 10012 if (!is_error(ret) && arg3) { 10013 if (copy_to_user_timeval(arg3, 10014 &ovalue.it_interval) 10015 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 10016 &ovalue.it_value)) 10017 return -TARGET_EFAULT; 10018 } 10019 } 10020 return ret; 10021 case TARGET_NR_getitimer: 10022 { 10023 struct itimerval value; 10024 10025 ret = get_errno(getitimer(arg1, &value)); 10026 if (!is_error(ret) && arg2) { 10027 if (copy_to_user_timeval(arg2, 10028 &value.it_interval) 10029 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 10030 &value.it_value)) 10031 return -TARGET_EFAULT; 10032 } 10033 } 10034 return ret; 10035 #ifdef TARGET_NR_stat 10036 case TARGET_NR_stat: 10037 if (!(p = lock_user_string(arg1))) { 10038 return -TARGET_EFAULT; 10039 } 10040 ret = get_errno(stat(path(p), &st)); 10041 unlock_user(p, arg1, 0); 10042 goto do_stat; 10043 #endif 10044 #ifdef TARGET_NR_lstat 10045 case TARGET_NR_lstat: 10046 if (!(p = lock_user_string(arg1))) { 10047 return -TARGET_EFAULT; 10048 } 10049 ret = get_errno(lstat(path(p), &st)); 10050 unlock_user(p, arg1, 0); 10051 goto do_stat; 10052 #endif 10053 #ifdef TARGET_NR_fstat 10054 case TARGET_NR_fstat: 10055 { 10056 ret = get_errno(fstat(arg1, &st)); 10057 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 10058 do_stat: 10059 #endif 10060 if (!is_error(ret)) { 10061 struct target_stat *target_st; 10062 10063 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 10064 return -TARGET_EFAULT; 10065 memset(target_st, 0, sizeof(*target_st)); 10066 __put_user(st.st_dev, &target_st->st_dev); 10067 __put_user(st.st_ino, &target_st->st_ino); 10068 __put_user(st.st_mode, &target_st->st_mode); 10069 __put_user(st.st_uid, &target_st->st_uid); 10070 __put_user(st.st_gid, &target_st->st_gid); 10071 __put_user(st.st_nlink, &target_st->st_nlink); 10072 __put_user(st.st_rdev, &target_st->st_rdev); 10073 __put_user(st.st_size, &target_st->st_size); 10074 __put_user(st.st_blksize, &target_st->st_blksize); 10075 __put_user(st.st_blocks, &target_st->st_blocks); 10076 __put_user(st.st_atime, &target_st->target_st_atime); 10077 __put_user(st.st_mtime, &target_st->target_st_mtime); 10078 __put_user(st.st_ctime, &target_st->target_st_ctime); 10079 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) 10080 __put_user(st.st_atim.tv_nsec, 10081 &target_st->target_st_atime_nsec); 10082 __put_user(st.st_mtim.tv_nsec, 10083 &target_st->target_st_mtime_nsec); 10084 __put_user(st.st_ctim.tv_nsec, 10085 &target_st->target_st_ctime_nsec); 10086 #endif 10087 unlock_user_struct(target_st, arg2, 1); 10088 } 10089 } 10090 return ret; 10091 #endif 10092 case TARGET_NR_vhangup: 10093 return get_errno(vhangup()); 10094 #ifdef TARGET_NR_syscall 10095 case TARGET_NR_syscall: 10096 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 10097 arg6, arg7, arg8, 0); 10098 #endif 10099 #if defined(TARGET_NR_wait4) 10100 case TARGET_NR_wait4: 10101 { 10102 int status; 10103 abi_long status_ptr = arg2; 10104 struct rusage rusage, *rusage_ptr; 10105 abi_ulong 
target_rusage = arg4; 10106 abi_long rusage_err; 10107 if (target_rusage) 10108 rusage_ptr = &rusage; 10109 else 10110 rusage_ptr = NULL; 10111 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10112 if (!is_error(ret)) { 10113 if (status_ptr && ret) { 10114 status = host_to_target_waitstatus(status); 10115 if (put_user_s32(status, status_ptr)) 10116 return -TARGET_EFAULT; 10117 } 10118 if (target_rusage) { 10119 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10120 if (rusage_err) { 10121 ret = rusage_err; 10122 } 10123 } 10124 } 10125 } 10126 return ret; 10127 #endif 10128 #ifdef TARGET_NR_swapoff 10129 case TARGET_NR_swapoff: 10130 if (!(p = lock_user_string(arg1))) 10131 return -TARGET_EFAULT; 10132 ret = get_errno(swapoff(p)); 10133 unlock_user(p, arg1, 0); 10134 return ret; 10135 #endif 10136 case TARGET_NR_sysinfo: 10137 { 10138 struct target_sysinfo *target_value; 10139 struct sysinfo value; 10140 ret = get_errno(sysinfo(&value)); 10141 if (!is_error(ret) && arg1) 10142 { 10143 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 10144 return -TARGET_EFAULT; 10145 __put_user(value.uptime, &target_value->uptime); 10146 __put_user(value.loads[0], &target_value->loads[0]); 10147 __put_user(value.loads[1], &target_value->loads[1]); 10148 __put_user(value.loads[2], &target_value->loads[2]); 10149 __put_user(value.totalram, &target_value->totalram); 10150 __put_user(value.freeram, &target_value->freeram); 10151 __put_user(value.sharedram, &target_value->sharedram); 10152 __put_user(value.bufferram, &target_value->bufferram); 10153 __put_user(value.totalswap, &target_value->totalswap); 10154 __put_user(value.freeswap, &target_value->freeswap); 10155 __put_user(value.procs, &target_value->procs); 10156 __put_user(value.totalhigh, &target_value->totalhigh); 10157 __put_user(value.freehigh, &target_value->freehigh); 10158 __put_user(value.mem_unit, &target_value->mem_unit); 10159 unlock_user_struct(target_value, arg1, 1); 10160 } 10161 } 10162 return ret; 10163 #ifdef TARGET_NR_ipc 10164 case TARGET_NR_ipc: 10165 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10166 #endif 10167 #ifdef TARGET_NR_semget 10168 case TARGET_NR_semget: 10169 return get_errno(semget(arg1, arg2, arg3)); 10170 #endif 10171 #ifdef TARGET_NR_semop 10172 case TARGET_NR_semop: 10173 return do_semtimedop(arg1, arg2, arg3, 0, false); 10174 #endif 10175 #ifdef TARGET_NR_semtimedop 10176 case TARGET_NR_semtimedop: 10177 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10178 #endif 10179 #ifdef TARGET_NR_semtimedop_time64 10180 case TARGET_NR_semtimedop_time64: 10181 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10182 #endif 10183 #ifdef TARGET_NR_semctl 10184 case TARGET_NR_semctl: 10185 return do_semctl(arg1, arg2, arg3, arg4); 10186 #endif 10187 #ifdef TARGET_NR_msgctl 10188 case TARGET_NR_msgctl: 10189 return do_msgctl(arg1, arg2, arg3); 10190 #endif 10191 #ifdef TARGET_NR_msgget 10192 case TARGET_NR_msgget: 10193 return get_errno(msgget(arg1, arg2)); 10194 #endif 10195 #ifdef TARGET_NR_msgrcv 10196 case TARGET_NR_msgrcv: 10197 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10198 #endif 10199 #ifdef TARGET_NR_msgsnd 10200 case TARGET_NR_msgsnd: 10201 return do_msgsnd(arg1, arg2, arg3, arg4); 10202 #endif 10203 #ifdef TARGET_NR_shmget 10204 case TARGET_NR_shmget: 10205 return get_errno(shmget(arg1, arg2, arg3)); 10206 #endif 10207 #ifdef TARGET_NR_shmctl 10208 case TARGET_NR_shmctl: 10209 return do_shmctl(arg1, arg2, arg3); 10210 #endif 10211 #ifdef TARGET_NR_shmat 
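    /* shmat must choose a guest address and record the mapping, so it is
     * handled by the do_shmat() helper rather than by a direct host call. */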
10212 case TARGET_NR_shmat: 10213 return do_shmat(cpu_env, arg1, arg2, arg3); 10214 #endif 10215 #ifdef TARGET_NR_shmdt 10216 case TARGET_NR_shmdt: 10217 return do_shmdt(arg1); 10218 #endif 10219 case TARGET_NR_fsync: 10220 return get_errno(fsync(arg1)); 10221 case TARGET_NR_clone: 10222 /* Linux manages to have three different orderings for its 10223 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10224 * match the kernel's CONFIG_CLONE_* settings. 10225 * Microblaze is further special in that it uses a sixth 10226 * implicit argument to clone for the TLS pointer. 10227 */ 10228 #if defined(TARGET_MICROBLAZE) 10229 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10230 #elif defined(TARGET_CLONE_BACKWARDS) 10231 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10232 #elif defined(TARGET_CLONE_BACKWARDS2) 10233 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10234 #else 10235 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10236 #endif 10237 return ret; 10238 #ifdef __NR_exit_group 10239 /* new thread calls */ 10240 case TARGET_NR_exit_group: 10241 preexit_cleanup(cpu_env, arg1); 10242 return get_errno(exit_group(arg1)); 10243 #endif 10244 case TARGET_NR_setdomainname: 10245 if (!(p = lock_user_string(arg1))) 10246 return -TARGET_EFAULT; 10247 ret = get_errno(setdomainname(p, arg2)); 10248 unlock_user(p, arg1, 0); 10249 return ret; 10250 case TARGET_NR_uname: 10251 /* no need to transcode because we use the linux syscall */ 10252 { 10253 struct new_utsname * buf; 10254 10255 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10256 return -TARGET_EFAULT; 10257 ret = get_errno(sys_uname(buf)); 10258 if (!is_error(ret)) { 10259 /* Overwrite the native machine name with whatever is being 10260 emulated. */ 10261 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10262 sizeof(buf->machine)); 10263 /* Allow the user to override the reported release. 
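               (set with the -r command-line option or the QEMU_UNAME
               environment variable, which populate qemu_uname_release)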
*/ 10264 if (qemu_uname_release && *qemu_uname_release) { 10265 g_strlcpy(buf->release, qemu_uname_release, 10266 sizeof(buf->release)); 10267 } 10268 } 10269 unlock_user_struct(buf, arg1, 1); 10270 } 10271 return ret; 10272 #ifdef TARGET_I386 10273 case TARGET_NR_modify_ldt: 10274 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10275 #if !defined(TARGET_X86_64) 10276 case TARGET_NR_vm86: 10277 return do_vm86(cpu_env, arg1, arg2); 10278 #endif 10279 #endif 10280 #if defined(TARGET_NR_adjtimex) 10281 case TARGET_NR_adjtimex: 10282 { 10283 struct timex host_buf; 10284 10285 if (target_to_host_timex(&host_buf, arg1) != 0) { 10286 return -TARGET_EFAULT; 10287 } 10288 ret = get_errno(adjtimex(&host_buf)); 10289 if (!is_error(ret)) { 10290 if (host_to_target_timex(arg1, &host_buf) != 0) { 10291 return -TARGET_EFAULT; 10292 } 10293 } 10294 } 10295 return ret; 10296 #endif 10297 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10298 case TARGET_NR_clock_adjtime: 10299 { 10300 struct timex htx, *phtx = &htx; 10301 10302 if (target_to_host_timex(phtx, arg2) != 0) { 10303 return -TARGET_EFAULT; 10304 } 10305 ret = get_errno(clock_adjtime(arg1, phtx)); 10306 if (!is_error(ret) && phtx) { 10307 if (host_to_target_timex(arg2, phtx) != 0) { 10308 return -TARGET_EFAULT; 10309 } 10310 } 10311 } 10312 return ret; 10313 #endif 10314 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10315 case TARGET_NR_clock_adjtime64: 10316 { 10317 struct timex htx; 10318 10319 if (target_to_host_timex64(&htx, arg2) != 0) { 10320 return -TARGET_EFAULT; 10321 } 10322 ret = get_errno(clock_adjtime(arg1, &htx)); 10323 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10324 return -TARGET_EFAULT; 10325 } 10326 } 10327 return ret; 10328 #endif 10329 case TARGET_NR_getpgid: 10330 return get_errno(getpgid(arg1)); 10331 case TARGET_NR_fchdir: 10332 return get_errno(fchdir(arg1)); 10333 case TARGET_NR_personality: 10334 return get_errno(personality(arg1)); 10335 #ifdef TARGET_NR__llseek /* Not on alpha */ 10336 case TARGET_NR__llseek: 10337 { 10338 int64_t res; 10339 #if !defined(__NR_llseek) 10340 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10341 if (res == -1) { 10342 ret = get_errno(res); 10343 } else { 10344 ret = 0; 10345 } 10346 #else 10347 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10348 #endif 10349 if ((ret == 0) && put_user_s64(res, arg4)) { 10350 return -TARGET_EFAULT; 10351 } 10352 } 10353 return ret; 10354 #endif 10355 #ifdef TARGET_NR_getdents 10356 case TARGET_NR_getdents: 10357 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 10358 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10359 { 10360 struct target_dirent *target_dirp; 10361 struct linux_dirent *dirp; 10362 abi_long count = arg3; 10363 10364 dirp = g_try_malloc(count); 10365 if (!dirp) { 10366 return -TARGET_ENOMEM; 10367 } 10368 10369 ret = get_errno(sys_getdents(arg1, dirp, count)); 10370 if (!is_error(ret)) { 10371 struct linux_dirent *de; 10372 struct target_dirent *tde; 10373 int len = ret; 10374 int reclen, treclen; 10375 int count1, tnamelen; 10376 10377 count1 = 0; 10378 de = dirp; 10379 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10380 return -TARGET_EFAULT; 10381 tde = target_dirp; 10382 while (len > 0) { 10383 reclen = de->d_reclen; 10384 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10385 assert(tnamelen >= 0); 10386 treclen = tnamelen + offsetof(struct target_dirent, d_name); 10387 assert(count1 + treclen <= count); 10388 tde->d_reclen 
= tswap16(treclen); 10389 tde->d_ino = tswapal(de->d_ino); 10390 tde->d_off = tswapal(de->d_off); 10391 memcpy(tde->d_name, de->d_name, tnamelen); 10392 de = (struct linux_dirent *)((char *)de + reclen); 10393 len -= reclen; 10394 tde = (struct target_dirent *)((char *)tde + treclen); 10395 count1 += treclen; 10396 } 10397 ret = count1; 10398 unlock_user(target_dirp, arg2, ret); 10399 } 10400 g_free(dirp); 10401 } 10402 #else 10403 { 10404 struct linux_dirent *dirp; 10405 abi_long count = arg3; 10406 10407 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10408 return -TARGET_EFAULT; 10409 ret = get_errno(sys_getdents(arg1, dirp, count)); 10410 if (!is_error(ret)) { 10411 struct linux_dirent *de; 10412 int len = ret; 10413 int reclen; 10414 de = dirp; 10415 while (len > 0) { 10416 reclen = de->d_reclen; 10417 if (reclen > len) 10418 break; 10419 de->d_reclen = tswap16(reclen); 10420 tswapls(&de->d_ino); 10421 tswapls(&de->d_off); 10422 de = (struct linux_dirent *)((char *)de + reclen); 10423 len -= reclen; 10424 } 10425 } 10426 unlock_user(dirp, arg2, ret); 10427 } 10428 #endif 10429 #else 10430 /* Implement getdents in terms of getdents64 */ 10431 { 10432 struct linux_dirent64 *dirp; 10433 abi_long count = arg3; 10434 10435 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10436 if (!dirp) { 10437 return -TARGET_EFAULT; 10438 } 10439 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10440 if (!is_error(ret)) { 10441 /* Convert the dirent64 structs to target dirent. We do this 10442 * in-place, since we can guarantee that a target_dirent is no 10443 * larger than a dirent64; however this means we have to be 10444 * careful to read everything before writing in the new format. 10445 */ 10446 struct linux_dirent64 *de; 10447 struct target_dirent *tde; 10448 int len = ret; 10449 int tlen = 0; 10450 10451 de = dirp; 10452 tde = (struct target_dirent *)dirp; 10453 while (len > 0) { 10454 int namelen, treclen; 10455 int reclen = de->d_reclen; 10456 uint64_t ino = de->d_ino; 10457 int64_t off = de->d_off; 10458 uint8_t type = de->d_type; 10459 10460 namelen = strlen(de->d_name); 10461 treclen = offsetof(struct target_dirent, d_name) 10462 + namelen + 2; 10463 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10464 10465 memmove(tde->d_name, de->d_name, namelen + 1); 10466 tde->d_ino = tswapal(ino); 10467 tde->d_off = tswapal(off); 10468 tde->d_reclen = tswap16(treclen); 10469 /* The target_dirent type is in what was formerly a padding 10470 * byte at the end of the structure: 10471 */ 10472 *(((char *)tde) + treclen - 1) = type; 10473 10474 de = (struct linux_dirent64 *)((char *)de + reclen); 10475 tde = (struct target_dirent *)((char *)tde + treclen); 10476 len -= reclen; 10477 tlen += treclen; 10478 } 10479 ret = tlen; 10480 } 10481 unlock_user(dirp, arg2, ret); 10482 } 10483 #endif 10484 return ret; 10485 #endif /* TARGET_NR_getdents */ 10486 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10487 case TARGET_NR_getdents64: 10488 { 10489 struct linux_dirent64 *dirp; 10490 abi_long count = arg3; 10491 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10492 return -TARGET_EFAULT; 10493 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10494 if (!is_error(ret)) { 10495 struct linux_dirent64 *de; 10496 int len = ret; 10497 int reclen; 10498 de = dirp; 10499 while (len > 0) { 10500 reclen = de->d_reclen; 10501 if (reclen > len) 10502 break; 10503 de->d_reclen = tswap16(reclen); 10504 tswap64s((uint64_t *)&de->d_ino); 10505 tswap64s((uint64_t *)&de->d_off); 10506 de = 
(struct linux_dirent64 *)((char *)de + reclen); 10507 len -= reclen; 10508 } 10509 } 10510 unlock_user(dirp, arg2, ret); 10511 } 10512 return ret; 10513 #endif /* TARGET_NR_getdents64 */ 10514 #if defined(TARGET_NR__newselect) 10515 case TARGET_NR__newselect: 10516 return do_select(arg1, arg2, arg3, arg4, arg5); 10517 #endif 10518 #ifdef TARGET_NR_poll 10519 case TARGET_NR_poll: 10520 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 10521 #endif 10522 #ifdef TARGET_NR_ppoll 10523 case TARGET_NR_ppoll: 10524 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 10525 #endif 10526 #ifdef TARGET_NR_ppoll_time64 10527 case TARGET_NR_ppoll_time64: 10528 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 10529 #endif 10530 case TARGET_NR_flock: 10531 /* NOTE: the flock constant seems to be the same for every 10532 Linux platform */ 10533 return get_errno(safe_flock(arg1, arg2)); 10534 case TARGET_NR_readv: 10535 { 10536 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10537 if (vec != NULL) { 10538 ret = get_errno(safe_readv(arg1, vec, arg3)); 10539 unlock_iovec(vec, arg2, arg3, 1); 10540 } else { 10541 ret = -host_to_target_errno(errno); 10542 } 10543 } 10544 return ret; 10545 case TARGET_NR_writev: 10546 { 10547 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10548 if (vec != NULL) { 10549 ret = get_errno(safe_writev(arg1, vec, arg3)); 10550 unlock_iovec(vec, arg2, arg3, 0); 10551 } else { 10552 ret = -host_to_target_errno(errno); 10553 } 10554 } 10555 return ret; 10556 #if defined(TARGET_NR_preadv) 10557 case TARGET_NR_preadv: 10558 { 10559 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10560 if (vec != NULL) { 10561 unsigned long low, high; 10562 10563 target_to_host_low_high(arg4, arg5, &low, &high); 10564 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10565 unlock_iovec(vec, arg2, arg3, 1); 10566 } else { 10567 ret = -host_to_target_errno(errno); 10568 } 10569 } 10570 return ret; 10571 #endif 10572 #if defined(TARGET_NR_pwritev) 10573 case TARGET_NR_pwritev: 10574 { 10575 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10576 if (vec != NULL) { 10577 unsigned long low, high; 10578 10579 target_to_host_low_high(arg4, arg5, &low, &high); 10580 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10581 unlock_iovec(vec, arg2, arg3, 0); 10582 } else { 10583 ret = -host_to_target_errno(errno); 10584 } 10585 } 10586 return ret; 10587 #endif 10588 case TARGET_NR_getsid: 10589 return get_errno(getsid(arg1)); 10590 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10591 case TARGET_NR_fdatasync: 10592 return get_errno(fdatasync(arg1)); 10593 #endif 10594 case TARGET_NR_sched_getaffinity: 10595 { 10596 unsigned int mask_size; 10597 unsigned long *mask; 10598 10599 /* 10600 * sched_getaffinity needs multiples of ulong, so need to take 10601 * care of mismatches between target ulong and host ulong sizes. 10602 */ 10603 if (arg2 & (sizeof(abi_ulong) - 1)) { 10604 return -TARGET_EINVAL; 10605 } 10606 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10607 10608 mask = alloca(mask_size); 10609 memset(mask, 0, mask_size); 10610 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10611 10612 if (!is_error(ret)) { 10613 if (ret > arg2) { 10614 /* More data returned than the caller's buffer will fit. 10615 * This only happens if sizeof(abi_long) < sizeof(long) 10616 * and the caller passed us a buffer holding an odd number 10617 * of abi_longs. 
If the host kernel is actually using the 10618 * extra 4 bytes then fail EINVAL; otherwise we can just 10619 * ignore them and only copy the interesting part. 10620 */ 10621 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10622 if (numcpus > arg2 * 8) { 10623 return -TARGET_EINVAL; 10624 } 10625 ret = arg2; 10626 } 10627 10628 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10629 return -TARGET_EFAULT; 10630 } 10631 } 10632 } 10633 return ret; 10634 case TARGET_NR_sched_setaffinity: 10635 { 10636 unsigned int mask_size; 10637 unsigned long *mask; 10638 10639 /* 10640 * sched_setaffinity needs multiples of ulong, so need to take 10641 * care of mismatches between target ulong and host ulong sizes. 10642 */ 10643 if (arg2 & (sizeof(abi_ulong) - 1)) { 10644 return -TARGET_EINVAL; 10645 } 10646 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10647 mask = alloca(mask_size); 10648 10649 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10650 if (ret) { 10651 return ret; 10652 } 10653 10654 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10655 } 10656 case TARGET_NR_getcpu: 10657 { 10658 unsigned cpu, node; 10659 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10660 arg2 ? &node : NULL, 10661 NULL)); 10662 if (is_error(ret)) { 10663 return ret; 10664 } 10665 if (arg1 && put_user_u32(cpu, arg1)) { 10666 return -TARGET_EFAULT; 10667 } 10668 if (arg2 && put_user_u32(node, arg2)) { 10669 return -TARGET_EFAULT; 10670 } 10671 } 10672 return ret; 10673 case TARGET_NR_sched_setparam: 10674 { 10675 struct sched_param *target_schp; 10676 struct sched_param schp; 10677 10678 if (arg2 == 0) { 10679 return -TARGET_EINVAL; 10680 } 10681 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10682 return -TARGET_EFAULT; 10683 schp.sched_priority = tswap32(target_schp->sched_priority); 10684 unlock_user_struct(target_schp, arg2, 0); 10685 return get_errno(sched_setparam(arg1, &schp)); 10686 } 10687 case TARGET_NR_sched_getparam: 10688 { 10689 struct sched_param *target_schp; 10690 struct sched_param schp; 10691 10692 if (arg2 == 0) { 10693 return -TARGET_EINVAL; 10694 } 10695 ret = get_errno(sched_getparam(arg1, &schp)); 10696 if (!is_error(ret)) { 10697 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10698 return -TARGET_EFAULT; 10699 target_schp->sched_priority = tswap32(schp.sched_priority); 10700 unlock_user_struct(target_schp, arg2, 1); 10701 } 10702 } 10703 return ret; 10704 case TARGET_NR_sched_setscheduler: 10705 { 10706 struct sched_param *target_schp; 10707 struct sched_param schp; 10708 if (arg3 == 0) { 10709 return -TARGET_EINVAL; 10710 } 10711 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10712 return -TARGET_EFAULT; 10713 schp.sched_priority = tswap32(target_schp->sched_priority); 10714 unlock_user_struct(target_schp, arg3, 0); 10715 return get_errno(sched_setscheduler(arg1, arg2, &schp)); 10716 } 10717 case TARGET_NR_sched_getscheduler: 10718 return get_errno(sched_getscheduler(arg1)); 10719 case TARGET_NR_sched_yield: 10720 return get_errno(sched_yield()); 10721 case TARGET_NR_sched_get_priority_max: 10722 return get_errno(sched_get_priority_max(arg1)); 10723 case TARGET_NR_sched_get_priority_min: 10724 return get_errno(sched_get_priority_min(arg1)); 10725 #ifdef TARGET_NR_sched_rr_get_interval 10726 case TARGET_NR_sched_rr_get_interval: 10727 { 10728 struct timespec ts; 10729 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10730 if (!is_error(ret)) { 10731 ret = host_to_target_timespec(arg2, &ts); 10732 } 10733 } 
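        /* host_to_target_timespec() returns 0 or -TARGET_EFAULT, so a failed
         * copy-out is reported to the guest. */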
10734 return ret; 10735 #endif 10736 #ifdef TARGET_NR_sched_rr_get_interval_time64 10737 case TARGET_NR_sched_rr_get_interval_time64: 10738 { 10739 struct timespec ts; 10740 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10741 if (!is_error(ret)) { 10742 ret = host_to_target_timespec64(arg2, &ts); 10743 } 10744 } 10745 return ret; 10746 #endif 10747 #if defined(TARGET_NR_nanosleep) 10748 case TARGET_NR_nanosleep: 10749 { 10750 struct timespec req, rem; 10751 target_to_host_timespec(&req, arg1); 10752 ret = get_errno(safe_nanosleep(&req, &rem)); 10753 if (is_error(ret) && arg2) { 10754 host_to_target_timespec(arg2, &rem); 10755 } 10756 } 10757 return ret; 10758 #endif 10759 case TARGET_NR_prctl: 10760 switch (arg1) { 10761 case PR_GET_PDEATHSIG: 10762 { 10763 int deathsig; 10764 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10765 if (!is_error(ret) && arg2 10766 && put_user_s32(deathsig, arg2)) { 10767 return -TARGET_EFAULT; 10768 } 10769 return ret; 10770 } 10771 #ifdef PR_GET_NAME 10772 case PR_GET_NAME: 10773 { 10774 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10775 if (!name) { 10776 return -TARGET_EFAULT; 10777 } 10778 ret = get_errno(prctl(arg1, (unsigned long)name, 10779 arg3, arg4, arg5)); 10780 unlock_user(name, arg2, 16); 10781 return ret; 10782 } 10783 case PR_SET_NAME: 10784 { 10785 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10786 if (!name) { 10787 return -TARGET_EFAULT; 10788 } 10789 ret = get_errno(prctl(arg1, (unsigned long)name, 10790 arg3, arg4, arg5)); 10791 unlock_user(name, arg2, 0); 10792 return ret; 10793 } 10794 #endif 10795 #ifdef TARGET_MIPS 10796 case TARGET_PR_GET_FP_MODE: 10797 { 10798 CPUMIPSState *env = ((CPUMIPSState *)cpu_env); 10799 ret = 0; 10800 if (env->CP0_Status & (1 << CP0St_FR)) { 10801 ret |= TARGET_PR_FP_MODE_FR; 10802 } 10803 if (env->CP0_Config5 & (1 << CP0C5_FRE)) { 10804 ret |= TARGET_PR_FP_MODE_FRE; 10805 } 10806 return ret; 10807 } 10808 case TARGET_PR_SET_FP_MODE: 10809 { 10810 CPUMIPSState *env = ((CPUMIPSState *)cpu_env); 10811 bool old_fr = env->CP0_Status & (1 << CP0St_FR); 10812 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE); 10813 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR; 10814 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE; 10815 10816 const unsigned int known_bits = TARGET_PR_FP_MODE_FR | 10817 TARGET_PR_FP_MODE_FRE; 10818 10819 /* If nothing to change, return right away, successfully. */ 10820 if (old_fr == new_fr && old_fre == new_fre) { 10821 return 0; 10822 } 10823 /* Check the value is valid */ 10824 if (arg2 & ~known_bits) { 10825 return -TARGET_EOPNOTSUPP; 10826 } 10827 /* Setting FRE without FR is not supported. 
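 * (FRE provides an FR=0 view emulated on top of an FR=1 register file,
 * so FR must be set for FRE to work.)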
*/ 10828 if (new_fre && !new_fr) { 10829 return -TARGET_EOPNOTSUPP; 10830 } 10831 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) { 10832 /* FR1 is not supported */ 10833 return -TARGET_EOPNOTSUPP; 10834 } 10835 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64)) 10836 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { 10837 /* cannot set FR=0 */ 10838 return -TARGET_EOPNOTSUPP; 10839 } 10840 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) { 10841 /* Cannot set FRE=1 */ 10842 return -TARGET_EOPNOTSUPP; 10843 } 10844 10845 int i; 10846 fpr_t *fpr = env->active_fpu.fpr; 10847 for (i = 0; i < 32 ; i += 2) { 10848 if (!old_fr && new_fr) { 10849 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX]; 10850 } else if (old_fr && !new_fr) { 10851 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX]; 10852 } 10853 } 10854 10855 if (new_fr) { 10856 env->CP0_Status |= (1 << CP0St_FR); 10857 env->hflags |= MIPS_HFLAG_F64; 10858 } else { 10859 env->CP0_Status &= ~(1 << CP0St_FR); 10860 env->hflags &= ~MIPS_HFLAG_F64; 10861 } 10862 if (new_fre) { 10863 env->CP0_Config5 |= (1 << CP0C5_FRE); 10864 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { 10865 env->hflags |= MIPS_HFLAG_FRE; 10866 } 10867 } else { 10868 env->CP0_Config5 &= ~(1 << CP0C5_FRE); 10869 env->hflags &= ~MIPS_HFLAG_FRE; 10870 } 10871 10872 return 0; 10873 } 10874 #endif /* MIPS */ 10875 #ifdef TARGET_AARCH64 10876 case TARGET_PR_SVE_SET_VL: 10877 /* 10878 * We cannot support either PR_SVE_SET_VL_ONEXEC or 10879 * PR_SVE_VL_INHERIT. Note the kernel definition 10880 * of sve_vl_valid allows for VQ=512, i.e. VL=8192, 10881 * even though the current architectural maximum is VQ=16. 10882 */ 10883 ret = -TARGET_EINVAL; 10884 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env)) 10885 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { 10886 CPUARMState *env = cpu_env; 10887 ARMCPU *cpu = env_archcpu(env); 10888 uint32_t vq, old_vq; 10889 10890 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; 10891 vq = MAX(arg2 / 16, 1); 10892 vq = MIN(vq, cpu->sve_max_vq); 10893 10894 if (vq < old_vq) { 10895 aarch64_sve_narrow_vq(env, vq); 10896 } 10897 env->vfp.zcr_el[1] = vq - 1; 10898 arm_rebuild_hflags(env); 10899 ret = vq * 16; 10900 } 10901 return ret; 10902 case TARGET_PR_SVE_GET_VL: 10903 ret = -TARGET_EINVAL; 10904 { 10905 ARMCPU *cpu = env_archcpu(cpu_env); 10906 if (cpu_isar_feature(aa64_sve, cpu)) { 10907 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; 10908 } 10909 } 10910 return ret; 10911 case TARGET_PR_PAC_RESET_KEYS: 10912 { 10913 CPUARMState *env = cpu_env; 10914 ARMCPU *cpu = env_archcpu(env); 10915 10916 if (arg3 || arg4 || arg5) { 10917 return -TARGET_EINVAL; 10918 } 10919 if (cpu_isar_feature(aa64_pauth, cpu)) { 10920 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY | 10921 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY | 10922 TARGET_PR_PAC_APGAKEY); 10923 int ret = 0; 10924 Error *err = NULL; 10925 10926 if (arg2 == 0) { 10927 arg2 = all; 10928 } else if (arg2 & ~all) { 10929 return -TARGET_EINVAL; 10930 } 10931 if (arg2 & TARGET_PR_PAC_APIAKEY) { 10932 ret |= qemu_guest_getrandom(&env->keys.apia, 10933 sizeof(ARMPACKey), &err); 10934 } 10935 if (arg2 & TARGET_PR_PAC_APIBKEY) { 10936 ret |= qemu_guest_getrandom(&env->keys.apib, 10937 sizeof(ARMPACKey), &err); 10938 } 10939 if (arg2 & TARGET_PR_PAC_APDAKEY) { 10940 ret |= qemu_guest_getrandom(&env->keys.apda, 10941 sizeof(ARMPACKey), &err); 10942 } 10943 if (arg2 & TARGET_PR_PAC_APDBKEY) { 10944 ret |= qemu_guest_getrandom(&env->keys.apdb, 10945 
sizeof(ARMPACKey), &err); 10946 } 10947 if (arg2 & TARGET_PR_PAC_APGAKEY) { 10948 ret |= qemu_guest_getrandom(&env->keys.apga, 10949 sizeof(ARMPACKey), &err); 10950 } 10951 if (ret != 0) { 10952 /* 10953 * Some unknown failure in the crypto. The best 10954 * we can do is log it and fail the syscall. 10955 * The real syscall cannot fail this way. 10956 */ 10957 qemu_log_mask(LOG_UNIMP, 10958 "PR_PAC_RESET_KEYS: Crypto failure: %s", 10959 error_get_pretty(err)); 10960 error_free(err); 10961 return -TARGET_EIO; 10962 } 10963 return 0; 10964 } 10965 } 10966 return -TARGET_EINVAL; 10967 case TARGET_PR_SET_TAGGED_ADDR_CTRL: 10968 { 10969 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE; 10970 CPUARMState *env = cpu_env; 10971 ARMCPU *cpu = env_archcpu(env); 10972 10973 if (cpu_isar_feature(aa64_mte, cpu)) { 10974 valid_mask |= TARGET_PR_MTE_TCF_MASK; 10975 valid_mask |= TARGET_PR_MTE_TAG_MASK; 10976 } 10977 10978 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) { 10979 return -TARGET_EINVAL; 10980 } 10981 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE; 10982 10983 if (cpu_isar_feature(aa64_mte, cpu)) { 10984 switch (arg2 & TARGET_PR_MTE_TCF_MASK) { 10985 case TARGET_PR_MTE_TCF_NONE: 10986 case TARGET_PR_MTE_TCF_SYNC: 10987 case TARGET_PR_MTE_TCF_ASYNC: 10988 break; 10989 default: 10990 return -EINVAL; 10991 } 10992 10993 /* 10994 * Write PR_MTE_TCF to SCTLR_EL1[TCF0]. 10995 * Note that the syscall values are consistent with hw. 10996 */ 10997 env->cp15.sctlr_el[1] = 10998 deposit64(env->cp15.sctlr_el[1], 38, 2, 10999 arg2 >> TARGET_PR_MTE_TCF_SHIFT); 11000 11001 /* 11002 * Write PR_MTE_TAG to GCR_EL1[Exclude]. 11003 * Note that the syscall uses an include mask, 11004 * and hardware uses an exclude mask -- invert. 11005 */ 11006 env->cp15.gcr_el1 = 11007 deposit64(env->cp15.gcr_el1, 0, 16, 11008 ~arg2 >> TARGET_PR_MTE_TAG_SHIFT); 11009 arm_rebuild_hflags(env); 11010 } 11011 return 0; 11012 } 11013 case TARGET_PR_GET_TAGGED_ADDR_CTRL: 11014 { 11015 abi_long ret = 0; 11016 CPUARMState *env = cpu_env; 11017 ARMCPU *cpu = env_archcpu(env); 11018 11019 if (arg2 || arg3 || arg4 || arg5) { 11020 return -TARGET_EINVAL; 11021 } 11022 if (env->tagged_addr_enable) { 11023 ret |= TARGET_PR_TAGGED_ADDR_ENABLE; 11024 } 11025 if (cpu_isar_feature(aa64_mte, cpu)) { 11026 /* See above. */ 11027 ret |= (extract64(env->cp15.sctlr_el[1], 38, 2) 11028 << TARGET_PR_MTE_TCF_SHIFT); 11029 ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16, 11030 ~env->cp15.gcr_el1); 11031 } 11032 return ret; 11033 } 11034 #endif /* AARCH64 */ 11035 case PR_GET_SECCOMP: 11036 case PR_SET_SECCOMP: 11037 /* Disable seccomp to prevent the target disabling syscalls we 11038 * need. 
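 * A filter installed by the guest would constrain QEMU's own host
 * syscalls, not just the emulated ones.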
*/ 11039 return -TARGET_EINVAL; 11040 default: 11041 /* Most prctl options have no pointer arguments */ 11042 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 11043 } 11044 break; 11045 #ifdef TARGET_NR_arch_prctl 11046 case TARGET_NR_arch_prctl: 11047 return do_arch_prctl(cpu_env, arg1, arg2); 11048 #endif 11049 #ifdef TARGET_NR_pread64 11050 case TARGET_NR_pread64: 11051 if (regpairs_aligned(cpu_env, num)) { 11052 arg4 = arg5; 11053 arg5 = arg6; 11054 } 11055 if (arg2 == 0 && arg3 == 0) { 11056 /* Special-case NULL buffer and zero length, which should succeed */ 11057 p = 0; 11058 } else { 11059 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11060 if (!p) { 11061 return -TARGET_EFAULT; 11062 } 11063 } 11064 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 11065 unlock_user(p, arg2, ret); 11066 return ret; 11067 case TARGET_NR_pwrite64: 11068 if (regpairs_aligned(cpu_env, num)) { 11069 arg4 = arg5; 11070 arg5 = arg6; 11071 } 11072 if (arg2 == 0 && arg3 == 0) { 11073 /* Special-case NULL buffer and zero length, which should succeed */ 11074 p = 0; 11075 } else { 11076 p = lock_user(VERIFY_READ, arg2, arg3, 1); 11077 if (!p) { 11078 return -TARGET_EFAULT; 11079 } 11080 } 11081 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 11082 unlock_user(p, arg2, 0); 11083 return ret; 11084 #endif 11085 case TARGET_NR_getcwd: 11086 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 11087 return -TARGET_EFAULT; 11088 ret = get_errno(sys_getcwd1(p, arg2)); 11089 unlock_user(p, arg1, ret); 11090 return ret; 11091 case TARGET_NR_capget: 11092 case TARGET_NR_capset: 11093 { 11094 struct target_user_cap_header *target_header; 11095 struct target_user_cap_data *target_data = NULL; 11096 struct __user_cap_header_struct header; 11097 struct __user_cap_data_struct data[2]; 11098 struct __user_cap_data_struct *dataptr = NULL; 11099 int i, target_datalen; 11100 int data_items = 1; 11101 11102 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 11103 return -TARGET_EFAULT; 11104 } 11105 header.version = tswap32(target_header->version); 11106 header.pid = tswap32(target_header->pid); 11107 11108 if (header.version != _LINUX_CAPABILITY_VERSION) { 11109 /* Version 2 and up takes pointer to two user_data structs */ 11110 data_items = 2; 11111 } 11112 11113 target_datalen = sizeof(*target_data) * data_items; 11114 11115 if (arg2) { 11116 if (num == TARGET_NR_capget) { 11117 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 11118 } else { 11119 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 11120 } 11121 if (!target_data) { 11122 unlock_user_struct(target_header, arg1, 0); 11123 return -TARGET_EFAULT; 11124 } 11125 11126 if (num == TARGET_NR_capset) { 11127 for (i = 0; i < data_items; i++) { 11128 data[i].effective = tswap32(target_data[i].effective); 11129 data[i].permitted = tswap32(target_data[i].permitted); 11130 data[i].inheritable = tswap32(target_data[i].inheritable); 11131 } 11132 } 11133 11134 dataptr = data; 11135 } 11136 11137 if (num == TARGET_NR_capget) { 11138 ret = get_errno(capget(&header, dataptr)); 11139 } else { 11140 ret = get_errno(capset(&header, dataptr)); 11141 } 11142 11143 /* The kernel always updates version for both capget and capset */ 11144 target_header->version = tswap32(header.version); 11145 unlock_user_struct(target_header, arg1, 1); 11146 11147 if (arg2) { 11148 if (num == TARGET_NR_capget) { 11149 for (i = 0; i < data_items; i++) { 11150 target_data[i].effective = tswap32(data[i].effective); 11151 
target_data[i].permitted = tswap32(data[i].permitted); 11152 target_data[i].inheritable = tswap32(data[i].inheritable); 11153 } 11154 unlock_user(target_data, arg2, target_datalen); 11155 } else { 11156 unlock_user(target_data, arg2, 0); 11157 } 11158 } 11159 return ret; 11160 } 11161 case TARGET_NR_sigaltstack: 11162 return do_sigaltstack(arg1, arg2, cpu_env); 11163 11164 #ifdef CONFIG_SENDFILE 11165 #ifdef TARGET_NR_sendfile 11166 case TARGET_NR_sendfile: 11167 { 11168 off_t *offp = NULL; 11169 off_t off; 11170 if (arg3) { 11171 ret = get_user_sal(off, arg3); 11172 if (is_error(ret)) { 11173 return ret; 11174 } 11175 offp = &off; 11176 } 11177 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11178 if (!is_error(ret) && arg3) { 11179 abi_long ret2 = put_user_sal(off, arg3); 11180 if (is_error(ret2)) { 11181 ret = ret2; 11182 } 11183 } 11184 return ret; 11185 } 11186 #endif 11187 #ifdef TARGET_NR_sendfile64 11188 case TARGET_NR_sendfile64: 11189 { 11190 off_t *offp = NULL; 11191 off_t off; 11192 if (arg3) { 11193 ret = get_user_s64(off, arg3); 11194 if (is_error(ret)) { 11195 return ret; 11196 } 11197 offp = &off; 11198 } 11199 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11200 if (!is_error(ret) && arg3) { 11201 abi_long ret2 = put_user_s64(off, arg3); 11202 if (is_error(ret2)) { 11203 ret = ret2; 11204 } 11205 } 11206 return ret; 11207 } 11208 #endif 11209 #endif 11210 #ifdef TARGET_NR_vfork 11211 case TARGET_NR_vfork: 11212 return get_errno(do_fork(cpu_env, 11213 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11214 0, 0, 0, 0)); 11215 #endif 11216 #ifdef TARGET_NR_ugetrlimit 11217 case TARGET_NR_ugetrlimit: 11218 { 11219 struct rlimit rlim; 11220 int resource = target_to_host_resource(arg1); 11221 ret = get_errno(getrlimit(resource, &rlim)); 11222 if (!is_error(ret)) { 11223 struct target_rlimit *target_rlim; 11224 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11225 return -TARGET_EFAULT; 11226 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11227 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11228 unlock_user_struct(target_rlim, arg2, 1); 11229 } 11230 return ret; 11231 } 11232 #endif 11233 #ifdef TARGET_NR_truncate64 11234 case TARGET_NR_truncate64: 11235 if (!(p = lock_user_string(arg1))) 11236 return -TARGET_EFAULT; 11237 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11238 unlock_user(p, arg1, 0); 11239 return ret; 11240 #endif 11241 #ifdef TARGET_NR_ftruncate64 11242 case TARGET_NR_ftruncate64: 11243 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11244 #endif 11245 #ifdef TARGET_NR_stat64 11246 case TARGET_NR_stat64: 11247 if (!(p = lock_user_string(arg1))) { 11248 return -TARGET_EFAULT; 11249 } 11250 ret = get_errno(stat(path(p), &st)); 11251 unlock_user(p, arg1, 0); 11252 if (!is_error(ret)) 11253 ret = host_to_target_stat64(cpu_env, arg2, &st); 11254 return ret; 11255 #endif 11256 #ifdef TARGET_NR_lstat64 11257 case TARGET_NR_lstat64: 11258 if (!(p = lock_user_string(arg1))) { 11259 return -TARGET_EFAULT; 11260 } 11261 ret = get_errno(lstat(path(p), &st)); 11262 unlock_user(p, arg1, 0); 11263 if (!is_error(ret)) 11264 ret = host_to_target_stat64(cpu_env, arg2, &st); 11265 return ret; 11266 #endif 11267 #ifdef TARGET_NR_fstat64 11268 case TARGET_NR_fstat64: 11269 ret = get_errno(fstat(arg1, &st)); 11270 if (!is_error(ret)) 11271 ret = host_to_target_stat64(cpu_env, arg2, &st); 11272 return ret; 11273 #endif 11274 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11275 #ifdef TARGET_NR_fstatat64 11276 
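    /* fstatat64 and newfstatat are the same syscall under different names;
     * both are implemented with host fstatat() plus host_to_target_stat64(). */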
case TARGET_NR_fstatat64: 11277 #endif 11278 #ifdef TARGET_NR_newfstatat 11279 case TARGET_NR_newfstatat: 11280 #endif 11281 if (!(p = lock_user_string(arg2))) { 11282 return -TARGET_EFAULT; 11283 } 11284 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11285 unlock_user(p, arg2, 0); 11286 if (!is_error(ret)) 11287 ret = host_to_target_stat64(cpu_env, arg3, &st); 11288 return ret; 11289 #endif 11290 #if defined(TARGET_NR_statx) 11291 case TARGET_NR_statx: 11292 { 11293 struct target_statx *target_stx; 11294 int dirfd = arg1; 11295 int flags = arg3; 11296 11297 p = lock_user_string(arg2); 11298 if (p == NULL) { 11299 return -TARGET_EFAULT; 11300 } 11301 #if defined(__NR_statx) 11302 { 11303 /* 11304 * It is assumed that struct statx is architecture independent. 11305 */ 11306 struct target_statx host_stx; 11307 int mask = arg4; 11308 11309 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11310 if (!is_error(ret)) { 11311 if (host_to_target_statx(&host_stx, arg5) != 0) { 11312 unlock_user(p, arg2, 0); 11313 return -TARGET_EFAULT; 11314 } 11315 } 11316 11317 if (ret != -TARGET_ENOSYS) { 11318 unlock_user(p, arg2, 0); 11319 return ret; 11320 } 11321 } 11322 #endif 11323 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11324 unlock_user(p, arg2, 0); 11325 11326 if (!is_error(ret)) { 11327 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11328 return -TARGET_EFAULT; 11329 } 11330 memset(target_stx, 0, sizeof(*target_stx)); 11331 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11332 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11333 __put_user(st.st_ino, &target_stx->stx_ino); 11334 __put_user(st.st_mode, &target_stx->stx_mode); 11335 __put_user(st.st_uid, &target_stx->stx_uid); 11336 __put_user(st.st_gid, &target_stx->stx_gid); 11337 __put_user(st.st_nlink, &target_stx->stx_nlink); 11338 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11339 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11340 __put_user(st.st_size, &target_stx->stx_size); 11341 __put_user(st.st_blksize, &target_stx->stx_blksize); 11342 __put_user(st.st_blocks, &target_stx->stx_blocks); 11343 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11344 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11345 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11346 unlock_user_struct(target_stx, arg5, 1); 11347 } 11348 } 11349 return ret; 11350 #endif 11351 #ifdef TARGET_NR_lchown 11352 case TARGET_NR_lchown: 11353 if (!(p = lock_user_string(arg1))) 11354 return -TARGET_EFAULT; 11355 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11356 unlock_user(p, arg1, 0); 11357 return ret; 11358 #endif 11359 #ifdef TARGET_NR_getuid 11360 case TARGET_NR_getuid: 11361 return get_errno(high2lowuid(getuid())); 11362 #endif 11363 #ifdef TARGET_NR_getgid 11364 case TARGET_NR_getgid: 11365 return get_errno(high2lowgid(getgid())); 11366 #endif 11367 #ifdef TARGET_NR_geteuid 11368 case TARGET_NR_geteuid: 11369 return get_errno(high2lowuid(geteuid())); 11370 #endif 11371 #ifdef TARGET_NR_getegid 11372 case TARGET_NR_getegid: 11373 return get_errno(high2lowgid(getegid())); 11374 #endif 11375 case TARGET_NR_setreuid: 11376 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11377 case TARGET_NR_setregid: 11378 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11379 case TARGET_NR_getgroups: 11380 { 11381 int gidsetsize = arg1; 11382 target_id *target_grouplist; 11383 gid_t *grouplist; 11384 int i; 11385 11386 
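            /* Fetch the host group list first, then narrow each gid to the
             * target's target_id (16-bit on legacy ABIs) with
             * high2lowgid()/tswapid(). */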
grouplist = alloca(gidsetsize * sizeof(gid_t)); 11387 ret = get_errno(getgroups(gidsetsize, grouplist)); 11388 if (gidsetsize == 0) 11389 return ret; 11390 if (!is_error(ret)) { 11391 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11392 if (!target_grouplist) 11393 return -TARGET_EFAULT; 11394 for(i = 0;i < ret; i++) 11395 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11396 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11397 } 11398 } 11399 return ret; 11400 case TARGET_NR_setgroups: 11401 { 11402 int gidsetsize = arg1; 11403 target_id *target_grouplist; 11404 gid_t *grouplist = NULL; 11405 int i; 11406 if (gidsetsize) { 11407 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11408 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11409 if (!target_grouplist) { 11410 return -TARGET_EFAULT; 11411 } 11412 for (i = 0; i < gidsetsize; i++) { 11413 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11414 } 11415 unlock_user(target_grouplist, arg2, 0); 11416 } 11417 return get_errno(setgroups(gidsetsize, grouplist)); 11418 } 11419 case TARGET_NR_fchown: 11420 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11421 #if defined(TARGET_NR_fchownat) 11422 case TARGET_NR_fchownat: 11423 if (!(p = lock_user_string(arg2))) 11424 return -TARGET_EFAULT; 11425 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11426 low2highgid(arg4), arg5)); 11427 unlock_user(p, arg2, 0); 11428 return ret; 11429 #endif 11430 #ifdef TARGET_NR_setresuid 11431 case TARGET_NR_setresuid: 11432 return get_errno(sys_setresuid(low2highuid(arg1), 11433 low2highuid(arg2), 11434 low2highuid(arg3))); 11435 #endif 11436 #ifdef TARGET_NR_getresuid 11437 case TARGET_NR_getresuid: 11438 { 11439 uid_t ruid, euid, suid; 11440 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11441 if (!is_error(ret)) { 11442 if (put_user_id(high2lowuid(ruid), arg1) 11443 || put_user_id(high2lowuid(euid), arg2) 11444 || put_user_id(high2lowuid(suid), arg3)) 11445 return -TARGET_EFAULT; 11446 } 11447 } 11448 return ret; 11449 #endif 11450 #ifdef TARGET_NR_getresgid 11451 case TARGET_NR_setresgid: 11452 return get_errno(sys_setresgid(low2highgid(arg1), 11453 low2highgid(arg2), 11454 low2highgid(arg3))); 11455 #endif 11456 #ifdef TARGET_NR_getresgid 11457 case TARGET_NR_getresgid: 11458 { 11459 gid_t rgid, egid, sgid; 11460 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11461 if (!is_error(ret)) { 11462 if (put_user_id(high2lowgid(rgid), arg1) 11463 || put_user_id(high2lowgid(egid), arg2) 11464 || put_user_id(high2lowgid(sgid), arg3)) 11465 return -TARGET_EFAULT; 11466 } 11467 } 11468 return ret; 11469 #endif 11470 #ifdef TARGET_NR_chown 11471 case TARGET_NR_chown: 11472 if (!(p = lock_user_string(arg1))) 11473 return -TARGET_EFAULT; 11474 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11475 unlock_user(p, arg1, 0); 11476 return ret; 11477 #endif 11478 case TARGET_NR_setuid: 11479 return get_errno(sys_setuid(low2highuid(arg1))); 11480 case TARGET_NR_setgid: 11481 return get_errno(sys_setgid(low2highgid(arg1))); 11482 case TARGET_NR_setfsuid: 11483 return get_errno(setfsuid(arg1)); 11484 case TARGET_NR_setfsgid: 11485 return get_errno(setfsgid(arg1)); 11486 11487 #ifdef TARGET_NR_lchown32 11488 case TARGET_NR_lchown32: 11489 if (!(p = lock_user_string(arg1))) 11490 return -TARGET_EFAULT; 11491 ret = get_errno(lchown(p, arg2, arg3)); 11492 unlock_user(p, arg1, 0); 11493 return ret; 11494 #endif 11495 #ifdef 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
         */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;
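
                /*
                 * fex now holds only the newly raised exceptions that
                 * are also enabled for software completion; these are
                 * the ones that must raise SIGFPE below.
                 */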
                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
         */
        }
        return ret;
#endif
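
/*
 * OSF/1 sigprocmask returns the old mask as the syscall result instead
 * of storing it through a pointer, so on success the converted old set
 * becomes the return value.
 */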
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2,
                                         gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
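
/*
 * The fadvise variants below differ only in how the 64-bit offset and
 * length are packed into 32-bit argument registers.  They all end up
 * in the host posix_fadvise(), which returns its error directly rather
 * than via errno, hence the explicit host_to_target_errno().
 */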
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
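
/*
 * fcntl64 needs an explicit struct flock64 conversion: the guest
 * layout differs from the host's, and old-ABI ARM guests use yet
 * another layout with different alignment, selected at runtime below.
 */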
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    return ret;
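    /*
     * The f*xattr variants operate on an already-open file descriptor,
     * so only the attribute name and the optional value buffer have to
     * be copied in from the guest.
     */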
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    return ret;
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    }
    return ret;
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
    }
    return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif

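/*
 * On 32-bit targets each clock_* syscall has a _time64 twin; the only
 * difference is whether the guest timespec carries 32-bit or 64-bit
 * seconds, hence the paired target/host timespec and timespec64
 * conversion helpers used below.
 */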
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

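    /*
     * tkill/tgkill need no tid translation: QEMU runs one host thread
     * per guest thread, so guest and host thread ids coincide and only
     * the signal number has to be converted.
     */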
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif

#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif

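/*
 * POSIX message queues are forwarded to the host's mq_* API; only the
 * queue name, the open flags and the mq_attr structure need conversion
 * between guest and host representations.
 */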
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

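    /*
     * mq_timedreceive additionally reports the received message's
     * priority, which is copied out to the guest only when a priority
     * pointer (arg4) was supplied.
     */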
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
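
/*
 * sync_file_range takes two 64-bit values.  On 32-bit ABIs they arrive
 * split across register pairs, and on MIPS the pair alignment inserts
 * a pad register that shifts every argument one slot later, hence the
 * per-target shuffling below.
 */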
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1,
                                       fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
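
/*
 * epoll_wait and epoll_pwait share one implementation: both funnel
 * into safe_epoll_pwait(), with a NULL signal mask for plain
 * epoll_wait.  epoll_data is opaque 64-bit data to the kernel, so the
 * returned events only need byte-swapping on the way back.
 */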
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
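
/*
 * prlimit64: for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK a requested
 * new limit is deliberately not forwarded to the host, since it would
 * constrain QEMU's own address space rather than the guest's; for
 * those resources the call degrades to a pure "get".
 */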
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif

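/*
 * Guest timer ids are an index into the g_posix_timers table, tagged
 * with TIMER_MAGIC in the upper bits; get_timer_id(), used by all of
 * the timer_* cases below, checks the tag and recovers the index.
 */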
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

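/*
 * timerfd descriptors are created and armed entirely on the host;
 * only the itimerspec structures crossing the syscall boundary need
 * guest<->host conversion.
 */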
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}

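/*
 * do_syscall() is the entry point used by the per-target cpu main
 * loops: it wraps do_syscall1() above with the syscall record/trace
 * hooks and the optional -strace logging of arguments and results.
 */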
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}