1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include "qemu/memfd.h" 24 #include "qemu/queue.h" 25 #include <elf.h> 26 #include <endian.h> 27 #include <grp.h> 28 #include <sys/ipc.h> 29 #include <sys/msg.h> 30 #include <sys/wait.h> 31 #include <sys/mount.h> 32 #include <sys/file.h> 33 #include <sys/fsuid.h> 34 #include <sys/personality.h> 35 #include <sys/prctl.h> 36 #include <sys/resource.h> 37 #include <sys/swap.h> 38 #include <linux/capability.h> 39 #include <sched.h> 40 #include <sys/timex.h> 41 #include <sys/socket.h> 42 #include <linux/sockios.h> 43 #include <sys/un.h> 44 #include <sys/uio.h> 45 #include <poll.h> 46 #include <sys/times.h> 47 #include <sys/shm.h> 48 #include <sys/sem.h> 49 #include <sys/statfs.h> 50 #include <utime.h> 51 #include <sys/sysinfo.h> 52 #include <sys/signalfd.h> 53 //#include <sys/user.h> 54 #include <netinet/ip.h> 55 #include <netinet/tcp.h> 56 #include <linux/wireless.h> 57 #include <linux/icmp.h> 58 #include <linux/icmpv6.h> 59 #include <linux/errqueue.h> 60 #include <linux/random.h> 61 #ifdef CONFIG_TIMERFD 62 #include <sys/timerfd.h> 63 #endif 64 #ifdef CONFIG_EVENTFD 65 #include <sys/eventfd.h> 66 #endif 67 #ifdef CONFIG_EPOLL 68 #include <sys/epoll.h> 69 #endif 70 #ifdef CONFIG_ATTR 71 #include "qemu/xattr.h" 72 #endif 73 #ifdef CONFIG_SENDFILE 74 #include <sys/sendfile.h> 75 #endif 76 #ifdef CONFIG_KCOV 77 #include <sys/kcov.h> 78 #endif 79 80 #define termios host_termios 81 #define winsize host_winsize 82 #define termio host_termio 83 #define sgttyb host_sgttyb /* same as target */ 84 #define tchars host_tchars /* same as target */ 85 #define ltchars host_ltchars /* same as target */ 86 87 #include <linux/termios.h> 88 #include <linux/unistd.h> 89 #include <linux/cdrom.h> 90 #include <linux/hdreg.h> 91 #include <linux/soundcard.h> 92 #include <linux/kd.h> 93 #include <linux/mtio.h> 94 #include <linux/fs.h> 95 #include <linux/fd.h> 96 #if defined(CONFIG_FIEMAP) 97 #include <linux/fiemap.h> 98 #endif 99 #include <linux/fb.h> 100 #if defined(CONFIG_USBFS) 101 #include <linux/usbdevice_fs.h> 102 #include <linux/usb/ch9.h> 103 #endif 104 #include <linux/vt.h> 105 #include <linux/dm-ioctl.h> 106 #include <linux/reboot.h> 107 #include <linux/route.h> 108 #include <linux/filter.h> 109 #include <linux/blkpg.h> 110 #include <netpacket/packet.h> 111 #include <linux/netlink.h> 112 #include <linux/if_alg.h> 113 #include <linux/rtc.h> 114 #include <sound/asound.h> 115 #ifdef CONFIG_BTRFS 116 #include <linux/btrfs.h> 117 #endif 118 #ifdef HAVE_DRM_H 119 #include <libdrm/drm.h> 120 #include <libdrm/i915_drm.h> 121 #endif 122 #include "linux_loop.h" 123 #include "uname.h" 124 125 #include "qemu.h" 126 #include "qemu/guest-random.h" 127 #include "qemu/selfmap.h" 128 #include "user/syscall-trace.h" 129 #include "qapi/error.h" 130 #include "fd-trans.h" 131 #include "tcg/tcg.h" 132 133 #ifndef CLONE_IO 134 #define CLONE_IO 0x80000000 /* Clone io context */ 135 #endif 136 137 /* We can't directly call the host clone syscall, because this will 138 * badly confuse libc (breaking mutexes, for example). So we must 139 * divide clone flags into: 140 * * flag combinations that look like pthread_create() 141 * * flag combinations that look like fork() 142 * * flags we can implement within QEMU itself 143 * * flags we can't support and will return an error for 144 */ 145 /* For thread creation, all these flags must be present; for 146 * fork, none must be present. 147 */ 148 #define CLONE_THREAD_FLAGS \ 149 (CLONE_VM | CLONE_FS | CLONE_FILES | \ 150 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM) 151 152 /* These flags are ignored: 153 * CLONE_DETACHED is now ignored by the kernel; 154 * CLONE_IO is just an optimisation hint to the I/O scheduler 155 */ 156 #define CLONE_IGNORED_FLAGS \ 157 (CLONE_DETACHED | CLONE_IO) 158 159 /* Flags for fork which we can implement within QEMU itself */ 160 #define CLONE_OPTIONAL_FORK_FLAGS \ 161 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 162 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID) 163 164 /* Flags for thread creation which we can implement within QEMU itself */ 165 #define CLONE_OPTIONAL_THREAD_FLAGS \ 166 (CLONE_SETTLS | CLONE_PARENT_SETTID | \ 167 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT) 168 169 #define CLONE_INVALID_FORK_FLAGS \ 170 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS)) 171 172 #define CLONE_INVALID_THREAD_FLAGS \ 173 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \ 174 CLONE_IGNORED_FLAGS)) 175 176 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits 177 * have almost all been allocated. We cannot support any of 178 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC, 179 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED. 180 * The checks against the invalid thread masks above will catch these. 181 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.) 182 */ 183 184 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted 185 * once. This exercises the codepaths for restart. 186 */ 187 //#define DEBUG_ERESTARTSYS 188 189 //#include <linux/msdos_fs.h> 190 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2]) 191 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2]) 192 193 #undef _syscall0 194 #undef _syscall1 195 #undef _syscall2 196 #undef _syscall3 197 #undef _syscall4 198 #undef _syscall5 199 #undef _syscall6 200 201 #define _syscall0(type,name) \ 202 static type name (void) \ 203 { \ 204 return syscall(__NR_##name); \ 205 } 206 207 #define _syscall1(type,name,type1,arg1) \ 208 static type name (type1 arg1) \ 209 { \ 210 return syscall(__NR_##name, arg1); \ 211 } 212 213 #define _syscall2(type,name,type1,arg1,type2,arg2) \ 214 static type name (type1 arg1,type2 arg2) \ 215 { \ 216 return syscall(__NR_##name, arg1, arg2); \ 217 } 218 219 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 220 static type name (type1 arg1,type2 arg2,type3 arg3) \ 221 { \ 222 return syscall(__NR_##name, arg1, arg2, arg3); \ 223 } 224 225 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ 226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \ 227 { \ 228 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 229 } 230 231 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 232 type5,arg5) \ 233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 234 { \ 235 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 236 } 237 238 239 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ 240 type5,arg5,type6,arg6) \ 241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \ 242 type6 arg6) \ 243 { \ 244 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 245 } 246 247 248 #define __NR_sys_uname __NR_uname 249 #define __NR_sys_getcwd1 __NR_getcwd 250 #define __NR_sys_getdents __NR_getdents 251 #define __NR_sys_getdents64 __NR_getdents64 252 #define __NR_sys_getpriority __NR_getpriority 253 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo 254 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo 255 #define __NR_sys_syslog __NR_syslog 256 #if defined(__NR_futex) 257 # define __NR_sys_futex __NR_futex 258 #endif 259 #if defined(__NR_futex_time64) 260 # define __NR_sys_futex_time64 __NR_futex_time64 261 #endif 262 #define __NR_sys_inotify_init __NR_inotify_init 263 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch 264 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch 265 #define __NR_sys_statx __NR_statx 266 267 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__) 268 #define __NR__llseek __NR_lseek 269 #endif 270 271 /* Newer kernel ports have llseek() instead of _llseek() */ 272 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek) 273 #define TARGET_NR__llseek TARGET_NR_llseek 274 #endif 275 276 #define __NR_sys_gettid __NR_gettid 277 _syscall0(int, sys_gettid) 278 279 /* For the 64-bit guest on 32-bit host case we must emulate 280 * getdents using getdents64, because otherwise the host 281 * might hand us back more dirent records than we can fit 282 * into the guest buffer after structure format conversion. 283 * Otherwise we emulate getdents with getdents if the host has it. 284 */ 285 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS 286 #define EMULATE_GETDENTS_WITH_GETDENTS 287 #endif 288 289 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS) 290 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count); 291 #endif 292 #if (defined(TARGET_NR_getdents) && \ 293 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \ 294 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64)) 295 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count); 296 #endif 297 #if defined(TARGET_NR__llseek) && defined(__NR_llseek) 298 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, 299 loff_t *, res, uint, wh); 300 #endif 301 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo) 302 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig, 303 siginfo_t *, uinfo) 304 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len) 305 #ifdef __NR_exit_group 306 _syscall1(int,exit_group,int,error_code) 307 #endif 308 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 309 _syscall1(int,set_tid_address,int *,tidptr) 310 #endif 311 #if defined(__NR_futex) 312 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, 313 const struct timespec *,timeout,int *,uaddr2,int,val3) 314 #endif 315 #if defined(__NR_futex_time64) 316 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val, 317 const struct timespec *,timeout,int *,uaddr2,int,val3) 318 #endif 319 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity 320 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, 321 unsigned long *, user_mask_ptr); 322 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity 323 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len, 324 unsigned long *, user_mask_ptr); 325 #define __NR_sys_getcpu __NR_getcpu 326 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache); 327 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd, 328 void *, arg); 329 _syscall2(int, capget, struct __user_cap_header_struct *, header, 330 struct __user_cap_data_struct *, data); 331 _syscall2(int, capset, struct __user_cap_header_struct *, header, 332 struct __user_cap_data_struct *, data); 333 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 334 _syscall2(int, ioprio_get, int, which, int, who) 335 #endif 336 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 337 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio) 338 #endif 339 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 340 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags) 341 #endif 342 343 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 344 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type, 345 unsigned long, idx1, unsigned long, idx2) 346 #endif 347 348 /* 349 * It is assumed that struct statx is architecture independent. 350 */ 351 #if defined(TARGET_NR_statx) && defined(__NR_statx) 352 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags, 353 unsigned int, mask, struct target_statx *, statxbuf) 354 #endif 355 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier) 356 _syscall2(int, membarrier, int, cmd, int, flags) 357 #endif 358 359 static bitmask_transtbl fcntl_flags_tbl[] = { 360 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, }, 361 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, }, 362 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, }, 363 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, }, 364 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, }, 365 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, }, 366 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, }, 367 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, }, 368 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, }, 369 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, }, 370 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, }, 371 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, }, 372 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, }, 373 #if defined(O_DIRECT) 374 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, }, 375 #endif 376 #if defined(O_NOATIME) 377 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME }, 378 #endif 379 #if defined(O_CLOEXEC) 380 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC }, 381 #endif 382 #if defined(O_PATH) 383 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH }, 384 #endif 385 #if defined(O_TMPFILE) 386 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE }, 387 #endif 388 /* Don't terminate the list prematurely on 64-bit host+guest. */ 389 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0 390 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, }, 391 #endif 392 { 0, 0, 0, 0 } 393 }; 394 395 _syscall2(int, sys_getcwd1, char *, buf, size_t, size) 396 397 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64) 398 #if defined(__NR_utimensat) 399 #define __NR_sys_utimensat __NR_utimensat 400 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname, 401 const struct timespec *,tsp,int,flags) 402 #else 403 static int sys_utimensat(int dirfd, const char *pathname, 404 const struct timespec times[2], int flags) 405 { 406 errno = ENOSYS; 407 return -1; 408 } 409 #endif 410 #endif /* TARGET_NR_utimensat */ 411 412 #ifdef TARGET_NR_renameat2 413 #if defined(__NR_renameat2) 414 #define __NR_sys_renameat2 __NR_renameat2 415 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd, 416 const char *, new, unsigned int, flags) 417 #else 418 static int sys_renameat2(int oldfd, const char *old, 419 int newfd, const char *new, int flags) 420 { 421 if (flags == 0) { 422 return renameat(oldfd, old, newfd, new); 423 } 424 errno = ENOSYS; 425 return -1; 426 } 427 #endif 428 #endif /* TARGET_NR_renameat2 */ 429 430 #ifdef CONFIG_INOTIFY 431 #include <sys/inotify.h> 432 433 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 434 static int sys_inotify_init(void) 435 { 436 return (inotify_init()); 437 } 438 #endif 439 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 440 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask) 441 { 442 return (inotify_add_watch(fd, pathname, mask)); 443 } 444 #endif 445 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 446 static int sys_inotify_rm_watch(int fd, int32_t wd) 447 { 448 return (inotify_rm_watch(fd, wd)); 449 } 450 #endif 451 #ifdef CONFIG_INOTIFY1 452 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 453 static int sys_inotify_init1(int flags) 454 { 455 return (inotify_init1(flags)); 456 } 457 #endif 458 #endif 459 #else 460 /* Userspace can usually survive runtime without inotify */ 461 #undef TARGET_NR_inotify_init 462 #undef TARGET_NR_inotify_init1 463 #undef TARGET_NR_inotify_add_watch 464 #undef TARGET_NR_inotify_rm_watch 465 #endif /* CONFIG_INOTIFY */ 466 467 #if defined(TARGET_NR_prlimit64) 468 #ifndef __NR_prlimit64 469 # define __NR_prlimit64 -1 470 #endif 471 #define __NR_sys_prlimit64 __NR_prlimit64 472 /* The glibc rlimit structure may not be that used by the underlying syscall */ 473 struct host_rlimit64 { 474 uint64_t rlim_cur; 475 uint64_t rlim_max; 476 }; 477 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource, 478 const struct host_rlimit64 *, new_limit, 479 struct host_rlimit64 *, old_limit) 480 #endif 481 482 483 #if defined(TARGET_NR_timer_create) 484 /* Maxiumum of 32 active POSIX timers allowed at any one time. */ 485 static timer_t g_posix_timers[32] = { 0, } ; 486 487 static inline int next_free_host_timer(void) 488 { 489 int k ; 490 /* FIXME: Does finding the next free slot require a lock? */ 491 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 492 if (g_posix_timers[k] == 0) { 493 g_posix_timers[k] = (timer_t) 1; 494 return k; 495 } 496 } 497 return -1; 498 } 499 #endif 500 501 #define ERRNO_TABLE_SIZE 1200 502 503 /* target_to_host_errno_table[] is initialized from 504 * host_to_target_errno_table[] in syscall_init(). */ 505 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 506 }; 507 508 /* 509 * This list is the union of errno values overridden in asm-<arch>/errno.h 510 * minus the errnos that are not actually generic to all archs. 511 */ 512 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = { 513 [EAGAIN] = TARGET_EAGAIN, 514 [EIDRM] = TARGET_EIDRM, 515 [ECHRNG] = TARGET_ECHRNG, 516 [EL2NSYNC] = TARGET_EL2NSYNC, 517 [EL3HLT] = TARGET_EL3HLT, 518 [EL3RST] = TARGET_EL3RST, 519 [ELNRNG] = TARGET_ELNRNG, 520 [EUNATCH] = TARGET_EUNATCH, 521 [ENOCSI] = TARGET_ENOCSI, 522 [EL2HLT] = TARGET_EL2HLT, 523 [EDEADLK] = TARGET_EDEADLK, 524 [ENOLCK] = TARGET_ENOLCK, 525 [EBADE] = TARGET_EBADE, 526 [EBADR] = TARGET_EBADR, 527 [EXFULL] = TARGET_EXFULL, 528 [ENOANO] = TARGET_ENOANO, 529 [EBADRQC] = TARGET_EBADRQC, 530 [EBADSLT] = TARGET_EBADSLT, 531 [EBFONT] = TARGET_EBFONT, 532 [ENOSTR] = TARGET_ENOSTR, 533 [ENODATA] = TARGET_ENODATA, 534 [ETIME] = TARGET_ETIME, 535 [ENOSR] = TARGET_ENOSR, 536 [ENONET] = TARGET_ENONET, 537 [ENOPKG] = TARGET_ENOPKG, 538 [EREMOTE] = TARGET_EREMOTE, 539 [ENOLINK] = TARGET_ENOLINK, 540 [EADV] = TARGET_EADV, 541 [ESRMNT] = TARGET_ESRMNT, 542 [ECOMM] = TARGET_ECOMM, 543 [EPROTO] = TARGET_EPROTO, 544 [EDOTDOT] = TARGET_EDOTDOT, 545 [EMULTIHOP] = TARGET_EMULTIHOP, 546 [EBADMSG] = TARGET_EBADMSG, 547 [ENAMETOOLONG] = TARGET_ENAMETOOLONG, 548 [EOVERFLOW] = TARGET_EOVERFLOW, 549 [ENOTUNIQ] = TARGET_ENOTUNIQ, 550 [EBADFD] = TARGET_EBADFD, 551 [EREMCHG] = TARGET_EREMCHG, 552 [ELIBACC] = TARGET_ELIBACC, 553 [ELIBBAD] = TARGET_ELIBBAD, 554 [ELIBSCN] = TARGET_ELIBSCN, 555 [ELIBMAX] = TARGET_ELIBMAX, 556 [ELIBEXEC] = TARGET_ELIBEXEC, 557 [EILSEQ] = TARGET_EILSEQ, 558 [ENOSYS] = TARGET_ENOSYS, 559 [ELOOP] = TARGET_ELOOP, 560 [ERESTART] = TARGET_ERESTART, 561 [ESTRPIPE] = TARGET_ESTRPIPE, 562 [ENOTEMPTY] = TARGET_ENOTEMPTY, 563 [EUSERS] = TARGET_EUSERS, 564 [ENOTSOCK] = TARGET_ENOTSOCK, 565 [EDESTADDRREQ] = TARGET_EDESTADDRREQ, 566 [EMSGSIZE] = TARGET_EMSGSIZE, 567 [EPROTOTYPE] = TARGET_EPROTOTYPE, 568 [ENOPROTOOPT] = TARGET_ENOPROTOOPT, 569 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT, 570 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT, 571 [EOPNOTSUPP] = TARGET_EOPNOTSUPP, 572 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT, 573 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT, 574 [EADDRINUSE] = TARGET_EADDRINUSE, 575 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL, 576 [ENETDOWN] = TARGET_ENETDOWN, 577 [ENETUNREACH] = TARGET_ENETUNREACH, 578 [ENETRESET] = TARGET_ENETRESET, 579 [ECONNABORTED] = TARGET_ECONNABORTED, 580 [ECONNRESET] = TARGET_ECONNRESET, 581 [ENOBUFS] = TARGET_ENOBUFS, 582 [EISCONN] = TARGET_EISCONN, 583 [ENOTCONN] = TARGET_ENOTCONN, 584 [EUCLEAN] = TARGET_EUCLEAN, 585 [ENOTNAM] = TARGET_ENOTNAM, 586 [ENAVAIL] = TARGET_ENAVAIL, 587 [EISNAM] = TARGET_EISNAM, 588 [EREMOTEIO] = TARGET_EREMOTEIO, 589 [EDQUOT] = TARGET_EDQUOT, 590 [ESHUTDOWN] = TARGET_ESHUTDOWN, 591 [ETOOMANYREFS] = TARGET_ETOOMANYREFS, 592 [ETIMEDOUT] = TARGET_ETIMEDOUT, 593 [ECONNREFUSED] = TARGET_ECONNREFUSED, 594 [EHOSTDOWN] = TARGET_EHOSTDOWN, 595 [EHOSTUNREACH] = TARGET_EHOSTUNREACH, 596 [EALREADY] = TARGET_EALREADY, 597 [EINPROGRESS] = TARGET_EINPROGRESS, 598 [ESTALE] = TARGET_ESTALE, 599 [ECANCELED] = TARGET_ECANCELED, 600 [ENOMEDIUM] = TARGET_ENOMEDIUM, 601 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE, 602 #ifdef ENOKEY 603 [ENOKEY] = TARGET_ENOKEY, 604 #endif 605 #ifdef EKEYEXPIRED 606 [EKEYEXPIRED] = TARGET_EKEYEXPIRED, 607 #endif 608 #ifdef EKEYREVOKED 609 [EKEYREVOKED] = TARGET_EKEYREVOKED, 610 #endif 611 #ifdef EKEYREJECTED 612 [EKEYREJECTED] = TARGET_EKEYREJECTED, 613 #endif 614 #ifdef EOWNERDEAD 615 [EOWNERDEAD] = TARGET_EOWNERDEAD, 616 #endif 617 #ifdef ENOTRECOVERABLE 618 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE, 619 #endif 620 #ifdef ENOMSG 621 [ENOMSG] = TARGET_ENOMSG, 622 #endif 623 #ifdef ERKFILL 624 [ERFKILL] = TARGET_ERFKILL, 625 #endif 626 #ifdef EHWPOISON 627 [EHWPOISON] = TARGET_EHWPOISON, 628 #endif 629 }; 630 631 static inline int host_to_target_errno(int err) 632 { 633 if (err >= 0 && err < ERRNO_TABLE_SIZE && 634 host_to_target_errno_table[err]) { 635 return host_to_target_errno_table[err]; 636 } 637 return err; 638 } 639 640 static inline int target_to_host_errno(int err) 641 { 642 if (err >= 0 && err < ERRNO_TABLE_SIZE && 643 target_to_host_errno_table[err]) { 644 return target_to_host_errno_table[err]; 645 } 646 return err; 647 } 648 649 static inline abi_long get_errno(abi_long ret) 650 { 651 if (ret == -1) 652 return -host_to_target_errno(errno); 653 else 654 return ret; 655 } 656 657 const char *target_strerror(int err) 658 { 659 if (err == TARGET_ERESTARTSYS) { 660 return "To be restarted"; 661 } 662 if (err == TARGET_QEMU_ESIGRETURN) { 663 return "Successful exit from sigreturn"; 664 } 665 666 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) { 667 return NULL; 668 } 669 return strerror(target_to_host_errno(err)); 670 } 671 672 #define safe_syscall0(type, name) \ 673 static type safe_##name(void) \ 674 { \ 675 return safe_syscall(__NR_##name); \ 676 } 677 678 #define safe_syscall1(type, name, type1, arg1) \ 679 static type safe_##name(type1 arg1) \ 680 { \ 681 return safe_syscall(__NR_##name, arg1); \ 682 } 683 684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \ 685 static type safe_##name(type1 arg1, type2 arg2) \ 686 { \ 687 return safe_syscall(__NR_##name, arg1, arg2); \ 688 } 689 690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ 692 { \ 693 return safe_syscall(__NR_##name, arg1, arg2, arg3); \ 694 } 695 696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 697 type4, arg4) \ 698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 699 { \ 700 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 701 } 702 703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 704 type4, arg4, type5, arg5) \ 705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 706 type5 arg5) \ 707 { \ 708 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 709 } 710 711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 712 type4, arg4, type5, arg5, type6, arg6) \ 713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 714 type5 arg5, type6 arg6) \ 715 { \ 716 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 717 } 718 719 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) 720 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) 721 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \ 722 int, flags, mode_t, mode) 723 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid) 724 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ 725 struct rusage *, rusage) 726 #endif 727 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \ 728 int, options, struct rusage *, rusage) 729 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp) 730 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \ 731 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 732 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \ 733 fd_set *, exceptfds, struct timespec *, timeout, void *, sig) 734 #endif 735 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64) 736 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds, 737 struct timespec *, tsp, const sigset_t *, sigmask, 738 size_t, sigsetsize) 739 #endif 740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events, 741 int, maxevents, int, timeout, const sigset_t *, sigmask, 742 size_t, sigsetsize) 743 #if defined(__NR_futex) 744 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \ 745 const struct timespec *,timeout,int *,uaddr2,int,val3) 746 #endif 747 #if defined(__NR_futex_time64) 748 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \ 749 const struct timespec *,timeout,int *,uaddr2,int,val3) 750 #endif 751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize) 752 safe_syscall2(int, kill, pid_t, pid, int, sig) 753 safe_syscall2(int, tkill, int, tid, int, sig) 754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig) 755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt) 756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt) 757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt, 758 unsigned long, pos_l, unsigned long, pos_h) 759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, 760 unsigned long, pos_l, unsigned long, pos_h) 761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr, 762 socklen_t, addrlen) 763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len, 764 int, flags, const struct sockaddr *, addr, socklen_t, addrlen) 765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len, 766 int, flags, struct sockaddr *, addr, socklen_t *, addrlen) 767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags) 768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags) 769 safe_syscall2(int, flock, int, fd, int, operation) 770 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64) 771 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo, 772 const struct timespec *, uts, size_t, sigsetsize) 773 #endif 774 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len, 775 int, flags) 776 #if defined(TARGET_NR_nanosleep) 777 safe_syscall2(int, nanosleep, const struct timespec *, req, 778 struct timespec *, rem) 779 #endif 780 #if defined(TARGET_NR_clock_nanosleep) || \ 781 defined(TARGET_NR_clock_nanosleep_time64) 782 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags, 783 const struct timespec *, req, struct timespec *, rem) 784 #endif 785 #ifdef __NR_ipc 786 #ifdef __s390x__ 787 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third, 788 void *, ptr) 789 #else 790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third, 791 void *, ptr, long, fifth) 792 #endif 793 #endif 794 #ifdef __NR_msgsnd 795 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz, 796 int, flags) 797 #endif 798 #ifdef __NR_msgrcv 799 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz, 800 long, msgtype, int, flags) 801 #endif 802 #ifdef __NR_semtimedop 803 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops, 804 unsigned, nsops, const struct timespec *, timeout) 805 #endif 806 #if defined(TARGET_NR_mq_timedsend) || \ 807 defined(TARGET_NR_mq_timedsend_time64) 808 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr, 809 size_t, len, unsigned, prio, const struct timespec *, timeout) 810 #endif 811 #if defined(TARGET_NR_mq_timedreceive) || \ 812 defined(TARGET_NR_mq_timedreceive_time64) 813 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr, 814 size_t, len, unsigned *, prio, const struct timespec *, timeout) 815 #endif 816 /* We do ioctl like this rather than via safe_syscall3 to preserve the 817 * "third argument might be integer or pointer or not present" behaviour of 818 * the libc function. 819 */ 820 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__) 821 /* Similarly for fcntl. Note that callers must always: 822 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK 823 * use the flock64 struct rather than unsuffixed flock 824 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts. 825 */ 826 #ifdef __NR_fcntl64 827 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__) 828 #else 829 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__) 830 #endif 831 832 static inline int host_to_target_sock_type(int host_type) 833 { 834 int target_type; 835 836 switch (host_type & 0xf /* SOCK_TYPE_MASK */) { 837 case SOCK_DGRAM: 838 target_type = TARGET_SOCK_DGRAM; 839 break; 840 case SOCK_STREAM: 841 target_type = TARGET_SOCK_STREAM; 842 break; 843 default: 844 target_type = host_type & 0xf /* SOCK_TYPE_MASK */; 845 break; 846 } 847 848 #if defined(SOCK_CLOEXEC) 849 if (host_type & SOCK_CLOEXEC) { 850 target_type |= TARGET_SOCK_CLOEXEC; 851 } 852 #endif 853 854 #if defined(SOCK_NONBLOCK) 855 if (host_type & SOCK_NONBLOCK) { 856 target_type |= TARGET_SOCK_NONBLOCK; 857 } 858 #endif 859 860 return target_type; 861 } 862 863 static abi_ulong target_brk; 864 static abi_ulong target_original_brk; 865 static abi_ulong brk_page; 866 867 void target_set_brk(abi_ulong new_brk) 868 { 869 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk); 870 brk_page = HOST_PAGE_ALIGN(target_brk); 871 } 872 873 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0) 874 #define DEBUGF_BRK(message, args...) 875 876 /* do_brk() must return target values and target errnos. */ 877 abi_long do_brk(abi_ulong new_brk) 878 { 879 abi_long mapped_addr; 880 abi_ulong new_alloc_size; 881 882 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk); 883 884 if (!new_brk) { 885 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk); 886 return target_brk; 887 } 888 if (new_brk < target_original_brk) { 889 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n", 890 target_brk); 891 return target_brk; 892 } 893 894 /* If the new brk is less than the highest page reserved to the 895 * target heap allocation, set it and we're almost done... */ 896 if (new_brk <= brk_page) { 897 /* Heap contents are initialized to zero, as for anonymous 898 * mapped pages. */ 899 if (new_brk > target_brk) { 900 memset(g2h(target_brk), 0, new_brk - target_brk); 901 } 902 target_brk = new_brk; 903 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk); 904 return target_brk; 905 } 906 907 /* We need to allocate more memory after the brk... Note that 908 * we don't use MAP_FIXED because that will map over the top of 909 * any existing mapping (like the one with the host libc or qemu 910 * itself); instead we treat "mapped but at wrong address" as 911 * a failure and unmap again. 912 */ 913 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page); 914 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size, 915 PROT_READ|PROT_WRITE, 916 MAP_ANON|MAP_PRIVATE, 0, 0)); 917 918 if (mapped_addr == brk_page) { 919 /* Heap contents are initialized to zero, as for anonymous 920 * mapped pages. Technically the new pages are already 921 * initialized to zero since they *are* anonymous mapped 922 * pages, however we have to take care with the contents that 923 * come from the remaining part of the previous page: it may 924 * contains garbage data due to a previous heap usage (grown 925 * then shrunken). */ 926 memset(g2h(target_brk), 0, brk_page - target_brk); 927 928 target_brk = new_brk; 929 brk_page = HOST_PAGE_ALIGN(target_brk); 930 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n", 931 target_brk); 932 return target_brk; 933 } else if (mapped_addr != -1) { 934 /* Mapped but at wrong address, meaning there wasn't actually 935 * enough space for this brk. 936 */ 937 target_munmap(mapped_addr, new_alloc_size); 938 mapped_addr = -1; 939 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk); 940 } 941 else { 942 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk); 943 } 944 945 #if defined(TARGET_ALPHA) 946 /* We (partially) emulate OSF/1 on Alpha, which requires we 947 return a proper errno, not an unchanged brk value. */ 948 return -TARGET_ENOMEM; 949 #endif 950 /* For everything else, return the previous break. */ 951 return target_brk; 952 } 953 954 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \ 955 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 956 static inline abi_long copy_from_user_fdset(fd_set *fds, 957 abi_ulong target_fds_addr, 958 int n) 959 { 960 int i, nw, j, k; 961 abi_ulong b, *target_fds; 962 963 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS); 964 if (!(target_fds = lock_user(VERIFY_READ, 965 target_fds_addr, 966 sizeof(abi_ulong) * nw, 967 1))) 968 return -TARGET_EFAULT; 969 970 FD_ZERO(fds); 971 k = 0; 972 for (i = 0; i < nw; i++) { 973 /* grab the abi_ulong */ 974 __get_user(b, &target_fds[i]); 975 for (j = 0; j < TARGET_ABI_BITS; j++) { 976 /* check the bit inside the abi_ulong */ 977 if ((b >> j) & 1) 978 FD_SET(k, fds); 979 k++; 980 } 981 } 982 983 unlock_user(target_fds, target_fds_addr, 0); 984 985 return 0; 986 } 987 988 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr, 989 abi_ulong target_fds_addr, 990 int n) 991 { 992 if (target_fds_addr) { 993 if (copy_from_user_fdset(fds, target_fds_addr, n)) 994 return -TARGET_EFAULT; 995 *fds_ptr = fds; 996 } else { 997 *fds_ptr = NULL; 998 } 999 return 0; 1000 } 1001 1002 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr, 1003 const fd_set *fds, 1004 int n) 1005 { 1006 int i, nw, j, k; 1007 abi_long v; 1008 abi_ulong *target_fds; 1009 1010 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS); 1011 if (!(target_fds = lock_user(VERIFY_WRITE, 1012 target_fds_addr, 1013 sizeof(abi_ulong) * nw, 1014 0))) 1015 return -TARGET_EFAULT; 1016 1017 k = 0; 1018 for (i = 0; i < nw; i++) { 1019 v = 0; 1020 for (j = 0; j < TARGET_ABI_BITS; j++) { 1021 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j); 1022 k++; 1023 } 1024 __put_user(v, &target_fds[i]); 1025 } 1026 1027 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw); 1028 1029 return 0; 1030 } 1031 #endif 1032 1033 #if defined(__alpha__) 1034 #define HOST_HZ 1024 1035 #else 1036 #define HOST_HZ 100 1037 #endif 1038 1039 static inline abi_long host_to_target_clock_t(long ticks) 1040 { 1041 #if HOST_HZ == TARGET_HZ 1042 return ticks; 1043 #else 1044 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ; 1045 #endif 1046 } 1047 1048 static inline abi_long host_to_target_rusage(abi_ulong target_addr, 1049 const struct rusage *rusage) 1050 { 1051 struct target_rusage *target_rusage; 1052 1053 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0)) 1054 return -TARGET_EFAULT; 1055 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec); 1056 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec); 1057 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec); 1058 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec); 1059 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss); 1060 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss); 1061 target_rusage->ru_idrss = tswapal(rusage->ru_idrss); 1062 target_rusage->ru_isrss = tswapal(rusage->ru_isrss); 1063 target_rusage->ru_minflt = tswapal(rusage->ru_minflt); 1064 target_rusage->ru_majflt = tswapal(rusage->ru_majflt); 1065 target_rusage->ru_nswap = tswapal(rusage->ru_nswap); 1066 target_rusage->ru_inblock = tswapal(rusage->ru_inblock); 1067 target_rusage->ru_oublock = tswapal(rusage->ru_oublock); 1068 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd); 1069 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv); 1070 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals); 1071 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw); 1072 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw); 1073 unlock_user_struct(target_rusage, target_addr, 1); 1074 1075 return 0; 1076 } 1077 1078 #ifdef TARGET_NR_setrlimit 1079 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim) 1080 { 1081 abi_ulong target_rlim_swap; 1082 rlim_t result; 1083 1084 target_rlim_swap = tswapal(target_rlim); 1085 if (target_rlim_swap == TARGET_RLIM_INFINITY) 1086 return RLIM_INFINITY; 1087 1088 result = target_rlim_swap; 1089 if (target_rlim_swap != (rlim_t)result) 1090 return RLIM_INFINITY; 1091 1092 return result; 1093 } 1094 #endif 1095 1096 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit) 1097 static inline abi_ulong host_to_target_rlim(rlim_t rlim) 1098 { 1099 abi_ulong target_rlim_swap; 1100 abi_ulong result; 1101 1102 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) 1103 target_rlim_swap = TARGET_RLIM_INFINITY; 1104 else 1105 target_rlim_swap = rlim; 1106 result = tswapal(target_rlim_swap); 1107 1108 return result; 1109 } 1110 #endif 1111 1112 static inline int target_to_host_resource(int code) 1113 { 1114 switch (code) { 1115 case TARGET_RLIMIT_AS: 1116 return RLIMIT_AS; 1117 case TARGET_RLIMIT_CORE: 1118 return RLIMIT_CORE; 1119 case TARGET_RLIMIT_CPU: 1120 return RLIMIT_CPU; 1121 case TARGET_RLIMIT_DATA: 1122 return RLIMIT_DATA; 1123 case TARGET_RLIMIT_FSIZE: 1124 return RLIMIT_FSIZE; 1125 case TARGET_RLIMIT_LOCKS: 1126 return RLIMIT_LOCKS; 1127 case TARGET_RLIMIT_MEMLOCK: 1128 return RLIMIT_MEMLOCK; 1129 case TARGET_RLIMIT_MSGQUEUE: 1130 return RLIMIT_MSGQUEUE; 1131 case TARGET_RLIMIT_NICE: 1132 return RLIMIT_NICE; 1133 case TARGET_RLIMIT_NOFILE: 1134 return RLIMIT_NOFILE; 1135 case TARGET_RLIMIT_NPROC: 1136 return RLIMIT_NPROC; 1137 case TARGET_RLIMIT_RSS: 1138 return RLIMIT_RSS; 1139 case TARGET_RLIMIT_RTPRIO: 1140 return RLIMIT_RTPRIO; 1141 case TARGET_RLIMIT_SIGPENDING: 1142 return RLIMIT_SIGPENDING; 1143 case TARGET_RLIMIT_STACK: 1144 return RLIMIT_STACK; 1145 default: 1146 return code; 1147 } 1148 } 1149 1150 static inline abi_long copy_from_user_timeval(struct timeval *tv, 1151 abi_ulong target_tv_addr) 1152 { 1153 struct target_timeval *target_tv; 1154 1155 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) { 1156 return -TARGET_EFAULT; 1157 } 1158 1159 __get_user(tv->tv_sec, &target_tv->tv_sec); 1160 __get_user(tv->tv_usec, &target_tv->tv_usec); 1161 1162 unlock_user_struct(target_tv, target_tv_addr, 0); 1163 1164 return 0; 1165 } 1166 1167 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 1168 const struct timeval *tv) 1169 { 1170 struct target_timeval *target_tv; 1171 1172 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) { 1173 return -TARGET_EFAULT; 1174 } 1175 1176 __put_user(tv->tv_sec, &target_tv->tv_sec); 1177 __put_user(tv->tv_usec, &target_tv->tv_usec); 1178 1179 unlock_user_struct(target_tv, target_tv_addr, 1); 1180 1181 return 0; 1182 } 1183 1184 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 1185 static inline abi_long copy_from_user_timeval64(struct timeval *tv, 1186 abi_ulong target_tv_addr) 1187 { 1188 struct target__kernel_sock_timeval *target_tv; 1189 1190 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) { 1191 return -TARGET_EFAULT; 1192 } 1193 1194 __get_user(tv->tv_sec, &target_tv->tv_sec); 1195 __get_user(tv->tv_usec, &target_tv->tv_usec); 1196 1197 unlock_user_struct(target_tv, target_tv_addr, 0); 1198 1199 return 0; 1200 } 1201 #endif 1202 1203 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr, 1204 const struct timeval *tv) 1205 { 1206 struct target__kernel_sock_timeval *target_tv; 1207 1208 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) { 1209 return -TARGET_EFAULT; 1210 } 1211 1212 __put_user(tv->tv_sec, &target_tv->tv_sec); 1213 __put_user(tv->tv_usec, &target_tv->tv_usec); 1214 1215 unlock_user_struct(target_tv, target_tv_addr, 1); 1216 1217 return 0; 1218 } 1219 1220 #if defined(TARGET_NR_futex) || \ 1221 defined(TARGET_NR_rt_sigtimedwait) || \ 1222 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \ 1223 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \ 1224 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \ 1225 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \ 1226 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \ 1227 defined(TARGET_NR_timer_settime) || \ 1228 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)) 1229 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 1230 abi_ulong target_addr) 1231 { 1232 struct target_timespec *target_ts; 1233 1234 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) { 1235 return -TARGET_EFAULT; 1236 } 1237 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 1238 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1239 unlock_user_struct(target_ts, target_addr, 0); 1240 return 0; 1241 } 1242 #endif 1243 1244 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \ 1245 defined(TARGET_NR_timer_settime64) || \ 1246 defined(TARGET_NR_mq_timedsend_time64) || \ 1247 defined(TARGET_NR_mq_timedreceive_time64) || \ 1248 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \ 1249 defined(TARGET_NR_clock_nanosleep_time64) || \ 1250 defined(TARGET_NR_rt_sigtimedwait_time64) || \ 1251 defined(TARGET_NR_utimensat) || \ 1252 defined(TARGET_NR_utimensat_time64) || \ 1253 defined(TARGET_NR_semtimedop_time64) || \ 1254 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64) 1255 static inline abi_long target_to_host_timespec64(struct timespec *host_ts, 1256 abi_ulong target_addr) 1257 { 1258 struct target__kernel_timespec *target_ts; 1259 1260 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) { 1261 return -TARGET_EFAULT; 1262 } 1263 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 1264 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1265 /* in 32bit mode, this drops the padding */ 1266 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec; 1267 unlock_user_struct(target_ts, target_addr, 0); 1268 return 0; 1269 } 1270 #endif 1271 1272 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 1273 struct timespec *host_ts) 1274 { 1275 struct target_timespec *target_ts; 1276 1277 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) { 1278 return -TARGET_EFAULT; 1279 } 1280 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 1281 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1282 unlock_user_struct(target_ts, target_addr, 1); 1283 return 0; 1284 } 1285 1286 static inline abi_long host_to_target_timespec64(abi_ulong target_addr, 1287 struct timespec *host_ts) 1288 { 1289 struct target__kernel_timespec *target_ts; 1290 1291 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) { 1292 return -TARGET_EFAULT; 1293 } 1294 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 1295 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1296 unlock_user_struct(target_ts, target_addr, 1); 1297 return 0; 1298 } 1299 1300 #if defined(TARGET_NR_gettimeofday) 1301 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr, 1302 struct timezone *tz) 1303 { 1304 struct target_timezone *target_tz; 1305 1306 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) { 1307 return -TARGET_EFAULT; 1308 } 1309 1310 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 1311 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime); 1312 1313 unlock_user_struct(target_tz, target_tz_addr, 1); 1314 1315 return 0; 1316 } 1317 #endif 1318 1319 #if defined(TARGET_NR_settimeofday) 1320 static inline abi_long copy_from_user_timezone(struct timezone *tz, 1321 abi_ulong target_tz_addr) 1322 { 1323 struct target_timezone *target_tz; 1324 1325 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) { 1326 return -TARGET_EFAULT; 1327 } 1328 1329 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 1330 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime); 1331 1332 unlock_user_struct(target_tz, target_tz_addr, 0); 1333 1334 return 0; 1335 } 1336 #endif 1337 1338 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 1339 #include <mqueue.h> 1340 1341 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 1342 abi_ulong target_mq_attr_addr) 1343 { 1344 struct target_mq_attr *target_mq_attr; 1345 1346 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 1347 target_mq_attr_addr, 1)) 1348 return -TARGET_EFAULT; 1349 1350 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 1351 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1352 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1353 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1354 1355 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 1356 1357 return 0; 1358 } 1359 1360 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 1361 const struct mq_attr *attr) 1362 { 1363 struct target_mq_attr *target_mq_attr; 1364 1365 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 1366 target_mq_attr_addr, 0)) 1367 return -TARGET_EFAULT; 1368 1369 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 1370 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1371 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1372 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1373 1374 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 1375 1376 return 0; 1377 } 1378 #endif 1379 1380 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 1381 /* do_select() must return target values and target errnos. */ 1382 static abi_long do_select(int n, 1383 abi_ulong rfd_addr, abi_ulong wfd_addr, 1384 abi_ulong efd_addr, abi_ulong target_tv_addr) 1385 { 1386 fd_set rfds, wfds, efds; 1387 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1388 struct timeval tv; 1389 struct timespec ts, *ts_ptr; 1390 abi_long ret; 1391 1392 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1393 if (ret) { 1394 return ret; 1395 } 1396 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1397 if (ret) { 1398 return ret; 1399 } 1400 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1401 if (ret) { 1402 return ret; 1403 } 1404 1405 if (target_tv_addr) { 1406 if (copy_from_user_timeval(&tv, target_tv_addr)) 1407 return -TARGET_EFAULT; 1408 ts.tv_sec = tv.tv_sec; 1409 ts.tv_nsec = tv.tv_usec * 1000; 1410 ts_ptr = &ts; 1411 } else { 1412 ts_ptr = NULL; 1413 } 1414 1415 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 1416 ts_ptr, NULL)); 1417 1418 if (!is_error(ret)) { 1419 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 1420 return -TARGET_EFAULT; 1421 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 1422 return -TARGET_EFAULT; 1423 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 1424 return -TARGET_EFAULT; 1425 1426 if (target_tv_addr) { 1427 tv.tv_sec = ts.tv_sec; 1428 tv.tv_usec = ts.tv_nsec / 1000; 1429 if (copy_to_user_timeval(target_tv_addr, &tv)) { 1430 return -TARGET_EFAULT; 1431 } 1432 } 1433 } 1434 1435 return ret; 1436 } 1437 1438 #if defined(TARGET_WANT_OLD_SYS_SELECT) 1439 static abi_long do_old_select(abi_ulong arg1) 1440 { 1441 struct target_sel_arg_struct *sel; 1442 abi_ulong inp, outp, exp, tvp; 1443 long nsel; 1444 1445 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) { 1446 return -TARGET_EFAULT; 1447 } 1448 1449 nsel = tswapal(sel->n); 1450 inp = tswapal(sel->inp); 1451 outp = tswapal(sel->outp); 1452 exp = tswapal(sel->exp); 1453 tvp = tswapal(sel->tvp); 1454 1455 unlock_user_struct(sel, arg1, 0); 1456 1457 return do_select(nsel, inp, outp, exp, tvp); 1458 } 1459 #endif 1460 #endif 1461 1462 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 1463 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3, 1464 abi_long arg4, abi_long arg5, abi_long arg6, 1465 bool time64) 1466 { 1467 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 1468 fd_set rfds, wfds, efds; 1469 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1470 struct timespec ts, *ts_ptr; 1471 abi_long ret; 1472 1473 /* 1474 * The 6th arg is actually two args smashed together, 1475 * so we cannot use the C library. 1476 */ 1477 sigset_t set; 1478 struct { 1479 sigset_t *set; 1480 size_t size; 1481 } sig, *sig_ptr; 1482 1483 abi_ulong arg_sigset, arg_sigsize, *arg7; 1484 target_sigset_t *target_sigset; 1485 1486 n = arg1; 1487 rfd_addr = arg2; 1488 wfd_addr = arg3; 1489 efd_addr = arg4; 1490 ts_addr = arg5; 1491 1492 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1493 if (ret) { 1494 return ret; 1495 } 1496 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1497 if (ret) { 1498 return ret; 1499 } 1500 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1501 if (ret) { 1502 return ret; 1503 } 1504 1505 /* 1506 * This takes a timespec, and not a timeval, so we cannot 1507 * use the do_select() helper ... 1508 */ 1509 if (ts_addr) { 1510 if (time64) { 1511 if (target_to_host_timespec64(&ts, ts_addr)) { 1512 return -TARGET_EFAULT; 1513 } 1514 } else { 1515 if (target_to_host_timespec(&ts, ts_addr)) { 1516 return -TARGET_EFAULT; 1517 } 1518 } 1519 ts_ptr = &ts; 1520 } else { 1521 ts_ptr = NULL; 1522 } 1523 1524 /* Extract the two packed args for the sigset */ 1525 if (arg6) { 1526 sig_ptr = &sig; 1527 sig.size = SIGSET_T_SIZE; 1528 1529 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 1530 if (!arg7) { 1531 return -TARGET_EFAULT; 1532 } 1533 arg_sigset = tswapal(arg7[0]); 1534 arg_sigsize = tswapal(arg7[1]); 1535 unlock_user(arg7, arg6, 0); 1536 1537 if (arg_sigset) { 1538 sig.set = &set; 1539 if (arg_sigsize != sizeof(*target_sigset)) { 1540 /* Like the kernel, we enforce correct size sigsets */ 1541 return -TARGET_EINVAL; 1542 } 1543 target_sigset = lock_user(VERIFY_READ, arg_sigset, 1544 sizeof(*target_sigset), 1); 1545 if (!target_sigset) { 1546 return -TARGET_EFAULT; 1547 } 1548 target_to_host_sigset(&set, target_sigset); 1549 unlock_user(target_sigset, arg_sigset, 0); 1550 } else { 1551 sig.set = NULL; 1552 } 1553 } else { 1554 sig_ptr = NULL; 1555 } 1556 1557 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 1558 ts_ptr, sig_ptr)); 1559 1560 if (!is_error(ret)) { 1561 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) { 1562 return -TARGET_EFAULT; 1563 } 1564 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) { 1565 return -TARGET_EFAULT; 1566 } 1567 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) { 1568 return -TARGET_EFAULT; 1569 } 1570 if (time64) { 1571 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) { 1572 return -TARGET_EFAULT; 1573 } 1574 } else { 1575 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) { 1576 return -TARGET_EFAULT; 1577 } 1578 } 1579 } 1580 return ret; 1581 } 1582 #endif 1583 1584 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \ 1585 defined(TARGET_NR_ppoll_time64) 1586 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3, 1587 abi_long arg4, abi_long arg5, bool ppoll, bool time64) 1588 { 1589 struct target_pollfd *target_pfd; 1590 unsigned int nfds = arg2; 1591 struct pollfd *pfd; 1592 unsigned int i; 1593 abi_long ret; 1594 1595 pfd = NULL; 1596 target_pfd = NULL; 1597 if (nfds) { 1598 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 1599 return -TARGET_EINVAL; 1600 } 1601 target_pfd = lock_user(VERIFY_WRITE, arg1, 1602 sizeof(struct target_pollfd) * nfds, 1); 1603 if (!target_pfd) { 1604 return -TARGET_EFAULT; 1605 } 1606 1607 pfd = alloca(sizeof(struct pollfd) * nfds); 1608 for (i = 0; i < nfds; i++) { 1609 pfd[i].fd = tswap32(target_pfd[i].fd); 1610 pfd[i].events = tswap16(target_pfd[i].events); 1611 } 1612 } 1613 if (ppoll) { 1614 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 1615 target_sigset_t *target_set; 1616 sigset_t _set, *set = &_set; 1617 1618 if (arg3) { 1619 if (time64) { 1620 if (target_to_host_timespec64(timeout_ts, arg3)) { 1621 unlock_user(target_pfd, arg1, 0); 1622 return -TARGET_EFAULT; 1623 } 1624 } else { 1625 if (target_to_host_timespec(timeout_ts, arg3)) { 1626 unlock_user(target_pfd, arg1, 0); 1627 return -TARGET_EFAULT; 1628 } 1629 } 1630 } else { 1631 timeout_ts = NULL; 1632 } 1633 1634 if (arg4) { 1635 if (arg5 != sizeof(target_sigset_t)) { 1636 unlock_user(target_pfd, arg1, 0); 1637 return -TARGET_EINVAL; 1638 } 1639 1640 target_set = lock_user(VERIFY_READ, arg4, 1641 sizeof(target_sigset_t), 1); 1642 if (!target_set) { 1643 unlock_user(target_pfd, arg1, 0); 1644 return -TARGET_EFAULT; 1645 } 1646 target_to_host_sigset(set, target_set); 1647 } else { 1648 set = NULL; 1649 } 1650 1651 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 1652 set, SIGSET_T_SIZE)); 1653 1654 if (!is_error(ret) && arg3) { 1655 if (time64) { 1656 if (host_to_target_timespec64(arg3, timeout_ts)) { 1657 return -TARGET_EFAULT; 1658 } 1659 } else { 1660 if (host_to_target_timespec(arg3, timeout_ts)) { 1661 return -TARGET_EFAULT; 1662 } 1663 } 1664 } 1665 if (arg4) { 1666 unlock_user(target_set, arg4, 0); 1667 } 1668 } else { 1669 struct timespec ts, *pts; 1670 1671 if (arg3 >= 0) { 1672 /* Convert ms to secs, ns */ 1673 ts.tv_sec = arg3 / 1000; 1674 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 1675 pts = &ts; 1676 } else { 1677 /* -ve poll() timeout means "infinite" */ 1678 pts = NULL; 1679 } 1680 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 1681 } 1682 1683 if (!is_error(ret)) { 1684 for (i = 0; i < nfds; i++) { 1685 target_pfd[i].revents = tswap16(pfd[i].revents); 1686 } 1687 } 1688 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 1689 return ret; 1690 } 1691 #endif 1692 1693 static abi_long do_pipe2(int host_pipe[], int flags) 1694 { 1695 #ifdef CONFIG_PIPE2 1696 return pipe2(host_pipe, flags); 1697 #else 1698 return -ENOSYS; 1699 #endif 1700 } 1701 1702 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, 1703 int flags, int is_pipe2) 1704 { 1705 int host_pipe[2]; 1706 abi_long ret; 1707 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe); 1708 1709 if (is_error(ret)) 1710 return get_errno(ret); 1711 1712 /* Several targets have special calling conventions for the original 1713 pipe syscall, but didn't replicate this into the pipe2 syscall. */ 1714 if (!is_pipe2) { 1715 #if defined(TARGET_ALPHA) 1716 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1]; 1717 return host_pipe[0]; 1718 #elif defined(TARGET_MIPS) 1719 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; 1720 return host_pipe[0]; 1721 #elif defined(TARGET_SH4) 1722 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; 1723 return host_pipe[0]; 1724 #elif defined(TARGET_SPARC) 1725 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1]; 1726 return host_pipe[0]; 1727 #endif 1728 } 1729 1730 if (put_user_s32(host_pipe[0], pipedes) 1731 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0]))) 1732 return -TARGET_EFAULT; 1733 return get_errno(ret); 1734 } 1735 1736 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn, 1737 abi_ulong target_addr, 1738 socklen_t len) 1739 { 1740 struct target_ip_mreqn *target_smreqn; 1741 1742 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1); 1743 if (!target_smreqn) 1744 return -TARGET_EFAULT; 1745 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr; 1746 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr; 1747 if (len == sizeof(struct target_ip_mreqn)) 1748 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex); 1749 unlock_user(target_smreqn, target_addr, 0); 1750 1751 return 0; 1752 } 1753 1754 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, 1755 abi_ulong target_addr, 1756 socklen_t len) 1757 { 1758 const socklen_t unix_maxlen = sizeof (struct sockaddr_un); 1759 sa_family_t sa_family; 1760 struct target_sockaddr *target_saddr; 1761 1762 if (fd_trans_target_to_host_addr(fd)) { 1763 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len); 1764 } 1765 1766 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1767 if (!target_saddr) 1768 return -TARGET_EFAULT; 1769 1770 sa_family = tswap16(target_saddr->sa_family); 1771 1772 /* Oops. The caller might send a incomplete sun_path; sun_path 1773 * must be terminated by \0 (see the manual page), but 1774 * unfortunately it is quite common to specify sockaddr_un 1775 * length as "strlen(x->sun_path)" while it should be 1776 * "strlen(...) + 1". We'll fix that here if needed. 1777 * Linux kernel has a similar feature. 1778 */ 1779 1780 if (sa_family == AF_UNIX) { 1781 if (len < unix_maxlen && len > 0) { 1782 char *cp = (char*)target_saddr; 1783 1784 if ( cp[len-1] && !cp[len] ) 1785 len++; 1786 } 1787 if (len > unix_maxlen) 1788 len = unix_maxlen; 1789 } 1790 1791 memcpy(addr, target_saddr, len); 1792 addr->sa_family = sa_family; 1793 if (sa_family == AF_NETLINK) { 1794 struct sockaddr_nl *nladdr; 1795 1796 nladdr = (struct sockaddr_nl *)addr; 1797 nladdr->nl_pid = tswap32(nladdr->nl_pid); 1798 nladdr->nl_groups = tswap32(nladdr->nl_groups); 1799 } else if (sa_family == AF_PACKET) { 1800 struct target_sockaddr_ll *lladdr; 1801 1802 lladdr = (struct target_sockaddr_ll *)addr; 1803 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); 1804 lladdr->sll_hatype = tswap16(lladdr->sll_hatype); 1805 } 1806 unlock_user(target_saddr, target_addr, 0); 1807 1808 return 0; 1809 } 1810 1811 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1812 struct sockaddr *addr, 1813 socklen_t len) 1814 { 1815 struct target_sockaddr *target_saddr; 1816 1817 if (len == 0) { 1818 return 0; 1819 } 1820 assert(addr); 1821 1822 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1823 if (!target_saddr) 1824 return -TARGET_EFAULT; 1825 memcpy(target_saddr, addr, len); 1826 if (len >= offsetof(struct target_sockaddr, sa_family) + 1827 sizeof(target_saddr->sa_family)) { 1828 target_saddr->sa_family = tswap16(addr->sa_family); 1829 } 1830 if (addr->sa_family == AF_NETLINK && 1831 len >= sizeof(struct target_sockaddr_nl)) { 1832 struct target_sockaddr_nl *target_nl = 1833 (struct target_sockaddr_nl *)target_saddr; 1834 target_nl->nl_pid = tswap32(target_nl->nl_pid); 1835 target_nl->nl_groups = tswap32(target_nl->nl_groups); 1836 } else if (addr->sa_family == AF_PACKET) { 1837 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr; 1838 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex); 1839 target_ll->sll_hatype = tswap16(target_ll->sll_hatype); 1840 } else if (addr->sa_family == AF_INET6 && 1841 len >= sizeof(struct target_sockaddr_in6)) { 1842 struct target_sockaddr_in6 *target_in6 = 1843 (struct target_sockaddr_in6 *)target_saddr; 1844 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id); 1845 } 1846 unlock_user(target_saddr, target_addr, len); 1847 1848 return 0; 1849 } 1850 1851 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1852 struct target_msghdr *target_msgh) 1853 { 1854 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1855 abi_long msg_controllen; 1856 abi_ulong target_cmsg_addr; 1857 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1858 socklen_t space = 0; 1859 1860 msg_controllen = tswapal(target_msgh->msg_controllen); 1861 if (msg_controllen < sizeof (struct target_cmsghdr)) 1862 goto the_end; 1863 target_cmsg_addr = tswapal(target_msgh->msg_control); 1864 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1865 target_cmsg_start = target_cmsg; 1866 if (!target_cmsg) 1867 return -TARGET_EFAULT; 1868 1869 while (cmsg && target_cmsg) { 1870 void *data = CMSG_DATA(cmsg); 1871 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1872 1873 int len = tswapal(target_cmsg->cmsg_len) 1874 - sizeof(struct target_cmsghdr); 1875 1876 space += CMSG_SPACE(len); 1877 if (space > msgh->msg_controllen) { 1878 space -= CMSG_SPACE(len); 1879 /* This is a QEMU bug, since we allocated the payload 1880 * area ourselves (unlike overflow in host-to-target 1881 * conversion, which is just the guest giving us a buffer 1882 * that's too small). It can't happen for the payload types 1883 * we currently support; if it becomes an issue in future 1884 * we would need to improve our allocation strategy to 1885 * something more intelligent than "twice the size of the 1886 * target buffer we're reading from". 1887 */ 1888 qemu_log_mask(LOG_UNIMP, 1889 ("Unsupported ancillary data %d/%d: " 1890 "unhandled msg size\n"), 1891 tswap32(target_cmsg->cmsg_level), 1892 tswap32(target_cmsg->cmsg_type)); 1893 break; 1894 } 1895 1896 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) { 1897 cmsg->cmsg_level = SOL_SOCKET; 1898 } else { 1899 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level); 1900 } 1901 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type); 1902 cmsg->cmsg_len = CMSG_LEN(len); 1903 1904 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 1905 int *fd = (int *)data; 1906 int *target_fd = (int *)target_data; 1907 int i, numfds = len / sizeof(int); 1908 1909 for (i = 0; i < numfds; i++) { 1910 __get_user(fd[i], target_fd + i); 1911 } 1912 } else if (cmsg->cmsg_level == SOL_SOCKET 1913 && cmsg->cmsg_type == SCM_CREDENTIALS) { 1914 struct ucred *cred = (struct ucred *)data; 1915 struct target_ucred *target_cred = 1916 (struct target_ucred *)target_data; 1917 1918 __get_user(cred->pid, &target_cred->pid); 1919 __get_user(cred->uid, &target_cred->uid); 1920 __get_user(cred->gid, &target_cred->gid); 1921 } else { 1922 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 1923 cmsg->cmsg_level, cmsg->cmsg_type); 1924 memcpy(data, target_data, len); 1925 } 1926 1927 cmsg = CMSG_NXTHDR(msgh, cmsg); 1928 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 1929 target_cmsg_start); 1930 } 1931 unlock_user(target_cmsg, target_cmsg_addr, 0); 1932 the_end: 1933 msgh->msg_controllen = space; 1934 return 0; 1935 } 1936 1937 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1938 struct msghdr *msgh) 1939 { 1940 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1941 abi_long msg_controllen; 1942 abi_ulong target_cmsg_addr; 1943 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1944 socklen_t space = 0; 1945 1946 msg_controllen = tswapal(target_msgh->msg_controllen); 1947 if (msg_controllen < sizeof (struct target_cmsghdr)) 1948 goto the_end; 1949 target_cmsg_addr = tswapal(target_msgh->msg_control); 1950 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1951 target_cmsg_start = target_cmsg; 1952 if (!target_cmsg) 1953 return -TARGET_EFAULT; 1954 1955 while (cmsg && target_cmsg) { 1956 void *data = CMSG_DATA(cmsg); 1957 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1958 1959 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1960 int tgt_len, tgt_space; 1961 1962 /* We never copy a half-header but may copy half-data; 1963 * this is Linux's behaviour in put_cmsg(). Note that 1964 * truncation here is a guest problem (which we report 1965 * to the guest via the CTRUNC bit), unlike truncation 1966 * in target_to_host_cmsg, which is a QEMU bug. 1967 */ 1968 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1969 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1970 break; 1971 } 1972 1973 if (cmsg->cmsg_level == SOL_SOCKET) { 1974 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1975 } else { 1976 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1977 } 1978 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1979 1980 /* Payload types which need a different size of payload on 1981 * the target must adjust tgt_len here. 1982 */ 1983 tgt_len = len; 1984 switch (cmsg->cmsg_level) { 1985 case SOL_SOCKET: 1986 switch (cmsg->cmsg_type) { 1987 case SO_TIMESTAMP: 1988 tgt_len = sizeof(struct target_timeval); 1989 break; 1990 default: 1991 break; 1992 } 1993 break; 1994 default: 1995 break; 1996 } 1997 1998 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1999 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 2000 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 2001 } 2002 2003 /* We must now copy-and-convert len bytes of payload 2004 * into tgt_len bytes of destination space. Bear in mind 2005 * that in both source and destination we may be dealing 2006 * with a truncated value! 2007 */ 2008 switch (cmsg->cmsg_level) { 2009 case SOL_SOCKET: 2010 switch (cmsg->cmsg_type) { 2011 case SCM_RIGHTS: 2012 { 2013 int *fd = (int *)data; 2014 int *target_fd = (int *)target_data; 2015 int i, numfds = tgt_len / sizeof(int); 2016 2017 for (i = 0; i < numfds; i++) { 2018 __put_user(fd[i], target_fd + i); 2019 } 2020 break; 2021 } 2022 case SO_TIMESTAMP: 2023 { 2024 struct timeval *tv = (struct timeval *)data; 2025 struct target_timeval *target_tv = 2026 (struct target_timeval *)target_data; 2027 2028 if (len != sizeof(struct timeval) || 2029 tgt_len != sizeof(struct target_timeval)) { 2030 goto unimplemented; 2031 } 2032 2033 /* copy struct timeval to target */ 2034 __put_user(tv->tv_sec, &target_tv->tv_sec); 2035 __put_user(tv->tv_usec, &target_tv->tv_usec); 2036 break; 2037 } 2038 case SCM_CREDENTIALS: 2039 { 2040 struct ucred *cred = (struct ucred *)data; 2041 struct target_ucred *target_cred = 2042 (struct target_ucred *)target_data; 2043 2044 __put_user(cred->pid, &target_cred->pid); 2045 __put_user(cred->uid, &target_cred->uid); 2046 __put_user(cred->gid, &target_cred->gid); 2047 break; 2048 } 2049 default: 2050 goto unimplemented; 2051 } 2052 break; 2053 2054 case SOL_IP: 2055 switch (cmsg->cmsg_type) { 2056 case IP_TTL: 2057 { 2058 uint32_t *v = (uint32_t *)data; 2059 uint32_t *t_int = (uint32_t *)target_data; 2060 2061 if (len != sizeof(uint32_t) || 2062 tgt_len != sizeof(uint32_t)) { 2063 goto unimplemented; 2064 } 2065 __put_user(*v, t_int); 2066 break; 2067 } 2068 case IP_RECVERR: 2069 { 2070 struct errhdr_t { 2071 struct sock_extended_err ee; 2072 struct sockaddr_in offender; 2073 }; 2074 struct errhdr_t *errh = (struct errhdr_t *)data; 2075 struct errhdr_t *target_errh = 2076 (struct errhdr_t *)target_data; 2077 2078 if (len != sizeof(struct errhdr_t) || 2079 tgt_len != sizeof(struct errhdr_t)) { 2080 goto unimplemented; 2081 } 2082 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2083 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2084 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2085 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2086 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2087 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2088 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2089 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2090 (void *) &errh->offender, sizeof(errh->offender)); 2091 break; 2092 } 2093 default: 2094 goto unimplemented; 2095 } 2096 break; 2097 2098 case SOL_IPV6: 2099 switch (cmsg->cmsg_type) { 2100 case IPV6_HOPLIMIT: 2101 { 2102 uint32_t *v = (uint32_t *)data; 2103 uint32_t *t_int = (uint32_t *)target_data; 2104 2105 if (len != sizeof(uint32_t) || 2106 tgt_len != sizeof(uint32_t)) { 2107 goto unimplemented; 2108 } 2109 __put_user(*v, t_int); 2110 break; 2111 } 2112 case IPV6_RECVERR: 2113 { 2114 struct errhdr6_t { 2115 struct sock_extended_err ee; 2116 struct sockaddr_in6 offender; 2117 }; 2118 struct errhdr6_t *errh = (struct errhdr6_t *)data; 2119 struct errhdr6_t *target_errh = 2120 (struct errhdr6_t *)target_data; 2121 2122 if (len != sizeof(struct errhdr6_t) || 2123 tgt_len != sizeof(struct errhdr6_t)) { 2124 goto unimplemented; 2125 } 2126 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2127 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2128 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2129 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2130 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2131 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2132 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2133 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2134 (void *) &errh->offender, sizeof(errh->offender)); 2135 break; 2136 } 2137 default: 2138 goto unimplemented; 2139 } 2140 break; 2141 2142 default: 2143 unimplemented: 2144 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 2145 cmsg->cmsg_level, cmsg->cmsg_type); 2146 memcpy(target_data, data, MIN(len, tgt_len)); 2147 if (tgt_len > len) { 2148 memset(target_data + len, 0, tgt_len - len); 2149 } 2150 } 2151 2152 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 2153 tgt_space = TARGET_CMSG_SPACE(tgt_len); 2154 if (msg_controllen < tgt_space) { 2155 tgt_space = msg_controllen; 2156 } 2157 msg_controllen -= tgt_space; 2158 space += tgt_space; 2159 cmsg = CMSG_NXTHDR(msgh, cmsg); 2160 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 2161 target_cmsg_start); 2162 } 2163 unlock_user(target_cmsg, target_cmsg_addr, space); 2164 the_end: 2165 target_msgh->msg_controllen = tswapal(space); 2166 return 0; 2167 } 2168 2169 /* do_setsockopt() Must return target values and target errnos. */ 2170 static abi_long do_setsockopt(int sockfd, int level, int optname, 2171 abi_ulong optval_addr, socklen_t optlen) 2172 { 2173 abi_long ret; 2174 int val; 2175 struct ip_mreqn *ip_mreq; 2176 struct ip_mreq_source *ip_mreq_source; 2177 2178 switch(level) { 2179 case SOL_TCP: 2180 /* TCP options all take an 'int' value. */ 2181 if (optlen < sizeof(uint32_t)) 2182 return -TARGET_EINVAL; 2183 2184 if (get_user_u32(val, optval_addr)) 2185 return -TARGET_EFAULT; 2186 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2187 break; 2188 case SOL_IP: 2189 switch(optname) { 2190 case IP_TOS: 2191 case IP_TTL: 2192 case IP_HDRINCL: 2193 case IP_ROUTER_ALERT: 2194 case IP_RECVOPTS: 2195 case IP_RETOPTS: 2196 case IP_PKTINFO: 2197 case IP_MTU_DISCOVER: 2198 case IP_RECVERR: 2199 case IP_RECVTTL: 2200 case IP_RECVTOS: 2201 #ifdef IP_FREEBIND 2202 case IP_FREEBIND: 2203 #endif 2204 case IP_MULTICAST_TTL: 2205 case IP_MULTICAST_LOOP: 2206 val = 0; 2207 if (optlen >= sizeof(uint32_t)) { 2208 if (get_user_u32(val, optval_addr)) 2209 return -TARGET_EFAULT; 2210 } else if (optlen >= 1) { 2211 if (get_user_u8(val, optval_addr)) 2212 return -TARGET_EFAULT; 2213 } 2214 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2215 break; 2216 case IP_ADD_MEMBERSHIP: 2217 case IP_DROP_MEMBERSHIP: 2218 if (optlen < sizeof (struct target_ip_mreq) || 2219 optlen > sizeof (struct target_ip_mreqn)) 2220 return -TARGET_EINVAL; 2221 2222 ip_mreq = (struct ip_mreqn *) alloca(optlen); 2223 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 2224 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 2225 break; 2226 2227 case IP_BLOCK_SOURCE: 2228 case IP_UNBLOCK_SOURCE: 2229 case IP_ADD_SOURCE_MEMBERSHIP: 2230 case IP_DROP_SOURCE_MEMBERSHIP: 2231 if (optlen != sizeof (struct target_ip_mreq_source)) 2232 return -TARGET_EINVAL; 2233 2234 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2235 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 2236 unlock_user (ip_mreq_source, optval_addr, 0); 2237 break; 2238 2239 default: 2240 goto unimplemented; 2241 } 2242 break; 2243 case SOL_IPV6: 2244 switch (optname) { 2245 case IPV6_MTU_DISCOVER: 2246 case IPV6_MTU: 2247 case IPV6_V6ONLY: 2248 case IPV6_RECVPKTINFO: 2249 case IPV6_UNICAST_HOPS: 2250 case IPV6_MULTICAST_HOPS: 2251 case IPV6_MULTICAST_LOOP: 2252 case IPV6_RECVERR: 2253 case IPV6_RECVHOPLIMIT: 2254 case IPV6_2292HOPLIMIT: 2255 case IPV6_CHECKSUM: 2256 case IPV6_ADDRFORM: 2257 case IPV6_2292PKTINFO: 2258 case IPV6_RECVTCLASS: 2259 case IPV6_RECVRTHDR: 2260 case IPV6_2292RTHDR: 2261 case IPV6_RECVHOPOPTS: 2262 case IPV6_2292HOPOPTS: 2263 case IPV6_RECVDSTOPTS: 2264 case IPV6_2292DSTOPTS: 2265 case IPV6_TCLASS: 2266 #ifdef IPV6_RECVPATHMTU 2267 case IPV6_RECVPATHMTU: 2268 #endif 2269 #ifdef IPV6_TRANSPARENT 2270 case IPV6_TRANSPARENT: 2271 #endif 2272 #ifdef IPV6_FREEBIND 2273 case IPV6_FREEBIND: 2274 #endif 2275 #ifdef IPV6_RECVORIGDSTADDR 2276 case IPV6_RECVORIGDSTADDR: 2277 #endif 2278 val = 0; 2279 if (optlen < sizeof(uint32_t)) { 2280 return -TARGET_EINVAL; 2281 } 2282 if (get_user_u32(val, optval_addr)) { 2283 return -TARGET_EFAULT; 2284 } 2285 ret = get_errno(setsockopt(sockfd, level, optname, 2286 &val, sizeof(val))); 2287 break; 2288 case IPV6_PKTINFO: 2289 { 2290 struct in6_pktinfo pki; 2291 2292 if (optlen < sizeof(pki)) { 2293 return -TARGET_EINVAL; 2294 } 2295 2296 if (copy_from_user(&pki, optval_addr, sizeof(pki))) { 2297 return -TARGET_EFAULT; 2298 } 2299 2300 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); 2301 2302 ret = get_errno(setsockopt(sockfd, level, optname, 2303 &pki, sizeof(pki))); 2304 break; 2305 } 2306 case IPV6_ADD_MEMBERSHIP: 2307 case IPV6_DROP_MEMBERSHIP: 2308 { 2309 struct ipv6_mreq ipv6mreq; 2310 2311 if (optlen < sizeof(ipv6mreq)) { 2312 return -TARGET_EINVAL; 2313 } 2314 2315 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) { 2316 return -TARGET_EFAULT; 2317 } 2318 2319 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface); 2320 2321 ret = get_errno(setsockopt(sockfd, level, optname, 2322 &ipv6mreq, sizeof(ipv6mreq))); 2323 break; 2324 } 2325 default: 2326 goto unimplemented; 2327 } 2328 break; 2329 case SOL_ICMPV6: 2330 switch (optname) { 2331 case ICMPV6_FILTER: 2332 { 2333 struct icmp6_filter icmp6f; 2334 2335 if (optlen > sizeof(icmp6f)) { 2336 optlen = sizeof(icmp6f); 2337 } 2338 2339 if (copy_from_user(&icmp6f, optval_addr, optlen)) { 2340 return -TARGET_EFAULT; 2341 } 2342 2343 for (val = 0; val < 8; val++) { 2344 icmp6f.data[val] = tswap32(icmp6f.data[val]); 2345 } 2346 2347 ret = get_errno(setsockopt(sockfd, level, optname, 2348 &icmp6f, optlen)); 2349 break; 2350 } 2351 default: 2352 goto unimplemented; 2353 } 2354 break; 2355 case SOL_RAW: 2356 switch (optname) { 2357 case ICMP_FILTER: 2358 case IPV6_CHECKSUM: 2359 /* those take an u32 value */ 2360 if (optlen < sizeof(uint32_t)) { 2361 return -TARGET_EINVAL; 2362 } 2363 2364 if (get_user_u32(val, optval_addr)) { 2365 return -TARGET_EFAULT; 2366 } 2367 ret = get_errno(setsockopt(sockfd, level, optname, 2368 &val, sizeof(val))); 2369 break; 2370 2371 default: 2372 goto unimplemented; 2373 } 2374 break; 2375 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE) 2376 case SOL_ALG: 2377 switch (optname) { 2378 case ALG_SET_KEY: 2379 { 2380 char *alg_key = g_malloc(optlen); 2381 2382 if (!alg_key) { 2383 return -TARGET_ENOMEM; 2384 } 2385 if (copy_from_user(alg_key, optval_addr, optlen)) { 2386 g_free(alg_key); 2387 return -TARGET_EFAULT; 2388 } 2389 ret = get_errno(setsockopt(sockfd, level, optname, 2390 alg_key, optlen)); 2391 g_free(alg_key); 2392 break; 2393 } 2394 case ALG_SET_AEAD_AUTHSIZE: 2395 { 2396 ret = get_errno(setsockopt(sockfd, level, optname, 2397 NULL, optlen)); 2398 break; 2399 } 2400 default: 2401 goto unimplemented; 2402 } 2403 break; 2404 #endif 2405 case TARGET_SOL_SOCKET: 2406 switch (optname) { 2407 case TARGET_SO_RCVTIMEO: 2408 { 2409 struct timeval tv; 2410 2411 optname = SO_RCVTIMEO; 2412 2413 set_timeout: 2414 if (optlen != sizeof(struct target_timeval)) { 2415 return -TARGET_EINVAL; 2416 } 2417 2418 if (copy_from_user_timeval(&tv, optval_addr)) { 2419 return -TARGET_EFAULT; 2420 } 2421 2422 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2423 &tv, sizeof(tv))); 2424 return ret; 2425 } 2426 case TARGET_SO_SNDTIMEO: 2427 optname = SO_SNDTIMEO; 2428 goto set_timeout; 2429 case TARGET_SO_ATTACH_FILTER: 2430 { 2431 struct target_sock_fprog *tfprog; 2432 struct target_sock_filter *tfilter; 2433 struct sock_fprog fprog; 2434 struct sock_filter *filter; 2435 int i; 2436 2437 if (optlen != sizeof(*tfprog)) { 2438 return -TARGET_EINVAL; 2439 } 2440 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 2441 return -TARGET_EFAULT; 2442 } 2443 if (!lock_user_struct(VERIFY_READ, tfilter, 2444 tswapal(tfprog->filter), 0)) { 2445 unlock_user_struct(tfprog, optval_addr, 1); 2446 return -TARGET_EFAULT; 2447 } 2448 2449 fprog.len = tswap16(tfprog->len); 2450 filter = g_try_new(struct sock_filter, fprog.len); 2451 if (filter == NULL) { 2452 unlock_user_struct(tfilter, tfprog->filter, 1); 2453 unlock_user_struct(tfprog, optval_addr, 1); 2454 return -TARGET_ENOMEM; 2455 } 2456 for (i = 0; i < fprog.len; i++) { 2457 filter[i].code = tswap16(tfilter[i].code); 2458 filter[i].jt = tfilter[i].jt; 2459 filter[i].jf = tfilter[i].jf; 2460 filter[i].k = tswap32(tfilter[i].k); 2461 } 2462 fprog.filter = filter; 2463 2464 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 2465 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 2466 g_free(filter); 2467 2468 unlock_user_struct(tfilter, tfprog->filter, 1); 2469 unlock_user_struct(tfprog, optval_addr, 1); 2470 return ret; 2471 } 2472 case TARGET_SO_BINDTODEVICE: 2473 { 2474 char *dev_ifname, *addr_ifname; 2475 2476 if (optlen > IFNAMSIZ - 1) { 2477 optlen = IFNAMSIZ - 1; 2478 } 2479 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2480 if (!dev_ifname) { 2481 return -TARGET_EFAULT; 2482 } 2483 optname = SO_BINDTODEVICE; 2484 addr_ifname = alloca(IFNAMSIZ); 2485 memcpy(addr_ifname, dev_ifname, optlen); 2486 addr_ifname[optlen] = 0; 2487 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2488 addr_ifname, optlen)); 2489 unlock_user (dev_ifname, optval_addr, 0); 2490 return ret; 2491 } 2492 case TARGET_SO_LINGER: 2493 { 2494 struct linger lg; 2495 struct target_linger *tlg; 2496 2497 if (optlen != sizeof(struct target_linger)) { 2498 return -TARGET_EINVAL; 2499 } 2500 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) { 2501 return -TARGET_EFAULT; 2502 } 2503 __get_user(lg.l_onoff, &tlg->l_onoff); 2504 __get_user(lg.l_linger, &tlg->l_linger); 2505 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER, 2506 &lg, sizeof(lg))); 2507 unlock_user_struct(tlg, optval_addr, 0); 2508 return ret; 2509 } 2510 /* Options with 'int' argument. */ 2511 case TARGET_SO_DEBUG: 2512 optname = SO_DEBUG; 2513 break; 2514 case TARGET_SO_REUSEADDR: 2515 optname = SO_REUSEADDR; 2516 break; 2517 #ifdef SO_REUSEPORT 2518 case TARGET_SO_REUSEPORT: 2519 optname = SO_REUSEPORT; 2520 break; 2521 #endif 2522 case TARGET_SO_TYPE: 2523 optname = SO_TYPE; 2524 break; 2525 case TARGET_SO_ERROR: 2526 optname = SO_ERROR; 2527 break; 2528 case TARGET_SO_DONTROUTE: 2529 optname = SO_DONTROUTE; 2530 break; 2531 case TARGET_SO_BROADCAST: 2532 optname = SO_BROADCAST; 2533 break; 2534 case TARGET_SO_SNDBUF: 2535 optname = SO_SNDBUF; 2536 break; 2537 case TARGET_SO_SNDBUFFORCE: 2538 optname = SO_SNDBUFFORCE; 2539 break; 2540 case TARGET_SO_RCVBUF: 2541 optname = SO_RCVBUF; 2542 break; 2543 case TARGET_SO_RCVBUFFORCE: 2544 optname = SO_RCVBUFFORCE; 2545 break; 2546 case TARGET_SO_KEEPALIVE: 2547 optname = SO_KEEPALIVE; 2548 break; 2549 case TARGET_SO_OOBINLINE: 2550 optname = SO_OOBINLINE; 2551 break; 2552 case TARGET_SO_NO_CHECK: 2553 optname = SO_NO_CHECK; 2554 break; 2555 case TARGET_SO_PRIORITY: 2556 optname = SO_PRIORITY; 2557 break; 2558 #ifdef SO_BSDCOMPAT 2559 case TARGET_SO_BSDCOMPAT: 2560 optname = SO_BSDCOMPAT; 2561 break; 2562 #endif 2563 case TARGET_SO_PASSCRED: 2564 optname = SO_PASSCRED; 2565 break; 2566 case TARGET_SO_PASSSEC: 2567 optname = SO_PASSSEC; 2568 break; 2569 case TARGET_SO_TIMESTAMP: 2570 optname = SO_TIMESTAMP; 2571 break; 2572 case TARGET_SO_RCVLOWAT: 2573 optname = SO_RCVLOWAT; 2574 break; 2575 default: 2576 goto unimplemented; 2577 } 2578 if (optlen < sizeof(uint32_t)) 2579 return -TARGET_EINVAL; 2580 2581 if (get_user_u32(val, optval_addr)) 2582 return -TARGET_EFAULT; 2583 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 2584 break; 2585 #ifdef SOL_NETLINK 2586 case SOL_NETLINK: 2587 switch (optname) { 2588 case NETLINK_PKTINFO: 2589 case NETLINK_ADD_MEMBERSHIP: 2590 case NETLINK_DROP_MEMBERSHIP: 2591 case NETLINK_BROADCAST_ERROR: 2592 case NETLINK_NO_ENOBUFS: 2593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2594 case NETLINK_LISTEN_ALL_NSID: 2595 case NETLINK_CAP_ACK: 2596 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2598 case NETLINK_EXT_ACK: 2599 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2601 case NETLINK_GET_STRICT_CHK: 2602 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2603 break; 2604 default: 2605 goto unimplemented; 2606 } 2607 val = 0; 2608 if (optlen < sizeof(uint32_t)) { 2609 return -TARGET_EINVAL; 2610 } 2611 if (get_user_u32(val, optval_addr)) { 2612 return -TARGET_EFAULT; 2613 } 2614 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val, 2615 sizeof(val))); 2616 break; 2617 #endif /* SOL_NETLINK */ 2618 default: 2619 unimplemented: 2620 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n", 2621 level, optname); 2622 ret = -TARGET_ENOPROTOOPT; 2623 } 2624 return ret; 2625 } 2626 2627 /* do_getsockopt() Must return target values and target errnos. */ 2628 static abi_long do_getsockopt(int sockfd, int level, int optname, 2629 abi_ulong optval_addr, abi_ulong optlen) 2630 { 2631 abi_long ret; 2632 int len, val; 2633 socklen_t lv; 2634 2635 switch(level) { 2636 case TARGET_SOL_SOCKET: 2637 level = SOL_SOCKET; 2638 switch (optname) { 2639 /* These don't just return a single integer */ 2640 case TARGET_SO_PEERNAME: 2641 goto unimplemented; 2642 case TARGET_SO_RCVTIMEO: { 2643 struct timeval tv; 2644 socklen_t tvlen; 2645 2646 optname = SO_RCVTIMEO; 2647 2648 get_timeout: 2649 if (get_user_u32(len, optlen)) { 2650 return -TARGET_EFAULT; 2651 } 2652 if (len < 0) { 2653 return -TARGET_EINVAL; 2654 } 2655 2656 tvlen = sizeof(tv); 2657 ret = get_errno(getsockopt(sockfd, level, optname, 2658 &tv, &tvlen)); 2659 if (ret < 0) { 2660 return ret; 2661 } 2662 if (len > sizeof(struct target_timeval)) { 2663 len = sizeof(struct target_timeval); 2664 } 2665 if (copy_to_user_timeval(optval_addr, &tv)) { 2666 return -TARGET_EFAULT; 2667 } 2668 if (put_user_u32(len, optlen)) { 2669 return -TARGET_EFAULT; 2670 } 2671 break; 2672 } 2673 case TARGET_SO_SNDTIMEO: 2674 optname = SO_SNDTIMEO; 2675 goto get_timeout; 2676 case TARGET_SO_PEERCRED: { 2677 struct ucred cr; 2678 socklen_t crlen; 2679 struct target_ucred *tcr; 2680 2681 if (get_user_u32(len, optlen)) { 2682 return -TARGET_EFAULT; 2683 } 2684 if (len < 0) { 2685 return -TARGET_EINVAL; 2686 } 2687 2688 crlen = sizeof(cr); 2689 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 2690 &cr, &crlen)); 2691 if (ret < 0) { 2692 return ret; 2693 } 2694 if (len > crlen) { 2695 len = crlen; 2696 } 2697 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 2698 return -TARGET_EFAULT; 2699 } 2700 __put_user(cr.pid, &tcr->pid); 2701 __put_user(cr.uid, &tcr->uid); 2702 __put_user(cr.gid, &tcr->gid); 2703 unlock_user_struct(tcr, optval_addr, 1); 2704 if (put_user_u32(len, optlen)) { 2705 return -TARGET_EFAULT; 2706 } 2707 break; 2708 } 2709 case TARGET_SO_PEERSEC: { 2710 char *name; 2711 2712 if (get_user_u32(len, optlen)) { 2713 return -TARGET_EFAULT; 2714 } 2715 if (len < 0) { 2716 return -TARGET_EINVAL; 2717 } 2718 name = lock_user(VERIFY_WRITE, optval_addr, len, 0); 2719 if (!name) { 2720 return -TARGET_EFAULT; 2721 } 2722 lv = len; 2723 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC, 2724 name, &lv)); 2725 if (put_user_u32(lv, optlen)) { 2726 ret = -TARGET_EFAULT; 2727 } 2728 unlock_user(name, optval_addr, lv); 2729 break; 2730 } 2731 case TARGET_SO_LINGER: 2732 { 2733 struct linger lg; 2734 socklen_t lglen; 2735 struct target_linger *tlg; 2736 2737 if (get_user_u32(len, optlen)) { 2738 return -TARGET_EFAULT; 2739 } 2740 if (len < 0) { 2741 return -TARGET_EINVAL; 2742 } 2743 2744 lglen = sizeof(lg); 2745 ret = get_errno(getsockopt(sockfd, level, SO_LINGER, 2746 &lg, &lglen)); 2747 if (ret < 0) { 2748 return ret; 2749 } 2750 if (len > lglen) { 2751 len = lglen; 2752 } 2753 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) { 2754 return -TARGET_EFAULT; 2755 } 2756 __put_user(lg.l_onoff, &tlg->l_onoff); 2757 __put_user(lg.l_linger, &tlg->l_linger); 2758 unlock_user_struct(tlg, optval_addr, 1); 2759 if (put_user_u32(len, optlen)) { 2760 return -TARGET_EFAULT; 2761 } 2762 break; 2763 } 2764 /* Options with 'int' argument. */ 2765 case TARGET_SO_DEBUG: 2766 optname = SO_DEBUG; 2767 goto int_case; 2768 case TARGET_SO_REUSEADDR: 2769 optname = SO_REUSEADDR; 2770 goto int_case; 2771 #ifdef SO_REUSEPORT 2772 case TARGET_SO_REUSEPORT: 2773 optname = SO_REUSEPORT; 2774 goto int_case; 2775 #endif 2776 case TARGET_SO_TYPE: 2777 optname = SO_TYPE; 2778 goto int_case; 2779 case TARGET_SO_ERROR: 2780 optname = SO_ERROR; 2781 goto int_case; 2782 case TARGET_SO_DONTROUTE: 2783 optname = SO_DONTROUTE; 2784 goto int_case; 2785 case TARGET_SO_BROADCAST: 2786 optname = SO_BROADCAST; 2787 goto int_case; 2788 case TARGET_SO_SNDBUF: 2789 optname = SO_SNDBUF; 2790 goto int_case; 2791 case TARGET_SO_RCVBUF: 2792 optname = SO_RCVBUF; 2793 goto int_case; 2794 case TARGET_SO_KEEPALIVE: 2795 optname = SO_KEEPALIVE; 2796 goto int_case; 2797 case TARGET_SO_OOBINLINE: 2798 optname = SO_OOBINLINE; 2799 goto int_case; 2800 case TARGET_SO_NO_CHECK: 2801 optname = SO_NO_CHECK; 2802 goto int_case; 2803 case TARGET_SO_PRIORITY: 2804 optname = SO_PRIORITY; 2805 goto int_case; 2806 #ifdef SO_BSDCOMPAT 2807 case TARGET_SO_BSDCOMPAT: 2808 optname = SO_BSDCOMPAT; 2809 goto int_case; 2810 #endif 2811 case TARGET_SO_PASSCRED: 2812 optname = SO_PASSCRED; 2813 goto int_case; 2814 case TARGET_SO_TIMESTAMP: 2815 optname = SO_TIMESTAMP; 2816 goto int_case; 2817 case TARGET_SO_RCVLOWAT: 2818 optname = SO_RCVLOWAT; 2819 goto int_case; 2820 case TARGET_SO_ACCEPTCONN: 2821 optname = SO_ACCEPTCONN; 2822 goto int_case; 2823 default: 2824 goto int_case; 2825 } 2826 break; 2827 case SOL_TCP: 2828 /* TCP options all take an 'int' value. */ 2829 int_case: 2830 if (get_user_u32(len, optlen)) 2831 return -TARGET_EFAULT; 2832 if (len < 0) 2833 return -TARGET_EINVAL; 2834 lv = sizeof(lv); 2835 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2836 if (ret < 0) 2837 return ret; 2838 if (optname == SO_TYPE) { 2839 val = host_to_target_sock_type(val); 2840 } 2841 if (len > lv) 2842 len = lv; 2843 if (len == 4) { 2844 if (put_user_u32(val, optval_addr)) 2845 return -TARGET_EFAULT; 2846 } else { 2847 if (put_user_u8(val, optval_addr)) 2848 return -TARGET_EFAULT; 2849 } 2850 if (put_user_u32(len, optlen)) 2851 return -TARGET_EFAULT; 2852 break; 2853 case SOL_IP: 2854 switch(optname) { 2855 case IP_TOS: 2856 case IP_TTL: 2857 case IP_HDRINCL: 2858 case IP_ROUTER_ALERT: 2859 case IP_RECVOPTS: 2860 case IP_RETOPTS: 2861 case IP_PKTINFO: 2862 case IP_MTU_DISCOVER: 2863 case IP_RECVERR: 2864 case IP_RECVTOS: 2865 #ifdef IP_FREEBIND 2866 case IP_FREEBIND: 2867 #endif 2868 case IP_MULTICAST_TTL: 2869 case IP_MULTICAST_LOOP: 2870 if (get_user_u32(len, optlen)) 2871 return -TARGET_EFAULT; 2872 if (len < 0) 2873 return -TARGET_EINVAL; 2874 lv = sizeof(lv); 2875 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2876 if (ret < 0) 2877 return ret; 2878 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2879 len = 1; 2880 if (put_user_u32(len, optlen) 2881 || put_user_u8(val, optval_addr)) 2882 return -TARGET_EFAULT; 2883 } else { 2884 if (len > sizeof(int)) 2885 len = sizeof(int); 2886 if (put_user_u32(len, optlen) 2887 || put_user_u32(val, optval_addr)) 2888 return -TARGET_EFAULT; 2889 } 2890 break; 2891 default: 2892 ret = -TARGET_ENOPROTOOPT; 2893 break; 2894 } 2895 break; 2896 case SOL_IPV6: 2897 switch (optname) { 2898 case IPV6_MTU_DISCOVER: 2899 case IPV6_MTU: 2900 case IPV6_V6ONLY: 2901 case IPV6_RECVPKTINFO: 2902 case IPV6_UNICAST_HOPS: 2903 case IPV6_MULTICAST_HOPS: 2904 case IPV6_MULTICAST_LOOP: 2905 case IPV6_RECVERR: 2906 case IPV6_RECVHOPLIMIT: 2907 case IPV6_2292HOPLIMIT: 2908 case IPV6_CHECKSUM: 2909 case IPV6_ADDRFORM: 2910 case IPV6_2292PKTINFO: 2911 case IPV6_RECVTCLASS: 2912 case IPV6_RECVRTHDR: 2913 case IPV6_2292RTHDR: 2914 case IPV6_RECVHOPOPTS: 2915 case IPV6_2292HOPOPTS: 2916 case IPV6_RECVDSTOPTS: 2917 case IPV6_2292DSTOPTS: 2918 case IPV6_TCLASS: 2919 #ifdef IPV6_RECVPATHMTU 2920 case IPV6_RECVPATHMTU: 2921 #endif 2922 #ifdef IPV6_TRANSPARENT 2923 case IPV6_TRANSPARENT: 2924 #endif 2925 #ifdef IPV6_FREEBIND 2926 case IPV6_FREEBIND: 2927 #endif 2928 #ifdef IPV6_RECVORIGDSTADDR 2929 case IPV6_RECVORIGDSTADDR: 2930 #endif 2931 if (get_user_u32(len, optlen)) 2932 return -TARGET_EFAULT; 2933 if (len < 0) 2934 return -TARGET_EINVAL; 2935 lv = sizeof(lv); 2936 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2937 if (ret < 0) 2938 return ret; 2939 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2940 len = 1; 2941 if (put_user_u32(len, optlen) 2942 || put_user_u8(val, optval_addr)) 2943 return -TARGET_EFAULT; 2944 } else { 2945 if (len > sizeof(int)) 2946 len = sizeof(int); 2947 if (put_user_u32(len, optlen) 2948 || put_user_u32(val, optval_addr)) 2949 return -TARGET_EFAULT; 2950 } 2951 break; 2952 default: 2953 ret = -TARGET_ENOPROTOOPT; 2954 break; 2955 } 2956 break; 2957 #ifdef SOL_NETLINK 2958 case SOL_NETLINK: 2959 switch (optname) { 2960 case NETLINK_PKTINFO: 2961 case NETLINK_BROADCAST_ERROR: 2962 case NETLINK_NO_ENOBUFS: 2963 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2964 case NETLINK_LISTEN_ALL_NSID: 2965 case NETLINK_CAP_ACK: 2966 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2967 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2968 case NETLINK_EXT_ACK: 2969 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2970 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2971 case NETLINK_GET_STRICT_CHK: 2972 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2973 if (get_user_u32(len, optlen)) { 2974 return -TARGET_EFAULT; 2975 } 2976 if (len != sizeof(val)) { 2977 return -TARGET_EINVAL; 2978 } 2979 lv = len; 2980 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2981 if (ret < 0) { 2982 return ret; 2983 } 2984 if (put_user_u32(lv, optlen) 2985 || put_user_u32(val, optval_addr)) { 2986 return -TARGET_EFAULT; 2987 } 2988 break; 2989 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2990 case NETLINK_LIST_MEMBERSHIPS: 2991 { 2992 uint32_t *results; 2993 int i; 2994 if (get_user_u32(len, optlen)) { 2995 return -TARGET_EFAULT; 2996 } 2997 if (len < 0) { 2998 return -TARGET_EINVAL; 2999 } 3000 results = lock_user(VERIFY_WRITE, optval_addr, len, 1); 3001 if (!results) { 3002 return -TARGET_EFAULT; 3003 } 3004 lv = len; 3005 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv)); 3006 if (ret < 0) { 3007 unlock_user(results, optval_addr, 0); 3008 return ret; 3009 } 3010 /* swap host endianess to target endianess. */ 3011 for (i = 0; i < (len / sizeof(uint32_t)); i++) { 3012 results[i] = tswap32(results[i]); 3013 } 3014 if (put_user_u32(lv, optlen)) { 3015 return -TARGET_EFAULT; 3016 } 3017 unlock_user(results, optval_addr, 0); 3018 break; 3019 } 3020 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 3021 default: 3022 goto unimplemented; 3023 } 3024 break; 3025 #endif /* SOL_NETLINK */ 3026 default: 3027 unimplemented: 3028 qemu_log_mask(LOG_UNIMP, 3029 "getsockopt level=%d optname=%d not yet supported\n", 3030 level, optname); 3031 ret = -TARGET_EOPNOTSUPP; 3032 break; 3033 } 3034 return ret; 3035 } 3036 3037 /* Convert target low/high pair representing file offset into the host 3038 * low/high pair. This function doesn't handle offsets bigger than 64 bits 3039 * as the kernel doesn't handle them either. 3040 */ 3041 static void target_to_host_low_high(abi_ulong tlow, 3042 abi_ulong thigh, 3043 unsigned long *hlow, 3044 unsigned long *hhigh) 3045 { 3046 uint64_t off = tlow | 3047 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) << 3048 TARGET_LONG_BITS / 2; 3049 3050 *hlow = off; 3051 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2; 3052 } 3053 3054 static struct iovec *lock_iovec(int type, abi_ulong target_addr, 3055 abi_ulong count, int copy) 3056 { 3057 struct target_iovec *target_vec; 3058 struct iovec *vec; 3059 abi_ulong total_len, max_len; 3060 int i; 3061 int err = 0; 3062 bool bad_address = false; 3063 3064 if (count == 0) { 3065 errno = 0; 3066 return NULL; 3067 } 3068 if (count > IOV_MAX) { 3069 errno = EINVAL; 3070 return NULL; 3071 } 3072 3073 vec = g_try_new0(struct iovec, count); 3074 if (vec == NULL) { 3075 errno = ENOMEM; 3076 return NULL; 3077 } 3078 3079 target_vec = lock_user(VERIFY_READ, target_addr, 3080 count * sizeof(struct target_iovec), 1); 3081 if (target_vec == NULL) { 3082 err = EFAULT; 3083 goto fail2; 3084 } 3085 3086 /* ??? If host page size > target page size, this will result in a 3087 value larger than what we can actually support. */ 3088 max_len = 0x7fffffff & TARGET_PAGE_MASK; 3089 total_len = 0; 3090 3091 for (i = 0; i < count; i++) { 3092 abi_ulong base = tswapal(target_vec[i].iov_base); 3093 abi_long len = tswapal(target_vec[i].iov_len); 3094 3095 if (len < 0) { 3096 err = EINVAL; 3097 goto fail; 3098 } else if (len == 0) { 3099 /* Zero length pointer is ignored. */ 3100 vec[i].iov_base = 0; 3101 } else { 3102 vec[i].iov_base = lock_user(type, base, len, copy); 3103 /* If the first buffer pointer is bad, this is a fault. But 3104 * subsequent bad buffers will result in a partial write; this 3105 * is realized by filling the vector with null pointers and 3106 * zero lengths. */ 3107 if (!vec[i].iov_base) { 3108 if (i == 0) { 3109 err = EFAULT; 3110 goto fail; 3111 } else { 3112 bad_address = true; 3113 } 3114 } 3115 if (bad_address) { 3116 len = 0; 3117 } 3118 if (len > max_len - total_len) { 3119 len = max_len - total_len; 3120 } 3121 } 3122 vec[i].iov_len = len; 3123 total_len += len; 3124 } 3125 3126 unlock_user(target_vec, target_addr, 0); 3127 return vec; 3128 3129 fail: 3130 while (--i >= 0) { 3131 if (tswapal(target_vec[i].iov_len) > 0) { 3132 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 3133 } 3134 } 3135 unlock_user(target_vec, target_addr, 0); 3136 fail2: 3137 g_free(vec); 3138 errno = err; 3139 return NULL; 3140 } 3141 3142 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 3143 abi_ulong count, int copy) 3144 { 3145 struct target_iovec *target_vec; 3146 int i; 3147 3148 target_vec = lock_user(VERIFY_READ, target_addr, 3149 count * sizeof(struct target_iovec), 1); 3150 if (target_vec) { 3151 for (i = 0; i < count; i++) { 3152 abi_ulong base = tswapal(target_vec[i].iov_base); 3153 abi_long len = tswapal(target_vec[i].iov_len); 3154 if (len < 0) { 3155 break; 3156 } 3157 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 3158 } 3159 unlock_user(target_vec, target_addr, 0); 3160 } 3161 3162 g_free(vec); 3163 } 3164 3165 static inline int target_to_host_sock_type(int *type) 3166 { 3167 int host_type = 0; 3168 int target_type = *type; 3169 3170 switch (target_type & TARGET_SOCK_TYPE_MASK) { 3171 case TARGET_SOCK_DGRAM: 3172 host_type = SOCK_DGRAM; 3173 break; 3174 case TARGET_SOCK_STREAM: 3175 host_type = SOCK_STREAM; 3176 break; 3177 default: 3178 host_type = target_type & TARGET_SOCK_TYPE_MASK; 3179 break; 3180 } 3181 if (target_type & TARGET_SOCK_CLOEXEC) { 3182 #if defined(SOCK_CLOEXEC) 3183 host_type |= SOCK_CLOEXEC; 3184 #else 3185 return -TARGET_EINVAL; 3186 #endif 3187 } 3188 if (target_type & TARGET_SOCK_NONBLOCK) { 3189 #if defined(SOCK_NONBLOCK) 3190 host_type |= SOCK_NONBLOCK; 3191 #elif !defined(O_NONBLOCK) 3192 return -TARGET_EINVAL; 3193 #endif 3194 } 3195 *type = host_type; 3196 return 0; 3197 } 3198 3199 /* Try to emulate socket type flags after socket creation. */ 3200 static int sock_flags_fixup(int fd, int target_type) 3201 { 3202 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 3203 if (target_type & TARGET_SOCK_NONBLOCK) { 3204 int flags = fcntl(fd, F_GETFL); 3205 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 3206 close(fd); 3207 return -TARGET_EINVAL; 3208 } 3209 } 3210 #endif 3211 return fd; 3212 } 3213 3214 /* do_socket() Must return target values and target errnos. */ 3215 static abi_long do_socket(int domain, int type, int protocol) 3216 { 3217 int target_type = type; 3218 int ret; 3219 3220 ret = target_to_host_sock_type(&type); 3221 if (ret) { 3222 return ret; 3223 } 3224 3225 if (domain == PF_NETLINK && !( 3226 #ifdef CONFIG_RTNETLINK 3227 protocol == NETLINK_ROUTE || 3228 #endif 3229 protocol == NETLINK_KOBJECT_UEVENT || 3230 protocol == NETLINK_AUDIT)) { 3231 return -TARGET_EPROTONOSUPPORT; 3232 } 3233 3234 if (domain == AF_PACKET || 3235 (domain == AF_INET && type == SOCK_PACKET)) { 3236 protocol = tswap16(protocol); 3237 } 3238 3239 ret = get_errno(socket(domain, type, protocol)); 3240 if (ret >= 0) { 3241 ret = sock_flags_fixup(ret, target_type); 3242 if (type == SOCK_PACKET) { 3243 /* Manage an obsolete case : 3244 * if socket type is SOCK_PACKET, bind by name 3245 */ 3246 fd_trans_register(ret, &target_packet_trans); 3247 } else if (domain == PF_NETLINK) { 3248 switch (protocol) { 3249 #ifdef CONFIG_RTNETLINK 3250 case NETLINK_ROUTE: 3251 fd_trans_register(ret, &target_netlink_route_trans); 3252 break; 3253 #endif 3254 case NETLINK_KOBJECT_UEVENT: 3255 /* nothing to do: messages are strings */ 3256 break; 3257 case NETLINK_AUDIT: 3258 fd_trans_register(ret, &target_netlink_audit_trans); 3259 break; 3260 default: 3261 g_assert_not_reached(); 3262 } 3263 } 3264 } 3265 return ret; 3266 } 3267 3268 /* do_bind() Must return target values and target errnos. */ 3269 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3270 socklen_t addrlen) 3271 { 3272 void *addr; 3273 abi_long ret; 3274 3275 if ((int)addrlen < 0) { 3276 return -TARGET_EINVAL; 3277 } 3278 3279 addr = alloca(addrlen+1); 3280 3281 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3282 if (ret) 3283 return ret; 3284 3285 return get_errno(bind(sockfd, addr, addrlen)); 3286 } 3287 3288 /* do_connect() Must return target values and target errnos. */ 3289 static abi_long do_connect(int sockfd, abi_ulong target_addr, 3290 socklen_t addrlen) 3291 { 3292 void *addr; 3293 abi_long ret; 3294 3295 if ((int)addrlen < 0) { 3296 return -TARGET_EINVAL; 3297 } 3298 3299 addr = alloca(addrlen+1); 3300 3301 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3302 if (ret) 3303 return ret; 3304 3305 return get_errno(safe_connect(sockfd, addr, addrlen)); 3306 } 3307 3308 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 3309 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 3310 int flags, int send) 3311 { 3312 abi_long ret, len; 3313 struct msghdr msg; 3314 abi_ulong count; 3315 struct iovec *vec; 3316 abi_ulong target_vec; 3317 3318 if (msgp->msg_name) { 3319 msg.msg_namelen = tswap32(msgp->msg_namelen); 3320 msg.msg_name = alloca(msg.msg_namelen+1); 3321 ret = target_to_host_sockaddr(fd, msg.msg_name, 3322 tswapal(msgp->msg_name), 3323 msg.msg_namelen); 3324 if (ret == -TARGET_EFAULT) { 3325 /* For connected sockets msg_name and msg_namelen must 3326 * be ignored, so returning EFAULT immediately is wrong. 3327 * Instead, pass a bad msg_name to the host kernel, and 3328 * let it decide whether to return EFAULT or not. 3329 */ 3330 msg.msg_name = (void *)-1; 3331 } else if (ret) { 3332 goto out2; 3333 } 3334 } else { 3335 msg.msg_name = NULL; 3336 msg.msg_namelen = 0; 3337 } 3338 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 3339 msg.msg_control = alloca(msg.msg_controllen); 3340 memset(msg.msg_control, 0, msg.msg_controllen); 3341 3342 msg.msg_flags = tswap32(msgp->msg_flags); 3343 3344 count = tswapal(msgp->msg_iovlen); 3345 target_vec = tswapal(msgp->msg_iov); 3346 3347 if (count > IOV_MAX) { 3348 /* sendrcvmsg returns a different errno for this condition than 3349 * readv/writev, so we must catch it here before lock_iovec() does. 3350 */ 3351 ret = -TARGET_EMSGSIZE; 3352 goto out2; 3353 } 3354 3355 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 3356 target_vec, count, send); 3357 if (vec == NULL) { 3358 ret = -host_to_target_errno(errno); 3359 goto out2; 3360 } 3361 msg.msg_iovlen = count; 3362 msg.msg_iov = vec; 3363 3364 if (send) { 3365 if (fd_trans_target_to_host_data(fd)) { 3366 void *host_msg; 3367 3368 host_msg = g_malloc(msg.msg_iov->iov_len); 3369 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len); 3370 ret = fd_trans_target_to_host_data(fd)(host_msg, 3371 msg.msg_iov->iov_len); 3372 if (ret >= 0) { 3373 msg.msg_iov->iov_base = host_msg; 3374 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3375 } 3376 g_free(host_msg); 3377 } else { 3378 ret = target_to_host_cmsg(&msg, msgp); 3379 if (ret == 0) { 3380 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3381 } 3382 } 3383 } else { 3384 ret = get_errno(safe_recvmsg(fd, &msg, flags)); 3385 if (!is_error(ret)) { 3386 len = ret; 3387 if (fd_trans_host_to_target_data(fd)) { 3388 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, 3389 MIN(msg.msg_iov->iov_len, len)); 3390 } else { 3391 ret = host_to_target_cmsg(msgp, &msg); 3392 } 3393 if (!is_error(ret)) { 3394 msgp->msg_namelen = tswap32(msg.msg_namelen); 3395 msgp->msg_flags = tswap32(msg.msg_flags); 3396 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) { 3397 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 3398 msg.msg_name, msg.msg_namelen); 3399 if (ret) { 3400 goto out; 3401 } 3402 } 3403 3404 ret = len; 3405 } 3406 } 3407 } 3408 3409 out: 3410 unlock_iovec(vec, target_vec, count, !send); 3411 out2: 3412 return ret; 3413 } 3414 3415 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 3416 int flags, int send) 3417 { 3418 abi_long ret; 3419 struct target_msghdr *msgp; 3420 3421 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 3422 msgp, 3423 target_msg, 3424 send ? 1 : 0)) { 3425 return -TARGET_EFAULT; 3426 } 3427 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 3428 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 3429 return ret; 3430 } 3431 3432 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 3433 * so it might not have this *mmsg-specific flag either. 3434 */ 3435 #ifndef MSG_WAITFORONE 3436 #define MSG_WAITFORONE 0x10000 3437 #endif 3438 3439 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 3440 unsigned int vlen, unsigned int flags, 3441 int send) 3442 { 3443 struct target_mmsghdr *mmsgp; 3444 abi_long ret = 0; 3445 int i; 3446 3447 if (vlen > UIO_MAXIOV) { 3448 vlen = UIO_MAXIOV; 3449 } 3450 3451 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 3452 if (!mmsgp) { 3453 return -TARGET_EFAULT; 3454 } 3455 3456 for (i = 0; i < vlen; i++) { 3457 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 3458 if (is_error(ret)) { 3459 break; 3460 } 3461 mmsgp[i].msg_len = tswap32(ret); 3462 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 3463 if (flags & MSG_WAITFORONE) { 3464 flags |= MSG_DONTWAIT; 3465 } 3466 } 3467 3468 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 3469 3470 /* Return number of datagrams sent if we sent any at all; 3471 * otherwise return the error. 3472 */ 3473 if (i) { 3474 return i; 3475 } 3476 return ret; 3477 } 3478 3479 /* do_accept4() Must return target values and target errnos. */ 3480 static abi_long do_accept4(int fd, abi_ulong target_addr, 3481 abi_ulong target_addrlen_addr, int flags) 3482 { 3483 socklen_t addrlen, ret_addrlen; 3484 void *addr; 3485 abi_long ret; 3486 int host_flags; 3487 3488 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3489 3490 if (target_addr == 0) { 3491 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3492 } 3493 3494 /* linux returns EINVAL if addrlen pointer is invalid */ 3495 if (get_user_u32(addrlen, target_addrlen_addr)) 3496 return -TARGET_EINVAL; 3497 3498 if ((int)addrlen < 0) { 3499 return -TARGET_EINVAL; 3500 } 3501 3502 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3503 return -TARGET_EINVAL; 3504 3505 addr = alloca(addrlen); 3506 3507 ret_addrlen = addrlen; 3508 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags)); 3509 if (!is_error(ret)) { 3510 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3511 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3512 ret = -TARGET_EFAULT; 3513 } 3514 } 3515 return ret; 3516 } 3517 3518 /* do_getpeername() Must return target values and target errnos. */ 3519 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3520 abi_ulong target_addrlen_addr) 3521 { 3522 socklen_t addrlen, ret_addrlen; 3523 void *addr; 3524 abi_long ret; 3525 3526 if (get_user_u32(addrlen, target_addrlen_addr)) 3527 return -TARGET_EFAULT; 3528 3529 if ((int)addrlen < 0) { 3530 return -TARGET_EINVAL; 3531 } 3532 3533 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3534 return -TARGET_EFAULT; 3535 3536 addr = alloca(addrlen); 3537 3538 ret_addrlen = addrlen; 3539 ret = get_errno(getpeername(fd, addr, &ret_addrlen)); 3540 if (!is_error(ret)) { 3541 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3542 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3543 ret = -TARGET_EFAULT; 3544 } 3545 } 3546 return ret; 3547 } 3548 3549 /* do_getsockname() Must return target values and target errnos. */ 3550 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3551 abi_ulong target_addrlen_addr) 3552 { 3553 socklen_t addrlen, ret_addrlen; 3554 void *addr; 3555 abi_long ret; 3556 3557 if (get_user_u32(addrlen, target_addrlen_addr)) 3558 return -TARGET_EFAULT; 3559 3560 if ((int)addrlen < 0) { 3561 return -TARGET_EINVAL; 3562 } 3563 3564 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 3565 return -TARGET_EFAULT; 3566 3567 addr = alloca(addrlen); 3568 3569 ret_addrlen = addrlen; 3570 ret = get_errno(getsockname(fd, addr, &ret_addrlen)); 3571 if (!is_error(ret)) { 3572 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3573 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3574 ret = -TARGET_EFAULT; 3575 } 3576 } 3577 return ret; 3578 } 3579 3580 /* do_socketpair() Must return target values and target errnos. */ 3581 static abi_long do_socketpair(int domain, int type, int protocol, 3582 abi_ulong target_tab_addr) 3583 { 3584 int tab[2]; 3585 abi_long ret; 3586 3587 target_to_host_sock_type(&type); 3588 3589 ret = get_errno(socketpair(domain, type, protocol, tab)); 3590 if (!is_error(ret)) { 3591 if (put_user_s32(tab[0], target_tab_addr) 3592 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3593 ret = -TARGET_EFAULT; 3594 } 3595 return ret; 3596 } 3597 3598 /* do_sendto() Must return target values and target errnos. */ 3599 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3600 abi_ulong target_addr, socklen_t addrlen) 3601 { 3602 void *addr; 3603 void *host_msg; 3604 void *copy_msg = NULL; 3605 abi_long ret; 3606 3607 if ((int)addrlen < 0) { 3608 return -TARGET_EINVAL; 3609 } 3610 3611 host_msg = lock_user(VERIFY_READ, msg, len, 1); 3612 if (!host_msg) 3613 return -TARGET_EFAULT; 3614 if (fd_trans_target_to_host_data(fd)) { 3615 copy_msg = host_msg; 3616 host_msg = g_malloc(len); 3617 memcpy(host_msg, copy_msg, len); 3618 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 3619 if (ret < 0) { 3620 goto fail; 3621 } 3622 } 3623 if (target_addr) { 3624 addr = alloca(addrlen+1); 3625 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 3626 if (ret) { 3627 goto fail; 3628 } 3629 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 3630 } else { 3631 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 3632 } 3633 fail: 3634 if (copy_msg) { 3635 g_free(host_msg); 3636 host_msg = copy_msg; 3637 } 3638 unlock_user(host_msg, msg, 0); 3639 return ret; 3640 } 3641 3642 /* do_recvfrom() Must return target values and target errnos. */ 3643 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 3644 abi_ulong target_addr, 3645 abi_ulong target_addrlen) 3646 { 3647 socklen_t addrlen, ret_addrlen; 3648 void *addr; 3649 void *host_msg; 3650 abi_long ret; 3651 3652 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 3653 if (!host_msg) 3654 return -TARGET_EFAULT; 3655 if (target_addr) { 3656 if (get_user_u32(addrlen, target_addrlen)) { 3657 ret = -TARGET_EFAULT; 3658 goto fail; 3659 } 3660 if ((int)addrlen < 0) { 3661 ret = -TARGET_EINVAL; 3662 goto fail; 3663 } 3664 addr = alloca(addrlen); 3665 ret_addrlen = addrlen; 3666 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 3667 addr, &ret_addrlen)); 3668 } else { 3669 addr = NULL; /* To keep compiler quiet. */ 3670 addrlen = 0; /* To keep compiler quiet. */ 3671 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 3672 } 3673 if (!is_error(ret)) { 3674 if (fd_trans_host_to_target_data(fd)) { 3675 abi_long trans; 3676 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len)); 3677 if (is_error(trans)) { 3678 ret = trans; 3679 goto fail; 3680 } 3681 } 3682 if (target_addr) { 3683 host_to_target_sockaddr(target_addr, addr, 3684 MIN(addrlen, ret_addrlen)); 3685 if (put_user_u32(ret_addrlen, target_addrlen)) { 3686 ret = -TARGET_EFAULT; 3687 goto fail; 3688 } 3689 } 3690 unlock_user(host_msg, msg, len); 3691 } else { 3692 fail: 3693 unlock_user(host_msg, msg, 0); 3694 } 3695 return ret; 3696 } 3697 3698 #ifdef TARGET_NR_socketcall 3699 /* do_socketcall() must return target values and target errnos. */ 3700 static abi_long do_socketcall(int num, abi_ulong vptr) 3701 { 3702 static const unsigned nargs[] = { /* number of arguments per operation */ 3703 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 3704 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 3705 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 3706 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 3707 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 3708 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 3709 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 3710 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 3711 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 3712 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 3713 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 3714 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 3715 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 3716 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3717 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3718 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 3719 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 3720 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 3721 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 3722 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 3723 }; 3724 abi_long a[6]; /* max 6 args */ 3725 unsigned i; 3726 3727 /* check the range of the first argument num */ 3728 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 3729 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 3730 return -TARGET_EINVAL; 3731 } 3732 /* ensure we have space for args */ 3733 if (nargs[num] > ARRAY_SIZE(a)) { 3734 return -TARGET_EINVAL; 3735 } 3736 /* collect the arguments in a[] according to nargs[] */ 3737 for (i = 0; i < nargs[num]; ++i) { 3738 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 3739 return -TARGET_EFAULT; 3740 } 3741 } 3742 /* now when we have the args, invoke the appropriate underlying function */ 3743 switch (num) { 3744 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 3745 return do_socket(a[0], a[1], a[2]); 3746 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 3747 return do_bind(a[0], a[1], a[2]); 3748 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 3749 return do_connect(a[0], a[1], a[2]); 3750 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 3751 return get_errno(listen(a[0], a[1])); 3752 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 3753 return do_accept4(a[0], a[1], a[2], 0); 3754 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 3755 return do_getsockname(a[0], a[1], a[2]); 3756 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 3757 return do_getpeername(a[0], a[1], a[2]); 3758 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */ 3759 return do_socketpair(a[0], a[1], a[2], a[3]); 3760 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */ 3761 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 3762 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */ 3763 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 3764 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */ 3765 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 3766 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */ 3767 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 3768 case TARGET_SYS_SHUTDOWN: /* sockfd, how */ 3769 return get_errno(shutdown(a[0], a[1])); 3770 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 3771 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 3772 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */ 3773 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 3774 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */ 3775 return do_sendrecvmsg(a[0], a[1], a[2], 1); 3776 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */ 3777 return do_sendrecvmsg(a[0], a[1], a[2], 0); 3778 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */ 3779 return do_accept4(a[0], a[1], a[2], a[3]); 3780 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */ 3781 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 3782 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */ 3783 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 3784 default: 3785 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num); 3786 return -TARGET_EINVAL; 3787 } 3788 } 3789 #endif 3790 3791 #define N_SHM_REGIONS 32 3792 3793 static struct shm_region { 3794 abi_ulong start; 3795 abi_ulong size; 3796 bool in_use; 3797 } shm_regions[N_SHM_REGIONS]; 3798 3799 #ifndef TARGET_SEMID64_DS 3800 /* asm-generic version of this struct */ 3801 struct target_semid64_ds 3802 { 3803 struct target_ipc_perm sem_perm; 3804 abi_ulong sem_otime; 3805 #if TARGET_ABI_BITS == 32 3806 abi_ulong __unused1; 3807 #endif 3808 abi_ulong sem_ctime; 3809 #if TARGET_ABI_BITS == 32 3810 abi_ulong __unused2; 3811 #endif 3812 abi_ulong sem_nsems; 3813 abi_ulong __unused3; 3814 abi_ulong __unused4; 3815 }; 3816 #endif 3817 3818 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 3819 abi_ulong target_addr) 3820 { 3821 struct target_ipc_perm *target_ip; 3822 struct target_semid64_ds *target_sd; 3823 3824 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3825 return -TARGET_EFAULT; 3826 target_ip = &(target_sd->sem_perm); 3827 host_ip->__key = tswap32(target_ip->__key); 3828 host_ip->uid = tswap32(target_ip->uid); 3829 host_ip->gid = tswap32(target_ip->gid); 3830 host_ip->cuid = tswap32(target_ip->cuid); 3831 host_ip->cgid = tswap32(target_ip->cgid); 3832 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3833 host_ip->mode = tswap32(target_ip->mode); 3834 #else 3835 host_ip->mode = tswap16(target_ip->mode); 3836 #endif 3837 #if defined(TARGET_PPC) 3838 host_ip->__seq = tswap32(target_ip->__seq); 3839 #else 3840 host_ip->__seq = tswap16(target_ip->__seq); 3841 #endif 3842 unlock_user_struct(target_sd, target_addr, 0); 3843 return 0; 3844 } 3845 3846 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 3847 struct ipc_perm *host_ip) 3848 { 3849 struct target_ipc_perm *target_ip; 3850 struct target_semid64_ds *target_sd; 3851 3852 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3853 return -TARGET_EFAULT; 3854 target_ip = &(target_sd->sem_perm); 3855 target_ip->__key = tswap32(host_ip->__key); 3856 target_ip->uid = tswap32(host_ip->uid); 3857 target_ip->gid = tswap32(host_ip->gid); 3858 target_ip->cuid = tswap32(host_ip->cuid); 3859 target_ip->cgid = tswap32(host_ip->cgid); 3860 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3861 target_ip->mode = tswap32(host_ip->mode); 3862 #else 3863 target_ip->mode = tswap16(host_ip->mode); 3864 #endif 3865 #if defined(TARGET_PPC) 3866 target_ip->__seq = tswap32(host_ip->__seq); 3867 #else 3868 target_ip->__seq = tswap16(host_ip->__seq); 3869 #endif 3870 unlock_user_struct(target_sd, target_addr, 1); 3871 return 0; 3872 } 3873 3874 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 3875 abi_ulong target_addr) 3876 { 3877 struct target_semid64_ds *target_sd; 3878 3879 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3880 return -TARGET_EFAULT; 3881 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 3882 return -TARGET_EFAULT; 3883 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 3884 host_sd->sem_otime = tswapal(target_sd->sem_otime); 3885 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 3886 unlock_user_struct(target_sd, target_addr, 0); 3887 return 0; 3888 } 3889 3890 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 3891 struct semid_ds *host_sd) 3892 { 3893 struct target_semid64_ds *target_sd; 3894 3895 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3896 return -TARGET_EFAULT; 3897 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 3898 return -TARGET_EFAULT; 3899 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 3900 target_sd->sem_otime = tswapal(host_sd->sem_otime); 3901 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 3902 unlock_user_struct(target_sd, target_addr, 1); 3903 return 0; 3904 } 3905 3906 struct target_seminfo { 3907 int semmap; 3908 int semmni; 3909 int semmns; 3910 int semmnu; 3911 int semmsl; 3912 int semopm; 3913 int semume; 3914 int semusz; 3915 int semvmx; 3916 int semaem; 3917 }; 3918 3919 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 3920 struct seminfo *host_seminfo) 3921 { 3922 struct target_seminfo *target_seminfo; 3923 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 3924 return -TARGET_EFAULT; 3925 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 3926 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 3927 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 3928 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 3929 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 3930 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 3931 __put_user(host_seminfo->semume, &target_seminfo->semume); 3932 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 3933 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 3934 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 3935 unlock_user_struct(target_seminfo, target_addr, 1); 3936 return 0; 3937 } 3938 3939 union semun { 3940 int val; 3941 struct semid_ds *buf; 3942 unsigned short *array; 3943 struct seminfo *__buf; 3944 }; 3945 3946 union target_semun { 3947 int val; 3948 abi_ulong buf; 3949 abi_ulong array; 3950 abi_ulong __buf; 3951 }; 3952 3953 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 3954 abi_ulong target_addr) 3955 { 3956 int nsems; 3957 unsigned short *array; 3958 union semun semun; 3959 struct semid_ds semid_ds; 3960 int i, ret; 3961 3962 semun.buf = &semid_ds; 3963 3964 ret = semctl(semid, 0, IPC_STAT, semun); 3965 if (ret == -1) 3966 return get_errno(ret); 3967 3968 nsems = semid_ds.sem_nsems; 3969 3970 *host_array = g_try_new(unsigned short, nsems); 3971 if (!*host_array) { 3972 return -TARGET_ENOMEM; 3973 } 3974 array = lock_user(VERIFY_READ, target_addr, 3975 nsems*sizeof(unsigned short), 1); 3976 if (!array) { 3977 g_free(*host_array); 3978 return -TARGET_EFAULT; 3979 } 3980 3981 for(i=0; i<nsems; i++) { 3982 __get_user((*host_array)[i], &array[i]); 3983 } 3984 unlock_user(array, target_addr, 0); 3985 3986 return 0; 3987 } 3988 3989 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 3990 unsigned short **host_array) 3991 { 3992 int nsems; 3993 unsigned short *array; 3994 union semun semun; 3995 struct semid_ds semid_ds; 3996 int i, ret; 3997 3998 semun.buf = &semid_ds; 3999 4000 ret = semctl(semid, 0, IPC_STAT, semun); 4001 if (ret == -1) 4002 return get_errno(ret); 4003 4004 nsems = semid_ds.sem_nsems; 4005 4006 array = lock_user(VERIFY_WRITE, target_addr, 4007 nsems*sizeof(unsigned short), 0); 4008 if (!array) 4009 return -TARGET_EFAULT; 4010 4011 for(i=0; i<nsems; i++) { 4012 __put_user((*host_array)[i], &array[i]); 4013 } 4014 g_free(*host_array); 4015 unlock_user(array, target_addr, 1); 4016 4017 return 0; 4018 } 4019 4020 static inline abi_long do_semctl(int semid, int semnum, int cmd, 4021 abi_ulong target_arg) 4022 { 4023 union target_semun target_su = { .buf = target_arg }; 4024 union semun arg; 4025 struct semid_ds dsarg; 4026 unsigned short *array = NULL; 4027 struct seminfo seminfo; 4028 abi_long ret = -TARGET_EINVAL; 4029 abi_long err; 4030 cmd &= 0xff; 4031 4032 switch( cmd ) { 4033 case GETVAL: 4034 case SETVAL: 4035 /* In 64 bit cross-endian situations, we will erroneously pick up 4036 * the wrong half of the union for the "val" element. To rectify 4037 * this, the entire 8-byte structure is byteswapped, followed by 4038 * a swap of the 4 byte val field. In other cases, the data is 4039 * already in proper host byte order. */ 4040 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 4041 target_su.buf = tswapal(target_su.buf); 4042 arg.val = tswap32(target_su.val); 4043 } else { 4044 arg.val = target_su.val; 4045 } 4046 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4047 break; 4048 case GETALL: 4049 case SETALL: 4050 err = target_to_host_semarray(semid, &array, target_su.array); 4051 if (err) 4052 return err; 4053 arg.array = array; 4054 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4055 err = host_to_target_semarray(semid, target_su.array, &array); 4056 if (err) 4057 return err; 4058 break; 4059 case IPC_STAT: 4060 case IPC_SET: 4061 case SEM_STAT: 4062 err = target_to_host_semid_ds(&dsarg, target_su.buf); 4063 if (err) 4064 return err; 4065 arg.buf = &dsarg; 4066 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4067 err = host_to_target_semid_ds(target_su.buf, &dsarg); 4068 if (err) 4069 return err; 4070 break; 4071 case IPC_INFO: 4072 case SEM_INFO: 4073 arg.__buf = &seminfo; 4074 ret = get_errno(semctl(semid, semnum, cmd, arg)); 4075 err = host_to_target_seminfo(target_su.__buf, &seminfo); 4076 if (err) 4077 return err; 4078 break; 4079 case IPC_RMID: 4080 case GETPID: 4081 case GETNCNT: 4082 case GETZCNT: 4083 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 4084 break; 4085 } 4086 4087 return ret; 4088 } 4089 4090 struct target_sembuf { 4091 unsigned short sem_num; 4092 short sem_op; 4093 short sem_flg; 4094 }; 4095 4096 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 4097 abi_ulong target_addr, 4098 unsigned nsops) 4099 { 4100 struct target_sembuf *target_sembuf; 4101 int i; 4102 4103 target_sembuf = lock_user(VERIFY_READ, target_addr, 4104 nsops*sizeof(struct target_sembuf), 1); 4105 if (!target_sembuf) 4106 return -TARGET_EFAULT; 4107 4108 for(i=0; i<nsops; i++) { 4109 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 4110 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 4111 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 4112 } 4113 4114 unlock_user(target_sembuf, target_addr, 0); 4115 4116 return 0; 4117 } 4118 4119 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \ 4120 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64) 4121 4122 /* 4123 * This macro is required to handle the s390 variants, which passes the 4124 * arguments in a different order than default. 4125 */ 4126 #ifdef __s390x__ 4127 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4128 (__nsops), (__timeout), (__sops) 4129 #else 4130 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4131 (__nsops), 0, (__sops), (__timeout) 4132 #endif 4133 4134 static inline abi_long do_semtimedop(int semid, 4135 abi_long ptr, 4136 unsigned nsops, 4137 abi_long timeout, bool time64) 4138 { 4139 struct sembuf *sops; 4140 struct timespec ts, *pts = NULL; 4141 abi_long ret; 4142 4143 if (timeout) { 4144 pts = &ts; 4145 if (time64) { 4146 if (target_to_host_timespec64(pts, timeout)) { 4147 return -TARGET_EFAULT; 4148 } 4149 } else { 4150 if (target_to_host_timespec(pts, timeout)) { 4151 return -TARGET_EFAULT; 4152 } 4153 } 4154 } 4155 4156 if (nsops > TARGET_SEMOPM) { 4157 return -TARGET_E2BIG; 4158 } 4159 4160 sops = g_new(struct sembuf, nsops); 4161 4162 if (target_to_host_sembuf(sops, ptr, nsops)) { 4163 g_free(sops); 4164 return -TARGET_EFAULT; 4165 } 4166 4167 ret = -TARGET_ENOSYS; 4168 #ifdef __NR_semtimedop 4169 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts)); 4170 #endif 4171 #ifdef __NR_ipc 4172 if (ret == -TARGET_ENOSYS) { 4173 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, 4174 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))); 4175 } 4176 #endif 4177 g_free(sops); 4178 return ret; 4179 } 4180 #endif 4181 4182 struct target_msqid_ds 4183 { 4184 struct target_ipc_perm msg_perm; 4185 abi_ulong msg_stime; 4186 #if TARGET_ABI_BITS == 32 4187 abi_ulong __unused1; 4188 #endif 4189 abi_ulong msg_rtime; 4190 #if TARGET_ABI_BITS == 32 4191 abi_ulong __unused2; 4192 #endif 4193 abi_ulong msg_ctime; 4194 #if TARGET_ABI_BITS == 32 4195 abi_ulong __unused3; 4196 #endif 4197 abi_ulong __msg_cbytes; 4198 abi_ulong msg_qnum; 4199 abi_ulong msg_qbytes; 4200 abi_ulong msg_lspid; 4201 abi_ulong msg_lrpid; 4202 abi_ulong __unused4; 4203 abi_ulong __unused5; 4204 }; 4205 4206 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4207 abi_ulong target_addr) 4208 { 4209 struct target_msqid_ds *target_md; 4210 4211 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4212 return -TARGET_EFAULT; 4213 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4214 return -TARGET_EFAULT; 4215 host_md->msg_stime = tswapal(target_md->msg_stime); 4216 host_md->msg_rtime = tswapal(target_md->msg_rtime); 4217 host_md->msg_ctime = tswapal(target_md->msg_ctime); 4218 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 4219 host_md->msg_qnum = tswapal(target_md->msg_qnum); 4220 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 4221 host_md->msg_lspid = tswapal(target_md->msg_lspid); 4222 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 4223 unlock_user_struct(target_md, target_addr, 0); 4224 return 0; 4225 } 4226 4227 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 4228 struct msqid_ds *host_md) 4229 { 4230 struct target_msqid_ds *target_md; 4231 4232 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 4233 return -TARGET_EFAULT; 4234 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 4235 return -TARGET_EFAULT; 4236 target_md->msg_stime = tswapal(host_md->msg_stime); 4237 target_md->msg_rtime = tswapal(host_md->msg_rtime); 4238 target_md->msg_ctime = tswapal(host_md->msg_ctime); 4239 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 4240 target_md->msg_qnum = tswapal(host_md->msg_qnum); 4241 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 4242 target_md->msg_lspid = tswapal(host_md->msg_lspid); 4243 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 4244 unlock_user_struct(target_md, target_addr, 1); 4245 return 0; 4246 } 4247 4248 struct target_msginfo { 4249 int msgpool; 4250 int msgmap; 4251 int msgmax; 4252 int msgmnb; 4253 int msgmni; 4254 int msgssz; 4255 int msgtql; 4256 unsigned short int msgseg; 4257 }; 4258 4259 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 4260 struct msginfo *host_msginfo) 4261 { 4262 struct target_msginfo *target_msginfo; 4263 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 4264 return -TARGET_EFAULT; 4265 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 4266 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 4267 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 4268 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 4269 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 4270 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 4271 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 4272 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 4273 unlock_user_struct(target_msginfo, target_addr, 1); 4274 return 0; 4275 } 4276 4277 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 4278 { 4279 struct msqid_ds dsarg; 4280 struct msginfo msginfo; 4281 abi_long ret = -TARGET_EINVAL; 4282 4283 cmd &= 0xff; 4284 4285 switch (cmd) { 4286 case IPC_STAT: 4287 case IPC_SET: 4288 case MSG_STAT: 4289 if (target_to_host_msqid_ds(&dsarg,ptr)) 4290 return -TARGET_EFAULT; 4291 ret = get_errno(msgctl(msgid, cmd, &dsarg)); 4292 if (host_to_target_msqid_ds(ptr,&dsarg)) 4293 return -TARGET_EFAULT; 4294 break; 4295 case IPC_RMID: 4296 ret = get_errno(msgctl(msgid, cmd, NULL)); 4297 break; 4298 case IPC_INFO: 4299 case MSG_INFO: 4300 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 4301 if (host_to_target_msginfo(ptr, &msginfo)) 4302 return -TARGET_EFAULT; 4303 break; 4304 } 4305 4306 return ret; 4307 } 4308 4309 struct target_msgbuf { 4310 abi_long mtype; 4311 char mtext[1]; 4312 }; 4313 4314 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 4315 ssize_t msgsz, int msgflg) 4316 { 4317 struct target_msgbuf *target_mb; 4318 struct msgbuf *host_mb; 4319 abi_long ret = 0; 4320 4321 if (msgsz < 0) { 4322 return -TARGET_EINVAL; 4323 } 4324 4325 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 4326 return -TARGET_EFAULT; 4327 host_mb = g_try_malloc(msgsz + sizeof(long)); 4328 if (!host_mb) { 4329 unlock_user_struct(target_mb, msgp, 0); 4330 return -TARGET_ENOMEM; 4331 } 4332 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 4333 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 4334 ret = -TARGET_ENOSYS; 4335 #ifdef __NR_msgsnd 4336 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg)); 4337 #endif 4338 #ifdef __NR_ipc 4339 if (ret == -TARGET_ENOSYS) { 4340 #ifdef __s390x__ 4341 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg, 4342 host_mb)); 4343 #else 4344 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg, 4345 host_mb, 0)); 4346 #endif 4347 } 4348 #endif 4349 g_free(host_mb); 4350 unlock_user_struct(target_mb, msgp, 0); 4351 4352 return ret; 4353 } 4354 4355 #ifdef __NR_ipc 4356 #if defined(__sparc__) 4357 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */ 4358 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp 4359 #elif defined(__s390x__) 4360 /* The s390 sys_ipc variant has only five parameters. */ 4361 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4362 ((long int[]){(long int)__msgp, __msgtyp}) 4363 #else 4364 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4365 ((long int[]){(long int)__msgp, __msgtyp}), 0 4366 #endif 4367 #endif 4368 4369 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 4370 ssize_t msgsz, abi_long msgtyp, 4371 int msgflg) 4372 { 4373 struct target_msgbuf *target_mb; 4374 char *target_mtext; 4375 struct msgbuf *host_mb; 4376 abi_long ret = 0; 4377 4378 if (msgsz < 0) { 4379 return -TARGET_EINVAL; 4380 } 4381 4382 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 4383 return -TARGET_EFAULT; 4384 4385 host_mb = g_try_malloc(msgsz + sizeof(long)); 4386 if (!host_mb) { 4387 ret = -TARGET_ENOMEM; 4388 goto end; 4389 } 4390 ret = -TARGET_ENOSYS; 4391 #ifdef __NR_msgrcv 4392 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 4393 #endif 4394 #ifdef __NR_ipc 4395 if (ret == -TARGET_ENOSYS) { 4396 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, 4397 msgflg, MSGRCV_ARGS(host_mb, msgtyp))); 4398 } 4399 #endif 4400 4401 if (ret > 0) { 4402 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 4403 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 4404 if (!target_mtext) { 4405 ret = -TARGET_EFAULT; 4406 goto end; 4407 } 4408 memcpy(target_mb->mtext, host_mb->mtext, ret); 4409 unlock_user(target_mtext, target_mtext_addr, ret); 4410 } 4411 4412 target_mb->mtype = tswapal(host_mb->mtype); 4413 4414 end: 4415 if (target_mb) 4416 unlock_user_struct(target_mb, msgp, 1); 4417 g_free(host_mb); 4418 return ret; 4419 } 4420 4421 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 4422 abi_ulong target_addr) 4423 { 4424 struct target_shmid_ds *target_sd; 4425 4426 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4427 return -TARGET_EFAULT; 4428 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 4429 return -TARGET_EFAULT; 4430 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4431 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4432 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4433 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4434 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4435 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4436 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4437 unlock_user_struct(target_sd, target_addr, 0); 4438 return 0; 4439 } 4440 4441 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4442 struct shmid_ds *host_sd) 4443 { 4444 struct target_shmid_ds *target_sd; 4445 4446 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4447 return -TARGET_EFAULT; 4448 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4449 return -TARGET_EFAULT; 4450 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4451 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4452 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4453 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4454 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4455 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4456 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4457 unlock_user_struct(target_sd, target_addr, 1); 4458 return 0; 4459 } 4460 4461 struct target_shminfo { 4462 abi_ulong shmmax; 4463 abi_ulong shmmin; 4464 abi_ulong shmmni; 4465 abi_ulong shmseg; 4466 abi_ulong shmall; 4467 }; 4468 4469 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4470 struct shminfo *host_shminfo) 4471 { 4472 struct target_shminfo *target_shminfo; 4473 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4474 return -TARGET_EFAULT; 4475 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4476 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4477 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4478 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 4479 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4480 unlock_user_struct(target_shminfo, target_addr, 1); 4481 return 0; 4482 } 4483 4484 struct target_shm_info { 4485 int used_ids; 4486 abi_ulong shm_tot; 4487 abi_ulong shm_rss; 4488 abi_ulong shm_swp; 4489 abi_ulong swap_attempts; 4490 abi_ulong swap_successes; 4491 }; 4492 4493 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4494 struct shm_info *host_shm_info) 4495 { 4496 struct target_shm_info *target_shm_info; 4497 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4498 return -TARGET_EFAULT; 4499 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4500 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4501 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4502 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4503 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4504 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4505 unlock_user_struct(target_shm_info, target_addr, 1); 4506 return 0; 4507 } 4508 4509 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4510 { 4511 struct shmid_ds dsarg; 4512 struct shminfo shminfo; 4513 struct shm_info shm_info; 4514 abi_long ret = -TARGET_EINVAL; 4515 4516 cmd &= 0xff; 4517 4518 switch(cmd) { 4519 case IPC_STAT: 4520 case IPC_SET: 4521 case SHM_STAT: 4522 if (target_to_host_shmid_ds(&dsarg, buf)) 4523 return -TARGET_EFAULT; 4524 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4525 if (host_to_target_shmid_ds(buf, &dsarg)) 4526 return -TARGET_EFAULT; 4527 break; 4528 case IPC_INFO: 4529 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4530 if (host_to_target_shminfo(buf, &shminfo)) 4531 return -TARGET_EFAULT; 4532 break; 4533 case SHM_INFO: 4534 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4535 if (host_to_target_shm_info(buf, &shm_info)) 4536 return -TARGET_EFAULT; 4537 break; 4538 case IPC_RMID: 4539 case SHM_LOCK: 4540 case SHM_UNLOCK: 4541 ret = get_errno(shmctl(shmid, cmd, NULL)); 4542 break; 4543 } 4544 4545 return ret; 4546 } 4547 4548 #ifndef TARGET_FORCE_SHMLBA 4549 /* For most architectures, SHMLBA is the same as the page size; 4550 * some architectures have larger values, in which case they should 4551 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4552 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4553 * and defining its own value for SHMLBA. 4554 * 4555 * The kernel also permits SHMLBA to be set by the architecture to a 4556 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4557 * this means that addresses are rounded to the large size if 4558 * SHM_RND is set but addresses not aligned to that size are not rejected 4559 * as long as they are at least page-aligned. Since the only architecture 4560 * which uses this is ia64 this code doesn't provide for that oddity. 4561 */ 4562 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4563 { 4564 return TARGET_PAGE_SIZE; 4565 } 4566 #endif 4567 4568 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4569 int shmid, abi_ulong shmaddr, int shmflg) 4570 { 4571 abi_long raddr; 4572 void *host_raddr; 4573 struct shmid_ds shm_info; 4574 int i,ret; 4575 abi_ulong shmlba; 4576 4577 /* find out the length of the shared memory segment */ 4578 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4579 if (is_error(ret)) { 4580 /* can't get length, bail out */ 4581 return ret; 4582 } 4583 4584 shmlba = target_shmlba(cpu_env); 4585 4586 if (shmaddr & (shmlba - 1)) { 4587 if (shmflg & SHM_RND) { 4588 shmaddr &= ~(shmlba - 1); 4589 } else { 4590 return -TARGET_EINVAL; 4591 } 4592 } 4593 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) { 4594 return -TARGET_EINVAL; 4595 } 4596 4597 mmap_lock(); 4598 4599 if (shmaddr) 4600 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 4601 else { 4602 abi_ulong mmap_start; 4603 4604 /* In order to use the host shmat, we need to honor host SHMLBA. */ 4605 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba)); 4606 4607 if (mmap_start == -1) { 4608 errno = ENOMEM; 4609 host_raddr = (void *)-1; 4610 } else 4611 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 4612 } 4613 4614 if (host_raddr == (void *)-1) { 4615 mmap_unlock(); 4616 return get_errno((long)host_raddr); 4617 } 4618 raddr=h2g((unsigned long)host_raddr); 4619 4620 page_set_flags(raddr, raddr + shm_info.shm_segsz, 4621 PAGE_VALID | PAGE_READ | 4622 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 4623 4624 for (i = 0; i < N_SHM_REGIONS; i++) { 4625 if (!shm_regions[i].in_use) { 4626 shm_regions[i].in_use = true; 4627 shm_regions[i].start = raddr; 4628 shm_regions[i].size = shm_info.shm_segsz; 4629 break; 4630 } 4631 } 4632 4633 mmap_unlock(); 4634 return raddr; 4635 4636 } 4637 4638 static inline abi_long do_shmdt(abi_ulong shmaddr) 4639 { 4640 int i; 4641 abi_long rv; 4642 4643 mmap_lock(); 4644 4645 for (i = 0; i < N_SHM_REGIONS; ++i) { 4646 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4647 shm_regions[i].in_use = false; 4648 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4649 break; 4650 } 4651 } 4652 rv = get_errno(shmdt(g2h(shmaddr))); 4653 4654 mmap_unlock(); 4655 4656 return rv; 4657 } 4658 4659 #ifdef TARGET_NR_ipc 4660 /* ??? This only works with linear mappings. */ 4661 /* do_ipc() must return target values and target errnos. */ 4662 static abi_long do_ipc(CPUArchState *cpu_env, 4663 unsigned int call, abi_long first, 4664 abi_long second, abi_long third, 4665 abi_long ptr, abi_long fifth) 4666 { 4667 int version; 4668 abi_long ret = 0; 4669 4670 version = call >> 16; 4671 call &= 0xffff; 4672 4673 switch (call) { 4674 case IPCOP_semop: 4675 ret = do_semtimedop(first, ptr, second, 0, false); 4676 break; 4677 case IPCOP_semtimedop: 4678 /* 4679 * The s390 sys_ipc variant has only five parameters instead of six 4680 * (as for default variant) and the only difference is the handling of 4681 * SEMTIMEDOP where on s390 the third parameter is used as a pointer 4682 * to a struct timespec where the generic variant uses fifth parameter. 4683 */ 4684 #if defined(TARGET_S390X) 4685 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64); 4686 #else 4687 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64); 4688 #endif 4689 break; 4690 4691 case IPCOP_semget: 4692 ret = get_errno(semget(first, second, third)); 4693 break; 4694 4695 case IPCOP_semctl: { 4696 /* The semun argument to semctl is passed by value, so dereference the 4697 * ptr argument. */ 4698 abi_ulong atptr; 4699 get_user_ual(atptr, ptr); 4700 ret = do_semctl(first, second, third, atptr); 4701 break; 4702 } 4703 4704 case IPCOP_msgget: 4705 ret = get_errno(msgget(first, second)); 4706 break; 4707 4708 case IPCOP_msgsnd: 4709 ret = do_msgsnd(first, ptr, second, third); 4710 break; 4711 4712 case IPCOP_msgctl: 4713 ret = do_msgctl(first, second, ptr); 4714 break; 4715 4716 case IPCOP_msgrcv: 4717 switch (version) { 4718 case 0: 4719 { 4720 struct target_ipc_kludge { 4721 abi_long msgp; 4722 abi_long msgtyp; 4723 } *tmp; 4724 4725 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 4726 ret = -TARGET_EFAULT; 4727 break; 4728 } 4729 4730 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 4731 4732 unlock_user_struct(tmp, ptr, 0); 4733 break; 4734 } 4735 default: 4736 ret = do_msgrcv(first, ptr, second, fifth, third); 4737 } 4738 break; 4739 4740 case IPCOP_shmat: 4741 switch (version) { 4742 default: 4743 { 4744 abi_ulong raddr; 4745 raddr = do_shmat(cpu_env, first, ptr, second); 4746 if (is_error(raddr)) 4747 return get_errno(raddr); 4748 if (put_user_ual(raddr, third)) 4749 return -TARGET_EFAULT; 4750 break; 4751 } 4752 case 1: 4753 ret = -TARGET_EINVAL; 4754 break; 4755 } 4756 break; 4757 case IPCOP_shmdt: 4758 ret = do_shmdt(ptr); 4759 break; 4760 4761 case IPCOP_shmget: 4762 /* IPC_* flag values are the same on all linux platforms */ 4763 ret = get_errno(shmget(first, second, third)); 4764 break; 4765 4766 /* IPC_* and SHM_* command values are the same on all linux platforms */ 4767 case IPCOP_shmctl: 4768 ret = do_shmctl(first, second, ptr); 4769 break; 4770 default: 4771 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n", 4772 call, version); 4773 ret = -TARGET_ENOSYS; 4774 break; 4775 } 4776 return ret; 4777 } 4778 #endif 4779 4780 /* kernel structure types definitions */ 4781 4782 #define STRUCT(name, ...) STRUCT_ ## name, 4783 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 4784 enum { 4785 #include "syscall_types.h" 4786 STRUCT_MAX 4787 }; 4788 #undef STRUCT 4789 #undef STRUCT_SPECIAL 4790 4791 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 4792 #define STRUCT_SPECIAL(name) 4793 #include "syscall_types.h" 4794 #undef STRUCT 4795 #undef STRUCT_SPECIAL 4796 4797 #define MAX_STRUCT_SIZE 4096 4798 4799 #ifdef CONFIG_FIEMAP 4800 /* So fiemap access checks don't overflow on 32 bit systems. 4801 * This is very slightly smaller than the limit imposed by 4802 * the underlying kernel. 4803 */ 4804 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 4805 / sizeof(struct fiemap_extent)) 4806 4807 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 4808 int fd, int cmd, abi_long arg) 4809 { 4810 /* The parameter for this ioctl is a struct fiemap followed 4811 * by an array of struct fiemap_extent whose size is set 4812 * in fiemap->fm_extent_count. The array is filled in by the 4813 * ioctl. 4814 */ 4815 int target_size_in, target_size_out; 4816 struct fiemap *fm; 4817 const argtype *arg_type = ie->arg_type; 4818 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 4819 void *argptr, *p; 4820 abi_long ret; 4821 int i, extent_size = thunk_type_size(extent_arg_type, 0); 4822 uint32_t outbufsz; 4823 int free_fm = 0; 4824 4825 assert(arg_type[0] == TYPE_PTR); 4826 assert(ie->access == IOC_RW); 4827 arg_type++; 4828 target_size_in = thunk_type_size(arg_type, 0); 4829 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 4830 if (!argptr) { 4831 return -TARGET_EFAULT; 4832 } 4833 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4834 unlock_user(argptr, arg, 0); 4835 fm = (struct fiemap *)buf_temp; 4836 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 4837 return -TARGET_EINVAL; 4838 } 4839 4840 outbufsz = sizeof (*fm) + 4841 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 4842 4843 if (outbufsz > MAX_STRUCT_SIZE) { 4844 /* We can't fit all the extents into the fixed size buffer. 4845 * Allocate one that is large enough and use it instead. 4846 */ 4847 fm = g_try_malloc(outbufsz); 4848 if (!fm) { 4849 return -TARGET_ENOMEM; 4850 } 4851 memcpy(fm, buf_temp, sizeof(struct fiemap)); 4852 free_fm = 1; 4853 } 4854 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm)); 4855 if (!is_error(ret)) { 4856 target_size_out = target_size_in; 4857 /* An extent_count of 0 means we were only counting the extents 4858 * so there are no structs to copy 4859 */ 4860 if (fm->fm_extent_count != 0) { 4861 target_size_out += fm->fm_mapped_extents * extent_size; 4862 } 4863 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 4864 if (!argptr) { 4865 ret = -TARGET_EFAULT; 4866 } else { 4867 /* Convert the struct fiemap */ 4868 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 4869 if (fm->fm_extent_count != 0) { 4870 p = argptr + target_size_in; 4871 /* ...and then all the struct fiemap_extents */ 4872 for (i = 0; i < fm->fm_mapped_extents; i++) { 4873 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 4874 THUNK_TARGET); 4875 p += extent_size; 4876 } 4877 } 4878 unlock_user(argptr, arg, target_size_out); 4879 } 4880 } 4881 if (free_fm) { 4882 g_free(fm); 4883 } 4884 return ret; 4885 } 4886 #endif 4887 4888 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 4889 int fd, int cmd, abi_long arg) 4890 { 4891 const argtype *arg_type = ie->arg_type; 4892 int target_size; 4893 void *argptr; 4894 int ret; 4895 struct ifconf *host_ifconf; 4896 uint32_t outbufsz; 4897 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 4898 int target_ifreq_size; 4899 int nb_ifreq; 4900 int free_buf = 0; 4901 int i; 4902 int target_ifc_len; 4903 abi_long target_ifc_buf; 4904 int host_ifc_len; 4905 char *host_ifc_buf; 4906 4907 assert(arg_type[0] == TYPE_PTR); 4908 assert(ie->access == IOC_RW); 4909 4910 arg_type++; 4911 target_size = thunk_type_size(arg_type, 0); 4912 4913 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 4914 if (!argptr) 4915 return -TARGET_EFAULT; 4916 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4917 unlock_user(argptr, arg, 0); 4918 4919 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 4920 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 4921 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 4922 4923 if (target_ifc_buf != 0) { 4924 target_ifc_len = host_ifconf->ifc_len; 4925 nb_ifreq = target_ifc_len / target_ifreq_size; 4926 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 4927 4928 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 4929 if (outbufsz > MAX_STRUCT_SIZE) { 4930 /* 4931 * We can't fit all the extents into the fixed size buffer. 4932 * Allocate one that is large enough and use it instead. 4933 */ 4934 host_ifconf = malloc(outbufsz); 4935 if (!host_ifconf) { 4936 return -TARGET_ENOMEM; 4937 } 4938 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 4939 free_buf = 1; 4940 } 4941 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf); 4942 4943 host_ifconf->ifc_len = host_ifc_len; 4944 } else { 4945 host_ifc_buf = NULL; 4946 } 4947 host_ifconf->ifc_buf = host_ifc_buf; 4948 4949 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 4950 if (!is_error(ret)) { 4951 /* convert host ifc_len to target ifc_len */ 4952 4953 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 4954 target_ifc_len = nb_ifreq * target_ifreq_size; 4955 host_ifconf->ifc_len = target_ifc_len; 4956 4957 /* restore target ifc_buf */ 4958 4959 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 4960 4961 /* copy struct ifconf to target user */ 4962 4963 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 4964 if (!argptr) 4965 return -TARGET_EFAULT; 4966 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 4967 unlock_user(argptr, arg, target_size); 4968 4969 if (target_ifc_buf != 0) { 4970 /* copy ifreq[] to target user */ 4971 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 4972 for (i = 0; i < nb_ifreq ; i++) { 4973 thunk_convert(argptr + i * target_ifreq_size, 4974 host_ifc_buf + i * sizeof(struct ifreq), 4975 ifreq_arg_type, THUNK_TARGET); 4976 } 4977 unlock_user(argptr, target_ifc_buf, target_ifc_len); 4978 } 4979 } 4980 4981 if (free_buf) { 4982 free(host_ifconf); 4983 } 4984 4985 return ret; 4986 } 4987 4988 #if defined(CONFIG_USBFS) 4989 #if HOST_LONG_BITS > 64 4990 #error USBDEVFS thunks do not support >64 bit hosts yet. 4991 #endif 4992 struct live_urb { 4993 uint64_t target_urb_adr; 4994 uint64_t target_buf_adr; 4995 char *target_buf_ptr; 4996 struct usbdevfs_urb host_urb; 4997 }; 4998 4999 static GHashTable *usbdevfs_urb_hashtable(void) 5000 { 5001 static GHashTable *urb_hashtable; 5002 5003 if (!urb_hashtable) { 5004 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal); 5005 } 5006 return urb_hashtable; 5007 } 5008 5009 static void urb_hashtable_insert(struct live_urb *urb) 5010 { 5011 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5012 g_hash_table_insert(urb_hashtable, urb, urb); 5013 } 5014 5015 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr) 5016 { 5017 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5018 return g_hash_table_lookup(urb_hashtable, &target_urb_adr); 5019 } 5020 5021 static void urb_hashtable_remove(struct live_urb *urb) 5022 { 5023 GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); 5024 g_hash_table_remove(urb_hashtable, urb); 5025 } 5026 5027 static abi_long 5028 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp, 5029 int fd, int cmd, abi_long arg) 5030 { 5031 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) }; 5032 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 }; 5033 struct live_urb *lurb; 5034 void *argptr; 5035 uint64_t hurb; 5036 int target_size; 5037 uintptr_t target_urb_adr; 5038 abi_long ret; 5039 5040 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET); 5041 5042 memset(buf_temp, 0, sizeof(uint64_t)); 5043 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5044 if (is_error(ret)) { 5045 return ret; 5046 } 5047 5048 memcpy(&hurb, buf_temp, sizeof(uint64_t)); 5049 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb)); 5050 if (!lurb->target_urb_adr) { 5051 return -TARGET_EFAULT; 5052 } 5053 urb_hashtable_remove(lurb); 5054 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 5055 lurb->host_urb.buffer_length); 5056 lurb->target_buf_ptr = NULL; 5057 5058 /* restore the guest buffer pointer */ 5059 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr; 5060 5061 /* update the guest urb struct */ 5062 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0); 5063 if (!argptr) { 5064 g_free(lurb); 5065 return -TARGET_EFAULT; 5066 } 5067 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET); 5068 unlock_user(argptr, lurb->target_urb_adr, target_size); 5069 5070 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET); 5071 /* write back the urb handle */ 5072 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5073 if (!argptr) { 5074 g_free(lurb); 5075 return -TARGET_EFAULT; 5076 } 5077 5078 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */ 5079 target_urb_adr = lurb->target_urb_adr; 5080 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET); 5081 unlock_user(argptr, arg, target_size); 5082 5083 g_free(lurb); 5084 return ret; 5085 } 5086 5087 static abi_long 5088 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie, 5089 uint8_t *buf_temp __attribute__((unused)), 5090 int fd, int cmd, abi_long arg) 5091 { 5092 struct live_urb *lurb; 5093 5094 /* map target address back to host URB with metadata. */ 5095 lurb = urb_hashtable_lookup(arg); 5096 if (!lurb) { 5097 return -TARGET_EFAULT; 5098 } 5099 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5100 } 5101 5102 static abi_long 5103 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp, 5104 int fd, int cmd, abi_long arg) 5105 { 5106 const argtype *arg_type = ie->arg_type; 5107 int target_size; 5108 abi_long ret; 5109 void *argptr; 5110 int rw_dir; 5111 struct live_urb *lurb; 5112 5113 /* 5114 * each submitted URB needs to map to a unique ID for the 5115 * kernel, and that unique ID needs to be a pointer to 5116 * host memory. hence, we need to malloc for each URB. 5117 * isochronous transfers have a variable length struct. 5118 */ 5119 arg_type++; 5120 target_size = thunk_type_size(arg_type, THUNK_TARGET); 5121 5122 /* construct host copy of urb and metadata */ 5123 lurb = g_try_malloc0(sizeof(struct live_urb)); 5124 if (!lurb) { 5125 return -TARGET_ENOMEM; 5126 } 5127 5128 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5129 if (!argptr) { 5130 g_free(lurb); 5131 return -TARGET_EFAULT; 5132 } 5133 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST); 5134 unlock_user(argptr, arg, 0); 5135 5136 lurb->target_urb_adr = arg; 5137 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer; 5138 5139 /* buffer space used depends on endpoint type so lock the entire buffer */ 5140 /* control type urbs should check the buffer contents for true direction */ 5141 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ; 5142 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr, 5143 lurb->host_urb.buffer_length, 1); 5144 if (lurb->target_buf_ptr == NULL) { 5145 g_free(lurb); 5146 return -TARGET_EFAULT; 5147 } 5148 5149 /* update buffer pointer in host copy */ 5150 lurb->host_urb.buffer = lurb->target_buf_ptr; 5151 5152 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5153 if (is_error(ret)) { 5154 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0); 5155 g_free(lurb); 5156 } else { 5157 urb_hashtable_insert(lurb); 5158 } 5159 5160 return ret; 5161 } 5162 #endif /* CONFIG_USBFS */ 5163 5164 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5165 int cmd, abi_long arg) 5166 { 5167 void *argptr; 5168 struct dm_ioctl *host_dm; 5169 abi_long guest_data; 5170 uint32_t guest_data_size; 5171 int target_size; 5172 const argtype *arg_type = ie->arg_type; 5173 abi_long ret; 5174 void *big_buf = NULL; 5175 char *host_data; 5176 5177 arg_type++; 5178 target_size = thunk_type_size(arg_type, 0); 5179 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5180 if (!argptr) { 5181 ret = -TARGET_EFAULT; 5182 goto out; 5183 } 5184 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5185 unlock_user(argptr, arg, 0); 5186 5187 /* buf_temp is too small, so fetch things into a bigger buffer */ 5188 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5189 memcpy(big_buf, buf_temp, target_size); 5190 buf_temp = big_buf; 5191 host_dm = big_buf; 5192 5193 guest_data = arg + host_dm->data_start; 5194 if ((guest_data - arg) < 0) { 5195 ret = -TARGET_EINVAL; 5196 goto out; 5197 } 5198 guest_data_size = host_dm->data_size - host_dm->data_start; 5199 host_data = (char*)host_dm + host_dm->data_start; 5200 5201 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5202 if (!argptr) { 5203 ret = -TARGET_EFAULT; 5204 goto out; 5205 } 5206 5207 switch (ie->host_cmd) { 5208 case DM_REMOVE_ALL: 5209 case DM_LIST_DEVICES: 5210 case DM_DEV_CREATE: 5211 case DM_DEV_REMOVE: 5212 case DM_DEV_SUSPEND: 5213 case DM_DEV_STATUS: 5214 case DM_DEV_WAIT: 5215 case DM_TABLE_STATUS: 5216 case DM_TABLE_CLEAR: 5217 case DM_TABLE_DEPS: 5218 case DM_LIST_VERSIONS: 5219 /* no input data */ 5220 break; 5221 case DM_DEV_RENAME: 5222 case DM_DEV_SET_GEOMETRY: 5223 /* data contains only strings */ 5224 memcpy(host_data, argptr, guest_data_size); 5225 break; 5226 case DM_TARGET_MSG: 5227 memcpy(host_data, argptr, guest_data_size); 5228 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5229 break; 5230 case DM_TABLE_LOAD: 5231 { 5232 void *gspec = argptr; 5233 void *cur_data = host_data; 5234 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5235 int spec_size = thunk_type_size(arg_type, 0); 5236 int i; 5237 5238 for (i = 0; i < host_dm->target_count; i++) { 5239 struct dm_target_spec *spec = cur_data; 5240 uint32_t next; 5241 int slen; 5242 5243 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5244 slen = strlen((char*)gspec + spec_size) + 1; 5245 next = spec->next; 5246 spec->next = sizeof(*spec) + slen; 5247 strcpy((char*)&spec[1], gspec + spec_size); 5248 gspec += next; 5249 cur_data += spec->next; 5250 } 5251 break; 5252 } 5253 default: 5254 ret = -TARGET_EINVAL; 5255 unlock_user(argptr, guest_data, 0); 5256 goto out; 5257 } 5258 unlock_user(argptr, guest_data, 0); 5259 5260 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5261 if (!is_error(ret)) { 5262 guest_data = arg + host_dm->data_start; 5263 guest_data_size = host_dm->data_size - host_dm->data_start; 5264 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5265 switch (ie->host_cmd) { 5266 case DM_REMOVE_ALL: 5267 case DM_DEV_CREATE: 5268 case DM_DEV_REMOVE: 5269 case DM_DEV_RENAME: 5270 case DM_DEV_SUSPEND: 5271 case DM_DEV_STATUS: 5272 case DM_TABLE_LOAD: 5273 case DM_TABLE_CLEAR: 5274 case DM_TARGET_MSG: 5275 case DM_DEV_SET_GEOMETRY: 5276 /* no return data */ 5277 break; 5278 case DM_LIST_DEVICES: 5279 { 5280 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5281 uint32_t remaining_data = guest_data_size; 5282 void *cur_data = argptr; 5283 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5284 int nl_size = 12; /* can't use thunk_size due to alignment */ 5285 5286 while (1) { 5287 uint32_t next = nl->next; 5288 if (next) { 5289 nl->next = nl_size + (strlen(nl->name) + 1); 5290 } 5291 if (remaining_data < nl->next) { 5292 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5293 break; 5294 } 5295 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5296 strcpy(cur_data + nl_size, nl->name); 5297 cur_data += nl->next; 5298 remaining_data -= nl->next; 5299 if (!next) { 5300 break; 5301 } 5302 nl = (void*)nl + next; 5303 } 5304 break; 5305 } 5306 case DM_DEV_WAIT: 5307 case DM_TABLE_STATUS: 5308 { 5309 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5310 void *cur_data = argptr; 5311 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5312 int spec_size = thunk_type_size(arg_type, 0); 5313 int i; 5314 5315 for (i = 0; i < host_dm->target_count; i++) { 5316 uint32_t next = spec->next; 5317 int slen = strlen((char*)&spec[1]) + 1; 5318 spec->next = (cur_data - argptr) + spec_size + slen; 5319 if (guest_data_size < spec->next) { 5320 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5321 break; 5322 } 5323 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5324 strcpy(cur_data + spec_size, (char*)&spec[1]); 5325 cur_data = argptr + spec->next; 5326 spec = (void*)host_dm + host_dm->data_start + next; 5327 } 5328 break; 5329 } 5330 case DM_TABLE_DEPS: 5331 { 5332 void *hdata = (void*)host_dm + host_dm->data_start; 5333 int count = *(uint32_t*)hdata; 5334 uint64_t *hdev = hdata + 8; 5335 uint64_t *gdev = argptr + 8; 5336 int i; 5337 5338 *(uint32_t*)argptr = tswap32(count); 5339 for (i = 0; i < count; i++) { 5340 *gdev = tswap64(*hdev); 5341 gdev++; 5342 hdev++; 5343 } 5344 break; 5345 } 5346 case DM_LIST_VERSIONS: 5347 { 5348 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5349 uint32_t remaining_data = guest_data_size; 5350 void *cur_data = argptr; 5351 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5352 int vers_size = thunk_type_size(arg_type, 0); 5353 5354 while (1) { 5355 uint32_t next = vers->next; 5356 if (next) { 5357 vers->next = vers_size + (strlen(vers->name) + 1); 5358 } 5359 if (remaining_data < vers->next) { 5360 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5361 break; 5362 } 5363 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5364 strcpy(cur_data + vers_size, vers->name); 5365 cur_data += vers->next; 5366 remaining_data -= vers->next; 5367 if (!next) { 5368 break; 5369 } 5370 vers = (void*)vers + next; 5371 } 5372 break; 5373 } 5374 default: 5375 unlock_user(argptr, guest_data, 0); 5376 ret = -TARGET_EINVAL; 5377 goto out; 5378 } 5379 unlock_user(argptr, guest_data, guest_data_size); 5380 5381 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5382 if (!argptr) { 5383 ret = -TARGET_EFAULT; 5384 goto out; 5385 } 5386 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5387 unlock_user(argptr, arg, target_size); 5388 } 5389 out: 5390 g_free(big_buf); 5391 return ret; 5392 } 5393 5394 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5395 int cmd, abi_long arg) 5396 { 5397 void *argptr; 5398 int target_size; 5399 const argtype *arg_type = ie->arg_type; 5400 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5401 abi_long ret; 5402 5403 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5404 struct blkpg_partition host_part; 5405 5406 /* Read and convert blkpg */ 5407 arg_type++; 5408 target_size = thunk_type_size(arg_type, 0); 5409 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5410 if (!argptr) { 5411 ret = -TARGET_EFAULT; 5412 goto out; 5413 } 5414 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5415 unlock_user(argptr, arg, 0); 5416 5417 switch (host_blkpg->op) { 5418 case BLKPG_ADD_PARTITION: 5419 case BLKPG_DEL_PARTITION: 5420 /* payload is struct blkpg_partition */ 5421 break; 5422 default: 5423 /* Unknown opcode */ 5424 ret = -TARGET_EINVAL; 5425 goto out; 5426 } 5427 5428 /* Read and convert blkpg->data */ 5429 arg = (abi_long)(uintptr_t)host_blkpg->data; 5430 target_size = thunk_type_size(part_arg_type, 0); 5431 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5432 if (!argptr) { 5433 ret = -TARGET_EFAULT; 5434 goto out; 5435 } 5436 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5437 unlock_user(argptr, arg, 0); 5438 5439 /* Swizzle the data pointer to our local copy and call! */ 5440 host_blkpg->data = &host_part; 5441 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5442 5443 out: 5444 return ret; 5445 } 5446 5447 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5448 int fd, int cmd, abi_long arg) 5449 { 5450 const argtype *arg_type = ie->arg_type; 5451 const StructEntry *se; 5452 const argtype *field_types; 5453 const int *dst_offsets, *src_offsets; 5454 int target_size; 5455 void *argptr; 5456 abi_ulong *target_rt_dev_ptr = NULL; 5457 unsigned long *host_rt_dev_ptr = NULL; 5458 abi_long ret; 5459 int i; 5460 5461 assert(ie->access == IOC_W); 5462 assert(*arg_type == TYPE_PTR); 5463 arg_type++; 5464 assert(*arg_type == TYPE_STRUCT); 5465 target_size = thunk_type_size(arg_type, 0); 5466 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5467 if (!argptr) { 5468 return -TARGET_EFAULT; 5469 } 5470 arg_type++; 5471 assert(*arg_type == (int)STRUCT_rtentry); 5472 se = struct_entries + *arg_type++; 5473 assert(se->convert[0] == NULL); 5474 /* convert struct here to be able to catch rt_dev string */ 5475 field_types = se->field_types; 5476 dst_offsets = se->field_offsets[THUNK_HOST]; 5477 src_offsets = se->field_offsets[THUNK_TARGET]; 5478 for (i = 0; i < se->nb_fields; i++) { 5479 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5480 assert(*field_types == TYPE_PTRVOID); 5481 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 5482 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5483 if (*target_rt_dev_ptr != 0) { 5484 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5485 tswapal(*target_rt_dev_ptr)); 5486 if (!*host_rt_dev_ptr) { 5487 unlock_user(argptr, arg, 0); 5488 return -TARGET_EFAULT; 5489 } 5490 } else { 5491 *host_rt_dev_ptr = 0; 5492 } 5493 field_types++; 5494 continue; 5495 } 5496 field_types = thunk_convert(buf_temp + dst_offsets[i], 5497 argptr + src_offsets[i], 5498 field_types, THUNK_HOST); 5499 } 5500 unlock_user(argptr, arg, 0); 5501 5502 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5503 5504 assert(host_rt_dev_ptr != NULL); 5505 assert(target_rt_dev_ptr != NULL); 5506 if (*host_rt_dev_ptr != 0) { 5507 unlock_user((void *)*host_rt_dev_ptr, 5508 *target_rt_dev_ptr, 0); 5509 } 5510 return ret; 5511 } 5512 5513 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5514 int fd, int cmd, abi_long arg) 5515 { 5516 int sig = target_to_host_signal(arg); 5517 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5518 } 5519 5520 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp, 5521 int fd, int cmd, abi_long arg) 5522 { 5523 struct timeval tv; 5524 abi_long ret; 5525 5526 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv)); 5527 if (is_error(ret)) { 5528 return ret; 5529 } 5530 5531 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) { 5532 if (copy_to_user_timeval(arg, &tv)) { 5533 return -TARGET_EFAULT; 5534 } 5535 } else { 5536 if (copy_to_user_timeval64(arg, &tv)) { 5537 return -TARGET_EFAULT; 5538 } 5539 } 5540 5541 return ret; 5542 } 5543 5544 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp, 5545 int fd, int cmd, abi_long arg) 5546 { 5547 struct timespec ts; 5548 abi_long ret; 5549 5550 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts)); 5551 if (is_error(ret)) { 5552 return ret; 5553 } 5554 5555 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) { 5556 if (host_to_target_timespec(arg, &ts)) { 5557 return -TARGET_EFAULT; 5558 } 5559 } else{ 5560 if (host_to_target_timespec64(arg, &ts)) { 5561 return -TARGET_EFAULT; 5562 } 5563 } 5564 5565 return ret; 5566 } 5567 5568 #ifdef TIOCGPTPEER 5569 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp, 5570 int fd, int cmd, abi_long arg) 5571 { 5572 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl); 5573 return get_errno(safe_ioctl(fd, ie->host_cmd, flags)); 5574 } 5575 #endif 5576 5577 #ifdef HAVE_DRM_H 5578 5579 static void unlock_drm_version(struct drm_version *host_ver, 5580 struct target_drm_version *target_ver, 5581 bool copy) 5582 { 5583 unlock_user(host_ver->name, target_ver->name, 5584 copy ? host_ver->name_len : 0); 5585 unlock_user(host_ver->date, target_ver->date, 5586 copy ? host_ver->date_len : 0); 5587 unlock_user(host_ver->desc, target_ver->desc, 5588 copy ? host_ver->desc_len : 0); 5589 } 5590 5591 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver, 5592 struct target_drm_version *target_ver) 5593 { 5594 memset(host_ver, 0, sizeof(*host_ver)); 5595 5596 __get_user(host_ver->name_len, &target_ver->name_len); 5597 if (host_ver->name_len) { 5598 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name, 5599 target_ver->name_len, 0); 5600 if (!host_ver->name) { 5601 return -EFAULT; 5602 } 5603 } 5604 5605 __get_user(host_ver->date_len, &target_ver->date_len); 5606 if (host_ver->date_len) { 5607 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date, 5608 target_ver->date_len, 0); 5609 if (!host_ver->date) { 5610 goto err; 5611 } 5612 } 5613 5614 __get_user(host_ver->desc_len, &target_ver->desc_len); 5615 if (host_ver->desc_len) { 5616 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc, 5617 target_ver->desc_len, 0); 5618 if (!host_ver->desc) { 5619 goto err; 5620 } 5621 } 5622 5623 return 0; 5624 err: 5625 unlock_drm_version(host_ver, target_ver, false); 5626 return -EFAULT; 5627 } 5628 5629 static inline void host_to_target_drmversion( 5630 struct target_drm_version *target_ver, 5631 struct drm_version *host_ver) 5632 { 5633 __put_user(host_ver->version_major, &target_ver->version_major); 5634 __put_user(host_ver->version_minor, &target_ver->version_minor); 5635 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel); 5636 __put_user(host_ver->name_len, &target_ver->name_len); 5637 __put_user(host_ver->date_len, &target_ver->date_len); 5638 __put_user(host_ver->desc_len, &target_ver->desc_len); 5639 unlock_drm_version(host_ver, target_ver, true); 5640 } 5641 5642 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp, 5643 int fd, int cmd, abi_long arg) 5644 { 5645 struct drm_version *ver; 5646 struct target_drm_version *target_ver; 5647 abi_long ret; 5648 5649 switch (ie->host_cmd) { 5650 case DRM_IOCTL_VERSION: 5651 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) { 5652 return -TARGET_EFAULT; 5653 } 5654 ver = (struct drm_version *)buf_temp; 5655 ret = target_to_host_drmversion(ver, target_ver); 5656 if (!is_error(ret)) { 5657 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver)); 5658 if (is_error(ret)) { 5659 unlock_drm_version(ver, target_ver, false); 5660 } else { 5661 host_to_target_drmversion(target_ver, ver); 5662 } 5663 } 5664 unlock_user_struct(target_ver, arg, 0); 5665 return ret; 5666 } 5667 return -TARGET_ENOSYS; 5668 } 5669 5670 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie, 5671 struct drm_i915_getparam *gparam, 5672 int fd, abi_long arg) 5673 { 5674 abi_long ret; 5675 int value; 5676 struct target_drm_i915_getparam *target_gparam; 5677 5678 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) { 5679 return -TARGET_EFAULT; 5680 } 5681 5682 __get_user(gparam->param, &target_gparam->param); 5683 gparam->value = &value; 5684 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam)); 5685 put_user_s32(value, target_gparam->value); 5686 5687 unlock_user_struct(target_gparam, arg, 0); 5688 return ret; 5689 } 5690 5691 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp, 5692 int fd, int cmd, abi_long arg) 5693 { 5694 switch (ie->host_cmd) { 5695 case DRM_IOCTL_I915_GETPARAM: 5696 return do_ioctl_drm_i915_getparam(ie, 5697 (struct drm_i915_getparam *)buf_temp, 5698 fd, arg); 5699 default: 5700 return -TARGET_ENOSYS; 5701 } 5702 } 5703 5704 #endif 5705 5706 IOCTLEntry ioctl_entries[] = { 5707 #define IOCTL(cmd, access, ...) \ 5708 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5709 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5710 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5711 #define IOCTL_IGNORE(cmd) \ 5712 { TARGET_ ## cmd, 0, #cmd }, 5713 #include "ioctls.h" 5714 { 0, 0, }, 5715 }; 5716 5717 /* ??? Implement proper locking for ioctls. */ 5718 /* do_ioctl() Must return target values and target errnos. */ 5719 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5720 { 5721 const IOCTLEntry *ie; 5722 const argtype *arg_type; 5723 abi_long ret; 5724 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5725 int target_size; 5726 void *argptr; 5727 5728 ie = ioctl_entries; 5729 for(;;) { 5730 if (ie->target_cmd == 0) { 5731 qemu_log_mask( 5732 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5733 return -TARGET_ENOSYS; 5734 } 5735 if (ie->target_cmd == cmd) 5736 break; 5737 ie++; 5738 } 5739 arg_type = ie->arg_type; 5740 if (ie->do_ioctl) { 5741 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5742 } else if (!ie->host_cmd) { 5743 /* Some architectures define BSD ioctls in their headers 5744 that are not implemented in Linux. */ 5745 return -TARGET_ENOSYS; 5746 } 5747 5748 switch(arg_type[0]) { 5749 case TYPE_NULL: 5750 /* no argument */ 5751 ret = get_errno(safe_ioctl(fd, ie->host_cmd)); 5752 break; 5753 case TYPE_PTRVOID: 5754 case TYPE_INT: 5755 case TYPE_LONG: 5756 case TYPE_ULONG: 5757 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg)); 5758 break; 5759 case TYPE_PTR: 5760 arg_type++; 5761 target_size = thunk_type_size(arg_type, 0); 5762 switch(ie->access) { 5763 case IOC_R: 5764 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5765 if (!is_error(ret)) { 5766 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5767 if (!argptr) 5768 return -TARGET_EFAULT; 5769 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5770 unlock_user(argptr, arg, target_size); 5771 } 5772 break; 5773 case IOC_W: 5774 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5775 if (!argptr) 5776 return -TARGET_EFAULT; 5777 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5778 unlock_user(argptr, arg, 0); 5779 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5780 break; 5781 default: 5782 case IOC_RW: 5783 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5784 if (!argptr) 5785 return -TARGET_EFAULT; 5786 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5787 unlock_user(argptr, arg, 0); 5788 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5789 if (!is_error(ret)) { 5790 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5791 if (!argptr) 5792 return -TARGET_EFAULT; 5793 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5794 unlock_user(argptr, arg, target_size); 5795 } 5796 break; 5797 } 5798 break; 5799 default: 5800 qemu_log_mask(LOG_UNIMP, 5801 "Unsupported ioctl type: cmd=0x%04lx type=%d\n", 5802 (long)cmd, arg_type[0]); 5803 ret = -TARGET_ENOSYS; 5804 break; 5805 } 5806 return ret; 5807 } 5808 5809 static const bitmask_transtbl iflag_tbl[] = { 5810 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 5811 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 5812 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 5813 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 5814 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 5815 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 5816 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 5817 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 5818 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 5819 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 5820 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 5821 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 5822 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 5823 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 5824 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8}, 5825 { 0, 0, 0, 0 } 5826 }; 5827 5828 static const bitmask_transtbl oflag_tbl[] = { 5829 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 5830 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 5831 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 5832 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 5833 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 5834 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 5835 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 5836 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 5837 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 5838 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 5839 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 5840 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 5841 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 5842 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 5843 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 5844 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 5845 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 5846 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 5847 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 5848 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 5849 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 5850 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 5851 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 5852 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 5853 { 0, 0, 0, 0 } 5854 }; 5855 5856 static const bitmask_transtbl cflag_tbl[] = { 5857 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 5858 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 5859 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 5860 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 5861 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 5862 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 5863 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 5864 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 5865 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 5866 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 5867 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 5868 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 5869 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 5870 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 5871 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 5872 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 5873 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 5874 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 5875 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 5876 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 5877 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 5878 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 5879 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 5880 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 5881 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 5882 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 5883 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 5884 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 5885 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 5886 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 5887 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 5888 { 0, 0, 0, 0 } 5889 }; 5890 5891 static const bitmask_transtbl lflag_tbl[] = { 5892 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 5893 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 5894 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 5895 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 5896 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 5897 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 5898 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 5899 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 5900 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 5901 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 5902 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 5903 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 5904 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 5905 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 5906 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 5907 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC}, 5908 { 0, 0, 0, 0 } 5909 }; 5910 5911 static void target_to_host_termios (void *dst, const void *src) 5912 { 5913 struct host_termios *host = dst; 5914 const struct target_termios *target = src; 5915 5916 host->c_iflag = 5917 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 5918 host->c_oflag = 5919 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 5920 host->c_cflag = 5921 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 5922 host->c_lflag = 5923 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 5924 host->c_line = target->c_line; 5925 5926 memset(host->c_cc, 0, sizeof(host->c_cc)); 5927 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 5928 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 5929 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 5930 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 5931 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 5932 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 5933 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 5934 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 5935 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 5936 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 5937 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 5938 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 5939 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 5940 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 5941 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 5942 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 5943 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 5944 } 5945 5946 static void host_to_target_termios (void *dst, const void *src) 5947 { 5948 struct target_termios *target = dst; 5949 const struct host_termios *host = src; 5950 5951 target->c_iflag = 5952 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 5953 target->c_oflag = 5954 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 5955 target->c_cflag = 5956 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 5957 target->c_lflag = 5958 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 5959 target->c_line = host->c_line; 5960 5961 memset(target->c_cc, 0, sizeof(target->c_cc)); 5962 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 5963 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 5964 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 5965 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 5966 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 5967 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 5968 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 5969 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 5970 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 5971 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 5972 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 5973 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 5974 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 5975 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 5976 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 5977 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 5978 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 5979 } 5980 5981 static const StructEntry struct_termios_def = { 5982 .convert = { host_to_target_termios, target_to_host_termios }, 5983 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 5984 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 5985 .print = print_termios, 5986 }; 5987 5988 static bitmask_transtbl mmap_flags_tbl[] = { 5989 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 5990 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 5991 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 5992 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, 5993 MAP_ANONYMOUS, MAP_ANONYMOUS }, 5994 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, 5995 MAP_GROWSDOWN, MAP_GROWSDOWN }, 5996 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, 5997 MAP_DENYWRITE, MAP_DENYWRITE }, 5998 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, 5999 MAP_EXECUTABLE, MAP_EXECUTABLE }, 6000 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 6001 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, 6002 MAP_NORESERVE, MAP_NORESERVE }, 6003 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB }, 6004 /* MAP_STACK had been ignored by the kernel for quite some time. 6005 Recognize it for the target insofar as we do not want to pass 6006 it through to the host. */ 6007 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 }, 6008 { 0, 0, 0, 0 } 6009 }; 6010 6011 /* 6012 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64) 6013 * TARGET_I386 is defined if TARGET_X86_64 is defined 6014 */ 6015 #if defined(TARGET_I386) 6016 6017 /* NOTE: there is really one LDT for all the threads */ 6018 static uint8_t *ldt_table; 6019 6020 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 6021 { 6022 int size; 6023 void *p; 6024 6025 if (!ldt_table) 6026 return 0; 6027 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 6028 if (size > bytecount) 6029 size = bytecount; 6030 p = lock_user(VERIFY_WRITE, ptr, size, 0); 6031 if (!p) 6032 return -TARGET_EFAULT; 6033 /* ??? Should this by byteswapped? */ 6034 memcpy(p, ldt_table, size); 6035 unlock_user(p, ptr, size); 6036 return size; 6037 } 6038 6039 /* XXX: add locking support */ 6040 static abi_long write_ldt(CPUX86State *env, 6041 abi_ulong ptr, unsigned long bytecount, int oldmode) 6042 { 6043 struct target_modify_ldt_ldt_s ldt_info; 6044 struct target_modify_ldt_ldt_s *target_ldt_info; 6045 int seg_32bit, contents, read_exec_only, limit_in_pages; 6046 int seg_not_present, useable, lm; 6047 uint32_t *lp, entry_1, entry_2; 6048 6049 if (bytecount != sizeof(ldt_info)) 6050 return -TARGET_EINVAL; 6051 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 6052 return -TARGET_EFAULT; 6053 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6054 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6055 ldt_info.limit = tswap32(target_ldt_info->limit); 6056 ldt_info.flags = tswap32(target_ldt_info->flags); 6057 unlock_user_struct(target_ldt_info, ptr, 0); 6058 6059 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 6060 return -TARGET_EINVAL; 6061 seg_32bit = ldt_info.flags & 1; 6062 contents = (ldt_info.flags >> 1) & 3; 6063 read_exec_only = (ldt_info.flags >> 3) & 1; 6064 limit_in_pages = (ldt_info.flags >> 4) & 1; 6065 seg_not_present = (ldt_info.flags >> 5) & 1; 6066 useable = (ldt_info.flags >> 6) & 1; 6067 #ifdef TARGET_ABI32 6068 lm = 0; 6069 #else 6070 lm = (ldt_info.flags >> 7) & 1; 6071 #endif 6072 if (contents == 3) { 6073 if (oldmode) 6074 return -TARGET_EINVAL; 6075 if (seg_not_present == 0) 6076 return -TARGET_EINVAL; 6077 } 6078 /* allocate the LDT */ 6079 if (!ldt_table) { 6080 env->ldt.base = target_mmap(0, 6081 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 6082 PROT_READ|PROT_WRITE, 6083 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 6084 if (env->ldt.base == -1) 6085 return -TARGET_ENOMEM; 6086 memset(g2h(env->ldt.base), 0, 6087 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 6088 env->ldt.limit = 0xffff; 6089 ldt_table = g2h(env->ldt.base); 6090 } 6091 6092 /* NOTE: same code as Linux kernel */ 6093 /* Allow LDTs to be cleared by the user. */ 6094 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6095 if (oldmode || 6096 (contents == 0 && 6097 read_exec_only == 1 && 6098 seg_32bit == 0 && 6099 limit_in_pages == 0 && 6100 seg_not_present == 1 && 6101 useable == 0 )) { 6102 entry_1 = 0; 6103 entry_2 = 0; 6104 goto install; 6105 } 6106 } 6107 6108 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6109 (ldt_info.limit & 0x0ffff); 6110 entry_2 = (ldt_info.base_addr & 0xff000000) | 6111 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6112 (ldt_info.limit & 0xf0000) | 6113 ((read_exec_only ^ 1) << 9) | 6114 (contents << 10) | 6115 ((seg_not_present ^ 1) << 15) | 6116 (seg_32bit << 22) | 6117 (limit_in_pages << 23) | 6118 (lm << 21) | 6119 0x7000; 6120 if (!oldmode) 6121 entry_2 |= (useable << 20); 6122 6123 /* Install the new entry ... */ 6124 install: 6125 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 6126 lp[0] = tswap32(entry_1); 6127 lp[1] = tswap32(entry_2); 6128 return 0; 6129 } 6130 6131 /* specific and weird i386 syscalls */ 6132 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 6133 unsigned long bytecount) 6134 { 6135 abi_long ret; 6136 6137 switch (func) { 6138 case 0: 6139 ret = read_ldt(ptr, bytecount); 6140 break; 6141 case 1: 6142 ret = write_ldt(env, ptr, bytecount, 1); 6143 break; 6144 case 0x11: 6145 ret = write_ldt(env, ptr, bytecount, 0); 6146 break; 6147 default: 6148 ret = -TARGET_ENOSYS; 6149 break; 6150 } 6151 return ret; 6152 } 6153 6154 #if defined(TARGET_ABI32) 6155 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 6156 { 6157 uint64_t *gdt_table = g2h(env->gdt.base); 6158 struct target_modify_ldt_ldt_s ldt_info; 6159 struct target_modify_ldt_ldt_s *target_ldt_info; 6160 int seg_32bit, contents, read_exec_only, limit_in_pages; 6161 int seg_not_present, useable, lm; 6162 uint32_t *lp, entry_1, entry_2; 6163 int i; 6164 6165 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6166 if (!target_ldt_info) 6167 return -TARGET_EFAULT; 6168 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6169 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6170 ldt_info.limit = tswap32(target_ldt_info->limit); 6171 ldt_info.flags = tswap32(target_ldt_info->flags); 6172 if (ldt_info.entry_number == -1) { 6173 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 6174 if (gdt_table[i] == 0) { 6175 ldt_info.entry_number = i; 6176 target_ldt_info->entry_number = tswap32(i); 6177 break; 6178 } 6179 } 6180 } 6181 unlock_user_struct(target_ldt_info, ptr, 1); 6182 6183 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 6184 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 6185 return -TARGET_EINVAL; 6186 seg_32bit = ldt_info.flags & 1; 6187 contents = (ldt_info.flags >> 1) & 3; 6188 read_exec_only = (ldt_info.flags >> 3) & 1; 6189 limit_in_pages = (ldt_info.flags >> 4) & 1; 6190 seg_not_present = (ldt_info.flags >> 5) & 1; 6191 useable = (ldt_info.flags >> 6) & 1; 6192 #ifdef TARGET_ABI32 6193 lm = 0; 6194 #else 6195 lm = (ldt_info.flags >> 7) & 1; 6196 #endif 6197 6198 if (contents == 3) { 6199 if (seg_not_present == 0) 6200 return -TARGET_EINVAL; 6201 } 6202 6203 /* NOTE: same code as Linux kernel */ 6204 /* Allow LDTs to be cleared by the user. */ 6205 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6206 if ((contents == 0 && 6207 read_exec_only == 1 && 6208 seg_32bit == 0 && 6209 limit_in_pages == 0 && 6210 seg_not_present == 1 && 6211 useable == 0 )) { 6212 entry_1 = 0; 6213 entry_2 = 0; 6214 goto install; 6215 } 6216 } 6217 6218 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6219 (ldt_info.limit & 0x0ffff); 6220 entry_2 = (ldt_info.base_addr & 0xff000000) | 6221 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6222 (ldt_info.limit & 0xf0000) | 6223 ((read_exec_only ^ 1) << 9) | 6224 (contents << 10) | 6225 ((seg_not_present ^ 1) << 15) | 6226 (seg_32bit << 22) | 6227 (limit_in_pages << 23) | 6228 (useable << 20) | 6229 (lm << 21) | 6230 0x7000; 6231 6232 /* Install the new entry ... */ 6233 install: 6234 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 6235 lp[0] = tswap32(entry_1); 6236 lp[1] = tswap32(entry_2); 6237 return 0; 6238 } 6239 6240 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 6241 { 6242 struct target_modify_ldt_ldt_s *target_ldt_info; 6243 uint64_t *gdt_table = g2h(env->gdt.base); 6244 uint32_t base_addr, limit, flags; 6245 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 6246 int seg_not_present, useable, lm; 6247 uint32_t *lp, entry_1, entry_2; 6248 6249 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6250 if (!target_ldt_info) 6251 return -TARGET_EFAULT; 6252 idx = tswap32(target_ldt_info->entry_number); 6253 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 6254 idx > TARGET_GDT_ENTRY_TLS_MAX) { 6255 unlock_user_struct(target_ldt_info, ptr, 1); 6256 return -TARGET_EINVAL; 6257 } 6258 lp = (uint32_t *)(gdt_table + idx); 6259 entry_1 = tswap32(lp[0]); 6260 entry_2 = tswap32(lp[1]); 6261 6262 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 6263 contents = (entry_2 >> 10) & 3; 6264 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 6265 seg_32bit = (entry_2 >> 22) & 1; 6266 limit_in_pages = (entry_2 >> 23) & 1; 6267 useable = (entry_2 >> 20) & 1; 6268 #ifdef TARGET_ABI32 6269 lm = 0; 6270 #else 6271 lm = (entry_2 >> 21) & 1; 6272 #endif 6273 flags = (seg_32bit << 0) | (contents << 1) | 6274 (read_exec_only << 3) | (limit_in_pages << 4) | 6275 (seg_not_present << 5) | (useable << 6) | (lm << 7); 6276 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 6277 base_addr = (entry_1 >> 16) | 6278 (entry_2 & 0xff000000) | 6279 ((entry_2 & 0xff) << 16); 6280 target_ldt_info->base_addr = tswapal(base_addr); 6281 target_ldt_info->limit = tswap32(limit); 6282 target_ldt_info->flags = tswap32(flags); 6283 unlock_user_struct(target_ldt_info, ptr, 1); 6284 return 0; 6285 } 6286 6287 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6288 { 6289 return -TARGET_ENOSYS; 6290 } 6291 #else 6292 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6293 { 6294 abi_long ret = 0; 6295 abi_ulong val; 6296 int idx; 6297 6298 switch(code) { 6299 case TARGET_ARCH_SET_GS: 6300 case TARGET_ARCH_SET_FS: 6301 if (code == TARGET_ARCH_SET_GS) 6302 idx = R_GS; 6303 else 6304 idx = R_FS; 6305 cpu_x86_load_seg(env, idx, 0); 6306 env->segs[idx].base = addr; 6307 break; 6308 case TARGET_ARCH_GET_GS: 6309 case TARGET_ARCH_GET_FS: 6310 if (code == TARGET_ARCH_GET_GS) 6311 idx = R_GS; 6312 else 6313 idx = R_FS; 6314 val = env->segs[idx].base; 6315 if (put_user(val, addr, abi_ulong)) 6316 ret = -TARGET_EFAULT; 6317 break; 6318 default: 6319 ret = -TARGET_EINVAL; 6320 break; 6321 } 6322 return ret; 6323 } 6324 #endif /* defined(TARGET_ABI32 */ 6325 6326 #endif /* defined(TARGET_I386) */ 6327 6328 #define NEW_STACK_SIZE 0x40000 6329 6330 6331 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 6332 typedef struct { 6333 CPUArchState *env; 6334 pthread_mutex_t mutex; 6335 pthread_cond_t cond; 6336 pthread_t thread; 6337 uint32_t tid; 6338 abi_ulong child_tidptr; 6339 abi_ulong parent_tidptr; 6340 sigset_t sigmask; 6341 } new_thread_info; 6342 6343 static void *clone_func(void *arg) 6344 { 6345 new_thread_info *info = arg; 6346 CPUArchState *env; 6347 CPUState *cpu; 6348 TaskState *ts; 6349 6350 rcu_register_thread(); 6351 tcg_register_thread(); 6352 env = info->env; 6353 cpu = env_cpu(env); 6354 thread_cpu = cpu; 6355 ts = (TaskState *)cpu->opaque; 6356 info->tid = sys_gettid(); 6357 task_settid(ts); 6358 if (info->child_tidptr) 6359 put_user_u32(info->tid, info->child_tidptr); 6360 if (info->parent_tidptr) 6361 put_user_u32(info->tid, info->parent_tidptr); 6362 qemu_guest_random_seed_thread_part2(cpu->random_seed); 6363 /* Enable signals. */ 6364 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 6365 /* Signal to the parent that we're ready. */ 6366 pthread_mutex_lock(&info->mutex); 6367 pthread_cond_broadcast(&info->cond); 6368 pthread_mutex_unlock(&info->mutex); 6369 /* Wait until the parent has finished initializing the tls state. */ 6370 pthread_mutex_lock(&clone_lock); 6371 pthread_mutex_unlock(&clone_lock); 6372 cpu_loop(env); 6373 /* never exits */ 6374 return NULL; 6375 } 6376 6377 /* do_fork() Must return host values and target errnos (unlike most 6378 do_*() functions). */ 6379 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 6380 abi_ulong parent_tidptr, target_ulong newtls, 6381 abi_ulong child_tidptr) 6382 { 6383 CPUState *cpu = env_cpu(env); 6384 int ret; 6385 TaskState *ts; 6386 CPUState *new_cpu; 6387 CPUArchState *new_env; 6388 sigset_t sigmask; 6389 6390 flags &= ~CLONE_IGNORED_FLAGS; 6391 6392 /* Emulate vfork() with fork() */ 6393 if (flags & CLONE_VFORK) 6394 flags &= ~(CLONE_VFORK | CLONE_VM); 6395 6396 if (flags & CLONE_VM) { 6397 TaskState *parent_ts = (TaskState *)cpu->opaque; 6398 new_thread_info info; 6399 pthread_attr_t attr; 6400 6401 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) || 6402 (flags & CLONE_INVALID_THREAD_FLAGS)) { 6403 return -TARGET_EINVAL; 6404 } 6405 6406 ts = g_new0(TaskState, 1); 6407 init_task_state(ts); 6408 6409 /* Grab a mutex so that thread setup appears atomic. */ 6410 pthread_mutex_lock(&clone_lock); 6411 6412 /* we create a new CPU instance. */ 6413 new_env = cpu_copy(env); 6414 /* Init regs that differ from the parent. */ 6415 cpu_clone_regs_child(new_env, newsp, flags); 6416 cpu_clone_regs_parent(env, flags); 6417 new_cpu = env_cpu(new_env); 6418 new_cpu->opaque = ts; 6419 ts->bprm = parent_ts->bprm; 6420 ts->info = parent_ts->info; 6421 ts->signal_mask = parent_ts->signal_mask; 6422 6423 if (flags & CLONE_CHILD_CLEARTID) { 6424 ts->child_tidptr = child_tidptr; 6425 } 6426 6427 if (flags & CLONE_SETTLS) { 6428 cpu_set_tls (new_env, newtls); 6429 } 6430 6431 memset(&info, 0, sizeof(info)); 6432 pthread_mutex_init(&info.mutex, NULL); 6433 pthread_mutex_lock(&info.mutex); 6434 pthread_cond_init(&info.cond, NULL); 6435 info.env = new_env; 6436 if (flags & CLONE_CHILD_SETTID) { 6437 info.child_tidptr = child_tidptr; 6438 } 6439 if (flags & CLONE_PARENT_SETTID) { 6440 info.parent_tidptr = parent_tidptr; 6441 } 6442 6443 ret = pthread_attr_init(&attr); 6444 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 6445 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 6446 /* It is not safe to deliver signals until the child has finished 6447 initializing, so temporarily block all signals. */ 6448 sigfillset(&sigmask); 6449 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 6450 cpu->random_seed = qemu_guest_random_seed_thread_part1(); 6451 6452 /* If this is our first additional thread, we need to ensure we 6453 * generate code for parallel execution and flush old translations. 6454 */ 6455 if (!parallel_cpus) { 6456 parallel_cpus = true; 6457 tb_flush(cpu); 6458 } 6459 6460 ret = pthread_create(&info.thread, &attr, clone_func, &info); 6461 /* TODO: Free new CPU state if thread creation failed. */ 6462 6463 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 6464 pthread_attr_destroy(&attr); 6465 if (ret == 0) { 6466 /* Wait for the child to initialize. */ 6467 pthread_cond_wait(&info.cond, &info.mutex); 6468 ret = info.tid; 6469 } else { 6470 ret = -1; 6471 } 6472 pthread_mutex_unlock(&info.mutex); 6473 pthread_cond_destroy(&info.cond); 6474 pthread_mutex_destroy(&info.mutex); 6475 pthread_mutex_unlock(&clone_lock); 6476 } else { 6477 /* if no CLONE_VM, we consider it is a fork */ 6478 if (flags & CLONE_INVALID_FORK_FLAGS) { 6479 return -TARGET_EINVAL; 6480 } 6481 6482 /* We can't support custom termination signals */ 6483 if ((flags & CSIGNAL) != TARGET_SIGCHLD) { 6484 return -TARGET_EINVAL; 6485 } 6486 6487 if (block_signals()) { 6488 return -TARGET_ERESTARTSYS; 6489 } 6490 6491 fork_start(); 6492 ret = fork(); 6493 if (ret == 0) { 6494 /* Child Process. */ 6495 cpu_clone_regs_child(env, newsp, flags); 6496 fork_end(1); 6497 /* There is a race condition here. The parent process could 6498 theoretically read the TID in the child process before the child 6499 tid is set. This would require using either ptrace 6500 (not implemented) or having *_tidptr to point at a shared memory 6501 mapping. We can't repeat the spinlock hack used above because 6502 the child process gets its own copy of the lock. */ 6503 if (flags & CLONE_CHILD_SETTID) 6504 put_user_u32(sys_gettid(), child_tidptr); 6505 if (flags & CLONE_PARENT_SETTID) 6506 put_user_u32(sys_gettid(), parent_tidptr); 6507 ts = (TaskState *)cpu->opaque; 6508 if (flags & CLONE_SETTLS) 6509 cpu_set_tls (env, newtls); 6510 if (flags & CLONE_CHILD_CLEARTID) 6511 ts->child_tidptr = child_tidptr; 6512 } else { 6513 cpu_clone_regs_parent(env, flags); 6514 fork_end(0); 6515 } 6516 } 6517 return ret; 6518 } 6519 6520 /* warning : doesn't handle linux specific flags... */ 6521 static int target_to_host_fcntl_cmd(int cmd) 6522 { 6523 int ret; 6524 6525 switch(cmd) { 6526 case TARGET_F_DUPFD: 6527 case TARGET_F_GETFD: 6528 case TARGET_F_SETFD: 6529 case TARGET_F_GETFL: 6530 case TARGET_F_SETFL: 6531 case TARGET_F_OFD_GETLK: 6532 case TARGET_F_OFD_SETLK: 6533 case TARGET_F_OFD_SETLKW: 6534 ret = cmd; 6535 break; 6536 case TARGET_F_GETLK: 6537 ret = F_GETLK64; 6538 break; 6539 case TARGET_F_SETLK: 6540 ret = F_SETLK64; 6541 break; 6542 case TARGET_F_SETLKW: 6543 ret = F_SETLKW64; 6544 break; 6545 case TARGET_F_GETOWN: 6546 ret = F_GETOWN; 6547 break; 6548 case TARGET_F_SETOWN: 6549 ret = F_SETOWN; 6550 break; 6551 case TARGET_F_GETSIG: 6552 ret = F_GETSIG; 6553 break; 6554 case TARGET_F_SETSIG: 6555 ret = F_SETSIG; 6556 break; 6557 #if TARGET_ABI_BITS == 32 6558 case TARGET_F_GETLK64: 6559 ret = F_GETLK64; 6560 break; 6561 case TARGET_F_SETLK64: 6562 ret = F_SETLK64; 6563 break; 6564 case TARGET_F_SETLKW64: 6565 ret = F_SETLKW64; 6566 break; 6567 #endif 6568 case TARGET_F_SETLEASE: 6569 ret = F_SETLEASE; 6570 break; 6571 case TARGET_F_GETLEASE: 6572 ret = F_GETLEASE; 6573 break; 6574 #ifdef F_DUPFD_CLOEXEC 6575 case TARGET_F_DUPFD_CLOEXEC: 6576 ret = F_DUPFD_CLOEXEC; 6577 break; 6578 #endif 6579 case TARGET_F_NOTIFY: 6580 ret = F_NOTIFY; 6581 break; 6582 #ifdef F_GETOWN_EX 6583 case TARGET_F_GETOWN_EX: 6584 ret = F_GETOWN_EX; 6585 break; 6586 #endif 6587 #ifdef F_SETOWN_EX 6588 case TARGET_F_SETOWN_EX: 6589 ret = F_SETOWN_EX; 6590 break; 6591 #endif 6592 #ifdef F_SETPIPE_SZ 6593 case TARGET_F_SETPIPE_SZ: 6594 ret = F_SETPIPE_SZ; 6595 break; 6596 case TARGET_F_GETPIPE_SZ: 6597 ret = F_GETPIPE_SZ; 6598 break; 6599 #endif 6600 default: 6601 ret = -TARGET_EINVAL; 6602 break; 6603 } 6604 6605 #if defined(__powerpc64__) 6606 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and 6607 * is not supported by kernel. The glibc fcntl call actually adjusts 6608 * them to 5, 6 and 7 before making the syscall(). Since we make the 6609 * syscall directly, adjust to what is supported by the kernel. 6610 */ 6611 if (ret >= F_GETLK64 && ret <= F_SETLKW64) { 6612 ret -= F_GETLK64 - 5; 6613 } 6614 #endif 6615 6616 return ret; 6617 } 6618 6619 #define FLOCK_TRANSTBL \ 6620 switch (type) { \ 6621 TRANSTBL_CONVERT(F_RDLCK); \ 6622 TRANSTBL_CONVERT(F_WRLCK); \ 6623 TRANSTBL_CONVERT(F_UNLCK); \ 6624 TRANSTBL_CONVERT(F_EXLCK); \ 6625 TRANSTBL_CONVERT(F_SHLCK); \ 6626 } 6627 6628 static int target_to_host_flock(int type) 6629 { 6630 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a 6631 FLOCK_TRANSTBL 6632 #undef TRANSTBL_CONVERT 6633 return -TARGET_EINVAL; 6634 } 6635 6636 static int host_to_target_flock(int type) 6637 { 6638 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a 6639 FLOCK_TRANSTBL 6640 #undef TRANSTBL_CONVERT 6641 /* if we don't know how to convert the value coming 6642 * from the host we copy to the target field as-is 6643 */ 6644 return type; 6645 } 6646 6647 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6648 abi_ulong target_flock_addr) 6649 { 6650 struct target_flock *target_fl; 6651 int l_type; 6652 6653 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6654 return -TARGET_EFAULT; 6655 } 6656 6657 __get_user(l_type, &target_fl->l_type); 6658 l_type = target_to_host_flock(l_type); 6659 if (l_type < 0) { 6660 return l_type; 6661 } 6662 fl->l_type = l_type; 6663 __get_user(fl->l_whence, &target_fl->l_whence); 6664 __get_user(fl->l_start, &target_fl->l_start); 6665 __get_user(fl->l_len, &target_fl->l_len); 6666 __get_user(fl->l_pid, &target_fl->l_pid); 6667 unlock_user_struct(target_fl, target_flock_addr, 0); 6668 return 0; 6669 } 6670 6671 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6672 const struct flock64 *fl) 6673 { 6674 struct target_flock *target_fl; 6675 short l_type; 6676 6677 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6678 return -TARGET_EFAULT; 6679 } 6680 6681 l_type = host_to_target_flock(fl->l_type); 6682 __put_user(l_type, &target_fl->l_type); 6683 __put_user(fl->l_whence, &target_fl->l_whence); 6684 __put_user(fl->l_start, &target_fl->l_start); 6685 __put_user(fl->l_len, &target_fl->l_len); 6686 __put_user(fl->l_pid, &target_fl->l_pid); 6687 unlock_user_struct(target_fl, target_flock_addr, 1); 6688 return 0; 6689 } 6690 6691 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6692 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6693 6694 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6695 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6696 abi_ulong target_flock_addr) 6697 { 6698 struct target_oabi_flock64 *target_fl; 6699 int l_type; 6700 6701 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6702 return -TARGET_EFAULT; 6703 } 6704 6705 __get_user(l_type, &target_fl->l_type); 6706 l_type = target_to_host_flock(l_type); 6707 if (l_type < 0) { 6708 return l_type; 6709 } 6710 fl->l_type = l_type; 6711 __get_user(fl->l_whence, &target_fl->l_whence); 6712 __get_user(fl->l_start, &target_fl->l_start); 6713 __get_user(fl->l_len, &target_fl->l_len); 6714 __get_user(fl->l_pid, &target_fl->l_pid); 6715 unlock_user_struct(target_fl, target_flock_addr, 0); 6716 return 0; 6717 } 6718 6719 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 6720 const struct flock64 *fl) 6721 { 6722 struct target_oabi_flock64 *target_fl; 6723 short l_type; 6724 6725 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6726 return -TARGET_EFAULT; 6727 } 6728 6729 l_type = host_to_target_flock(fl->l_type); 6730 __put_user(l_type, &target_fl->l_type); 6731 __put_user(fl->l_whence, &target_fl->l_whence); 6732 __put_user(fl->l_start, &target_fl->l_start); 6733 __put_user(fl->l_len, &target_fl->l_len); 6734 __put_user(fl->l_pid, &target_fl->l_pid); 6735 unlock_user_struct(target_fl, target_flock_addr, 1); 6736 return 0; 6737 } 6738 #endif 6739 6740 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6741 abi_ulong target_flock_addr) 6742 { 6743 struct target_flock64 *target_fl; 6744 int l_type; 6745 6746 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6747 return -TARGET_EFAULT; 6748 } 6749 6750 __get_user(l_type, &target_fl->l_type); 6751 l_type = target_to_host_flock(l_type); 6752 if (l_type < 0) { 6753 return l_type; 6754 } 6755 fl->l_type = l_type; 6756 __get_user(fl->l_whence, &target_fl->l_whence); 6757 __get_user(fl->l_start, &target_fl->l_start); 6758 __get_user(fl->l_len, &target_fl->l_len); 6759 __get_user(fl->l_pid, &target_fl->l_pid); 6760 unlock_user_struct(target_fl, target_flock_addr, 0); 6761 return 0; 6762 } 6763 6764 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 6765 const struct flock64 *fl) 6766 { 6767 struct target_flock64 *target_fl; 6768 short l_type; 6769 6770 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6771 return -TARGET_EFAULT; 6772 } 6773 6774 l_type = host_to_target_flock(fl->l_type); 6775 __put_user(l_type, &target_fl->l_type); 6776 __put_user(fl->l_whence, &target_fl->l_whence); 6777 __put_user(fl->l_start, &target_fl->l_start); 6778 __put_user(fl->l_len, &target_fl->l_len); 6779 __put_user(fl->l_pid, &target_fl->l_pid); 6780 unlock_user_struct(target_fl, target_flock_addr, 1); 6781 return 0; 6782 } 6783 6784 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 6785 { 6786 struct flock64 fl64; 6787 #ifdef F_GETOWN_EX 6788 struct f_owner_ex fox; 6789 struct target_f_owner_ex *target_fox; 6790 #endif 6791 abi_long ret; 6792 int host_cmd = target_to_host_fcntl_cmd(cmd); 6793 6794 if (host_cmd == -TARGET_EINVAL) 6795 return host_cmd; 6796 6797 switch(cmd) { 6798 case TARGET_F_GETLK: 6799 ret = copy_from_user_flock(&fl64, arg); 6800 if (ret) { 6801 return ret; 6802 } 6803 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6804 if (ret == 0) { 6805 ret = copy_to_user_flock(arg, &fl64); 6806 } 6807 break; 6808 6809 case TARGET_F_SETLK: 6810 case TARGET_F_SETLKW: 6811 ret = copy_from_user_flock(&fl64, arg); 6812 if (ret) { 6813 return ret; 6814 } 6815 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6816 break; 6817 6818 case TARGET_F_GETLK64: 6819 case TARGET_F_OFD_GETLK: 6820 ret = copy_from_user_flock64(&fl64, arg); 6821 if (ret) { 6822 return ret; 6823 } 6824 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6825 if (ret == 0) { 6826 ret = copy_to_user_flock64(arg, &fl64); 6827 } 6828 break; 6829 case TARGET_F_SETLK64: 6830 case TARGET_F_SETLKW64: 6831 case TARGET_F_OFD_SETLK: 6832 case TARGET_F_OFD_SETLKW: 6833 ret = copy_from_user_flock64(&fl64, arg); 6834 if (ret) { 6835 return ret; 6836 } 6837 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 6838 break; 6839 6840 case TARGET_F_GETFL: 6841 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6842 if (ret >= 0) { 6843 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 6844 } 6845 break; 6846 6847 case TARGET_F_SETFL: 6848 ret = get_errno(safe_fcntl(fd, host_cmd, 6849 target_to_host_bitmask(arg, 6850 fcntl_flags_tbl))); 6851 break; 6852 6853 #ifdef F_GETOWN_EX 6854 case TARGET_F_GETOWN_EX: 6855 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 6856 if (ret >= 0) { 6857 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 6858 return -TARGET_EFAULT; 6859 target_fox->type = tswap32(fox.type); 6860 target_fox->pid = tswap32(fox.pid); 6861 unlock_user_struct(target_fox, arg, 1); 6862 } 6863 break; 6864 #endif 6865 6866 #ifdef F_SETOWN_EX 6867 case TARGET_F_SETOWN_EX: 6868 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 6869 return -TARGET_EFAULT; 6870 fox.type = tswap32(target_fox->type); 6871 fox.pid = tswap32(target_fox->pid); 6872 unlock_user_struct(target_fox, arg, 0); 6873 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 6874 break; 6875 #endif 6876 6877 case TARGET_F_SETOWN: 6878 case TARGET_F_GETOWN: 6879 case TARGET_F_SETSIG: 6880 case TARGET_F_GETSIG: 6881 case TARGET_F_SETLEASE: 6882 case TARGET_F_GETLEASE: 6883 case TARGET_F_SETPIPE_SZ: 6884 case TARGET_F_GETPIPE_SZ: 6885 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 6886 break; 6887 6888 default: 6889 ret = get_errno(safe_fcntl(fd, cmd, arg)); 6890 break; 6891 } 6892 return ret; 6893 } 6894 6895 #ifdef USE_UID16 6896 6897 static inline int high2lowuid(int uid) 6898 { 6899 if (uid > 65535) 6900 return 65534; 6901 else 6902 return uid; 6903 } 6904 6905 static inline int high2lowgid(int gid) 6906 { 6907 if (gid > 65535) 6908 return 65534; 6909 else 6910 return gid; 6911 } 6912 6913 static inline int low2highuid(int uid) 6914 { 6915 if ((int16_t)uid == -1) 6916 return -1; 6917 else 6918 return uid; 6919 } 6920 6921 static inline int low2highgid(int gid) 6922 { 6923 if ((int16_t)gid == -1) 6924 return -1; 6925 else 6926 return gid; 6927 } 6928 static inline int tswapid(int id) 6929 { 6930 return tswap16(id); 6931 } 6932 6933 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 6934 6935 #else /* !USE_UID16 */ 6936 static inline int high2lowuid(int uid) 6937 { 6938 return uid; 6939 } 6940 static inline int high2lowgid(int gid) 6941 { 6942 return gid; 6943 } 6944 static inline int low2highuid(int uid) 6945 { 6946 return uid; 6947 } 6948 static inline int low2highgid(int gid) 6949 { 6950 return gid; 6951 } 6952 static inline int tswapid(int id) 6953 { 6954 return tswap32(id); 6955 } 6956 6957 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 6958 6959 #endif /* USE_UID16 */ 6960 6961 /* We must do direct syscalls for setting UID/GID, because we want to 6962 * implement the Linux system call semantics of "change only for this thread", 6963 * not the libc/POSIX semantics of "change for all threads in process". 6964 * (See http://ewontfix.com/17/ for more details.) 6965 * We use the 32-bit version of the syscalls if present; if it is not 6966 * then either the host architecture supports 32-bit UIDs natively with 6967 * the standard syscall, or the 16-bit UID is the best we can do. 6968 */ 6969 #ifdef __NR_setuid32 6970 #define __NR_sys_setuid __NR_setuid32 6971 #else 6972 #define __NR_sys_setuid __NR_setuid 6973 #endif 6974 #ifdef __NR_setgid32 6975 #define __NR_sys_setgid __NR_setgid32 6976 #else 6977 #define __NR_sys_setgid __NR_setgid 6978 #endif 6979 #ifdef __NR_setresuid32 6980 #define __NR_sys_setresuid __NR_setresuid32 6981 #else 6982 #define __NR_sys_setresuid __NR_setresuid 6983 #endif 6984 #ifdef __NR_setresgid32 6985 #define __NR_sys_setresgid __NR_setresgid32 6986 #else 6987 #define __NR_sys_setresgid __NR_setresgid 6988 #endif 6989 6990 _syscall1(int, sys_setuid, uid_t, uid) 6991 _syscall1(int, sys_setgid, gid_t, gid) 6992 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 6993 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 6994 6995 void syscall_init(void) 6996 { 6997 IOCTLEntry *ie; 6998 const argtype *arg_type; 6999 int size; 7000 int i; 7001 7002 thunk_init(STRUCT_MAX); 7003 7004 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 7005 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 7006 #include "syscall_types.h" 7007 #undef STRUCT 7008 #undef STRUCT_SPECIAL 7009 7010 /* Build target_to_host_errno_table[] table from 7011 * host_to_target_errno_table[]. */ 7012 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 7013 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 7014 } 7015 7016 /* we patch the ioctl size if necessary. We rely on the fact that 7017 no ioctl has all the bits at '1' in the size field */ 7018 ie = ioctl_entries; 7019 while (ie->target_cmd != 0) { 7020 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 7021 TARGET_IOC_SIZEMASK) { 7022 arg_type = ie->arg_type; 7023 if (arg_type[0] != TYPE_PTR) { 7024 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 7025 ie->target_cmd); 7026 exit(1); 7027 } 7028 arg_type++; 7029 size = thunk_type_size(arg_type, 0); 7030 ie->target_cmd = (ie->target_cmd & 7031 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 7032 (size << TARGET_IOC_SIZESHIFT); 7033 } 7034 7035 /* automatic consistency check if same arch */ 7036 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 7037 (defined(__x86_64__) && defined(TARGET_X86_64)) 7038 if (unlikely(ie->target_cmd != ie->host_cmd)) { 7039 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 7040 ie->name, ie->target_cmd, ie->host_cmd); 7041 } 7042 #endif 7043 ie++; 7044 } 7045 } 7046 7047 #ifdef TARGET_NR_truncate64 7048 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 7049 abi_long arg2, 7050 abi_long arg3, 7051 abi_long arg4) 7052 { 7053 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) { 7054 arg2 = arg3; 7055 arg3 = arg4; 7056 } 7057 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 7058 } 7059 #endif 7060 7061 #ifdef TARGET_NR_ftruncate64 7062 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 7063 abi_long arg2, 7064 abi_long arg3, 7065 abi_long arg4) 7066 { 7067 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 7068 arg2 = arg3; 7069 arg3 = arg4; 7070 } 7071 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 7072 } 7073 #endif 7074 7075 #if defined(TARGET_NR_timer_settime) || \ 7076 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)) 7077 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its, 7078 abi_ulong target_addr) 7079 { 7080 if (target_to_host_timespec(&host_its->it_interval, target_addr + 7081 offsetof(struct target_itimerspec, 7082 it_interval)) || 7083 target_to_host_timespec(&host_its->it_value, target_addr + 7084 offsetof(struct target_itimerspec, 7085 it_value))) { 7086 return -TARGET_EFAULT; 7087 } 7088 7089 return 0; 7090 } 7091 #endif 7092 7093 #if defined(TARGET_NR_timer_settime64) || \ 7094 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) 7095 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its, 7096 abi_ulong target_addr) 7097 { 7098 if (target_to_host_timespec64(&host_its->it_interval, target_addr + 7099 offsetof(struct target__kernel_itimerspec, 7100 it_interval)) || 7101 target_to_host_timespec64(&host_its->it_value, target_addr + 7102 offsetof(struct target__kernel_itimerspec, 7103 it_value))) { 7104 return -TARGET_EFAULT; 7105 } 7106 7107 return 0; 7108 } 7109 #endif 7110 7111 #if ((defined(TARGET_NR_timerfd_gettime) || \ 7112 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \ 7113 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime) 7114 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 7115 struct itimerspec *host_its) 7116 { 7117 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7118 it_interval), 7119 &host_its->it_interval) || 7120 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7121 it_value), 7122 &host_its->it_value)) { 7123 return -TARGET_EFAULT; 7124 } 7125 return 0; 7126 } 7127 #endif 7128 7129 #if ((defined(TARGET_NR_timerfd_gettime64) || \ 7130 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \ 7131 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64) 7132 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr, 7133 struct itimerspec *host_its) 7134 { 7135 if (host_to_target_timespec64(target_addr + 7136 offsetof(struct target__kernel_itimerspec, 7137 it_interval), 7138 &host_its->it_interval) || 7139 host_to_target_timespec64(target_addr + 7140 offsetof(struct target__kernel_itimerspec, 7141 it_value), 7142 &host_its->it_value)) { 7143 return -TARGET_EFAULT; 7144 } 7145 return 0; 7146 } 7147 #endif 7148 7149 #if defined(TARGET_NR_adjtimex) || \ 7150 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)) 7151 static inline abi_long target_to_host_timex(struct timex *host_tx, 7152 abi_long target_addr) 7153 { 7154 struct target_timex *target_tx; 7155 7156 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7157 return -TARGET_EFAULT; 7158 } 7159 7160 __get_user(host_tx->modes, &target_tx->modes); 7161 __get_user(host_tx->offset, &target_tx->offset); 7162 __get_user(host_tx->freq, &target_tx->freq); 7163 __get_user(host_tx->maxerror, &target_tx->maxerror); 7164 __get_user(host_tx->esterror, &target_tx->esterror); 7165 __get_user(host_tx->status, &target_tx->status); 7166 __get_user(host_tx->constant, &target_tx->constant); 7167 __get_user(host_tx->precision, &target_tx->precision); 7168 __get_user(host_tx->tolerance, &target_tx->tolerance); 7169 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7170 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7171 __get_user(host_tx->tick, &target_tx->tick); 7172 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7173 __get_user(host_tx->jitter, &target_tx->jitter); 7174 __get_user(host_tx->shift, &target_tx->shift); 7175 __get_user(host_tx->stabil, &target_tx->stabil); 7176 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7177 __get_user(host_tx->calcnt, &target_tx->calcnt); 7178 __get_user(host_tx->errcnt, &target_tx->errcnt); 7179 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7180 __get_user(host_tx->tai, &target_tx->tai); 7181 7182 unlock_user_struct(target_tx, target_addr, 0); 7183 return 0; 7184 } 7185 7186 static inline abi_long host_to_target_timex(abi_long target_addr, 7187 struct timex *host_tx) 7188 { 7189 struct target_timex *target_tx; 7190 7191 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7192 return -TARGET_EFAULT; 7193 } 7194 7195 __put_user(host_tx->modes, &target_tx->modes); 7196 __put_user(host_tx->offset, &target_tx->offset); 7197 __put_user(host_tx->freq, &target_tx->freq); 7198 __put_user(host_tx->maxerror, &target_tx->maxerror); 7199 __put_user(host_tx->esterror, &target_tx->esterror); 7200 __put_user(host_tx->status, &target_tx->status); 7201 __put_user(host_tx->constant, &target_tx->constant); 7202 __put_user(host_tx->precision, &target_tx->precision); 7203 __put_user(host_tx->tolerance, &target_tx->tolerance); 7204 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7205 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7206 __put_user(host_tx->tick, &target_tx->tick); 7207 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7208 __put_user(host_tx->jitter, &target_tx->jitter); 7209 __put_user(host_tx->shift, &target_tx->shift); 7210 __put_user(host_tx->stabil, &target_tx->stabil); 7211 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7212 __put_user(host_tx->calcnt, &target_tx->calcnt); 7213 __put_user(host_tx->errcnt, &target_tx->errcnt); 7214 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7215 __put_user(host_tx->tai, &target_tx->tai); 7216 7217 unlock_user_struct(target_tx, target_addr, 1); 7218 return 0; 7219 } 7220 #endif 7221 7222 7223 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 7224 static inline abi_long target_to_host_timex64(struct timex *host_tx, 7225 abi_long target_addr) 7226 { 7227 struct target__kernel_timex *target_tx; 7228 7229 if (copy_from_user_timeval64(&host_tx->time, target_addr + 7230 offsetof(struct target__kernel_timex, 7231 time))) { 7232 return -TARGET_EFAULT; 7233 } 7234 7235 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7236 return -TARGET_EFAULT; 7237 } 7238 7239 __get_user(host_tx->modes, &target_tx->modes); 7240 __get_user(host_tx->offset, &target_tx->offset); 7241 __get_user(host_tx->freq, &target_tx->freq); 7242 __get_user(host_tx->maxerror, &target_tx->maxerror); 7243 __get_user(host_tx->esterror, &target_tx->esterror); 7244 __get_user(host_tx->status, &target_tx->status); 7245 __get_user(host_tx->constant, &target_tx->constant); 7246 __get_user(host_tx->precision, &target_tx->precision); 7247 __get_user(host_tx->tolerance, &target_tx->tolerance); 7248 __get_user(host_tx->tick, &target_tx->tick); 7249 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7250 __get_user(host_tx->jitter, &target_tx->jitter); 7251 __get_user(host_tx->shift, &target_tx->shift); 7252 __get_user(host_tx->stabil, &target_tx->stabil); 7253 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7254 __get_user(host_tx->calcnt, &target_tx->calcnt); 7255 __get_user(host_tx->errcnt, &target_tx->errcnt); 7256 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7257 __get_user(host_tx->tai, &target_tx->tai); 7258 7259 unlock_user_struct(target_tx, target_addr, 0); 7260 return 0; 7261 } 7262 7263 static inline abi_long host_to_target_timex64(abi_long target_addr, 7264 struct timex *host_tx) 7265 { 7266 struct target__kernel_timex *target_tx; 7267 7268 if (copy_to_user_timeval64(target_addr + 7269 offsetof(struct target__kernel_timex, time), 7270 &host_tx->time)) { 7271 return -TARGET_EFAULT; 7272 } 7273 7274 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7275 return -TARGET_EFAULT; 7276 } 7277 7278 __put_user(host_tx->modes, &target_tx->modes); 7279 __put_user(host_tx->offset, &target_tx->offset); 7280 __put_user(host_tx->freq, &target_tx->freq); 7281 __put_user(host_tx->maxerror, &target_tx->maxerror); 7282 __put_user(host_tx->esterror, &target_tx->esterror); 7283 __put_user(host_tx->status, &target_tx->status); 7284 __put_user(host_tx->constant, &target_tx->constant); 7285 __put_user(host_tx->precision, &target_tx->precision); 7286 __put_user(host_tx->tolerance, &target_tx->tolerance); 7287 __put_user(host_tx->tick, &target_tx->tick); 7288 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7289 __put_user(host_tx->jitter, &target_tx->jitter); 7290 __put_user(host_tx->shift, &target_tx->shift); 7291 __put_user(host_tx->stabil, &target_tx->stabil); 7292 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7293 __put_user(host_tx->calcnt, &target_tx->calcnt); 7294 __put_user(host_tx->errcnt, &target_tx->errcnt); 7295 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7296 __put_user(host_tx->tai, &target_tx->tai); 7297 7298 unlock_user_struct(target_tx, target_addr, 1); 7299 return 0; 7300 } 7301 #endif 7302 7303 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 7304 abi_ulong target_addr) 7305 { 7306 struct target_sigevent *target_sevp; 7307 7308 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 7309 return -TARGET_EFAULT; 7310 } 7311 7312 /* This union is awkward on 64 bit systems because it has a 32 bit 7313 * integer and a pointer in it; we follow the conversion approach 7314 * used for handling sigval types in signal.c so the guest should get 7315 * the correct value back even if we did a 64 bit byteswap and it's 7316 * using the 32 bit integer. 7317 */ 7318 host_sevp->sigev_value.sival_ptr = 7319 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 7320 host_sevp->sigev_signo = 7321 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 7322 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 7323 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid); 7324 7325 unlock_user_struct(target_sevp, target_addr, 1); 7326 return 0; 7327 } 7328 7329 #if defined(TARGET_NR_mlockall) 7330 static inline int target_to_host_mlockall_arg(int arg) 7331 { 7332 int result = 0; 7333 7334 if (arg & TARGET_MCL_CURRENT) { 7335 result |= MCL_CURRENT; 7336 } 7337 if (arg & TARGET_MCL_FUTURE) { 7338 result |= MCL_FUTURE; 7339 } 7340 #ifdef MCL_ONFAULT 7341 if (arg & TARGET_MCL_ONFAULT) { 7342 result |= MCL_ONFAULT; 7343 } 7344 #endif 7345 7346 return result; 7347 } 7348 #endif 7349 7350 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \ 7351 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \ 7352 defined(TARGET_NR_newfstatat)) 7353 static inline abi_long host_to_target_stat64(void *cpu_env, 7354 abi_ulong target_addr, 7355 struct stat *host_st) 7356 { 7357 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 7358 if (((CPUARMState *)cpu_env)->eabi) { 7359 struct target_eabi_stat64 *target_st; 7360 7361 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7362 return -TARGET_EFAULT; 7363 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 7364 __put_user(host_st->st_dev, &target_st->st_dev); 7365 __put_user(host_st->st_ino, &target_st->st_ino); 7366 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7367 __put_user(host_st->st_ino, &target_st->__st_ino); 7368 #endif 7369 __put_user(host_st->st_mode, &target_st->st_mode); 7370 __put_user(host_st->st_nlink, &target_st->st_nlink); 7371 __put_user(host_st->st_uid, &target_st->st_uid); 7372 __put_user(host_st->st_gid, &target_st->st_gid); 7373 __put_user(host_st->st_rdev, &target_st->st_rdev); 7374 __put_user(host_st->st_size, &target_st->st_size); 7375 __put_user(host_st->st_blksize, &target_st->st_blksize); 7376 __put_user(host_st->st_blocks, &target_st->st_blocks); 7377 __put_user(host_st->st_atime, &target_st->target_st_atime); 7378 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7379 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7380 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700 7381 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); 7382 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); 7383 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); 7384 #endif 7385 unlock_user_struct(target_st, target_addr, 1); 7386 } else 7387 #endif 7388 { 7389 #if defined(TARGET_HAS_STRUCT_STAT64) 7390 struct target_stat64 *target_st; 7391 #else 7392 struct target_stat *target_st; 7393 #endif 7394 7395 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7396 return -TARGET_EFAULT; 7397 memset(target_st, 0, sizeof(*target_st)); 7398 __put_user(host_st->st_dev, &target_st->st_dev); 7399 __put_user(host_st->st_ino, &target_st->st_ino); 7400 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7401 __put_user(host_st->st_ino, &target_st->__st_ino); 7402 #endif 7403 __put_user(host_st->st_mode, &target_st->st_mode); 7404 __put_user(host_st->st_nlink, &target_st->st_nlink); 7405 __put_user(host_st->st_uid, &target_st->st_uid); 7406 __put_user(host_st->st_gid, &target_st->st_gid); 7407 __put_user(host_st->st_rdev, &target_st->st_rdev); 7408 /* XXX: better use of kernel struct */ 7409 __put_user(host_st->st_size, &target_st->st_size); 7410 __put_user(host_st->st_blksize, &target_st->st_blksize); 7411 __put_user(host_st->st_blocks, &target_st->st_blocks); 7412 __put_user(host_st->st_atime, &target_st->target_st_atime); 7413 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7414 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7415 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700 7416 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); 7417 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); 7418 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); 7419 #endif 7420 unlock_user_struct(target_st, target_addr, 1); 7421 } 7422 7423 return 0; 7424 } 7425 #endif 7426 7427 #if defined(TARGET_NR_statx) && defined(__NR_statx) 7428 static inline abi_long host_to_target_statx(struct target_statx *host_stx, 7429 abi_ulong target_addr) 7430 { 7431 struct target_statx *target_stx; 7432 7433 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) { 7434 return -TARGET_EFAULT; 7435 } 7436 memset(target_stx, 0, sizeof(*target_stx)); 7437 7438 __put_user(host_stx->stx_mask, &target_stx->stx_mask); 7439 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize); 7440 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes); 7441 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink); 7442 __put_user(host_stx->stx_uid, &target_stx->stx_uid); 7443 __put_user(host_stx->stx_gid, &target_stx->stx_gid); 7444 __put_user(host_stx->stx_mode, &target_stx->stx_mode); 7445 __put_user(host_stx->stx_ino, &target_stx->stx_ino); 7446 __put_user(host_stx->stx_size, &target_stx->stx_size); 7447 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks); 7448 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask); 7449 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec); 7450 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec); 7451 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec); 7452 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec); 7453 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec); 7454 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec); 7455 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec); 7456 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec); 7457 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major); 7458 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor); 7459 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major); 7460 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor); 7461 7462 unlock_user_struct(target_stx, target_addr, 1); 7463 7464 return 0; 7465 } 7466 #endif 7467 7468 static int do_sys_futex(int *uaddr, int op, int val, 7469 const struct timespec *timeout, int *uaddr2, 7470 int val3) 7471 { 7472 #if HOST_LONG_BITS == 64 7473 #if defined(__NR_futex) 7474 /* always a 64-bit time_t, it doesn't define _time64 version */ 7475 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7476 7477 #endif 7478 #else /* HOST_LONG_BITS == 64 */ 7479 #if defined(__NR_futex_time64) 7480 if (sizeof(timeout->tv_sec) == 8) { 7481 /* _time64 function on 32bit arch */ 7482 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3); 7483 } 7484 #endif 7485 #if defined(__NR_futex) 7486 /* old function on 32bit arch */ 7487 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7488 #endif 7489 #endif /* HOST_LONG_BITS == 64 */ 7490 g_assert_not_reached(); 7491 } 7492 7493 static int do_safe_futex(int *uaddr, int op, int val, 7494 const struct timespec *timeout, int *uaddr2, 7495 int val3) 7496 { 7497 #if HOST_LONG_BITS == 64 7498 #if defined(__NR_futex) 7499 /* always a 64-bit time_t, it doesn't define _time64 version */ 7500 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7501 #endif 7502 #else /* HOST_LONG_BITS == 64 */ 7503 #if defined(__NR_futex_time64) 7504 if (sizeof(timeout->tv_sec) == 8) { 7505 /* _time64 function on 32bit arch */ 7506 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2, 7507 val3)); 7508 } 7509 #endif 7510 #if defined(__NR_futex) 7511 /* old function on 32bit arch */ 7512 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7513 #endif 7514 #endif /* HOST_LONG_BITS == 64 */ 7515 return -TARGET_ENOSYS; 7516 } 7517 7518 /* ??? Using host futex calls even when target atomic operations 7519 are not really atomic probably breaks things. However implementing 7520 futexes locally would make futexes shared between multiple processes 7521 tricky. However they're probably useless because guest atomic 7522 operations won't work either. */ 7523 #if defined(TARGET_NR_futex) 7524 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 7525 target_ulong uaddr2, int val3) 7526 { 7527 struct timespec ts, *pts; 7528 int base_op; 7529 7530 /* ??? We assume FUTEX_* constants are the same on both host 7531 and target. */ 7532 #ifdef FUTEX_CMD_MASK 7533 base_op = op & FUTEX_CMD_MASK; 7534 #else 7535 base_op = op; 7536 #endif 7537 switch (base_op) { 7538 case FUTEX_WAIT: 7539 case FUTEX_WAIT_BITSET: 7540 if (timeout) { 7541 pts = &ts; 7542 target_to_host_timespec(pts, timeout); 7543 } else { 7544 pts = NULL; 7545 } 7546 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3); 7547 case FUTEX_WAKE: 7548 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0); 7549 case FUTEX_FD: 7550 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0); 7551 case FUTEX_REQUEUE: 7552 case FUTEX_CMP_REQUEUE: 7553 case FUTEX_WAKE_OP: 7554 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7555 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7556 But the prototype takes a `struct timespec *'; insert casts 7557 to satisfy the compiler. We do not need to tswap TIMEOUT 7558 since it's not compared to guest memory. */ 7559 pts = (struct timespec *)(uintptr_t) timeout; 7560 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2), 7561 (base_op == FUTEX_CMP_REQUEUE 7562 ? tswap32(val3) 7563 : val3)); 7564 default: 7565 return -TARGET_ENOSYS; 7566 } 7567 } 7568 #endif 7569 7570 #if defined(TARGET_NR_futex_time64) 7571 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout, 7572 target_ulong uaddr2, int val3) 7573 { 7574 struct timespec ts, *pts; 7575 int base_op; 7576 7577 /* ??? We assume FUTEX_* constants are the same on both host 7578 and target. */ 7579 #ifdef FUTEX_CMD_MASK 7580 base_op = op & FUTEX_CMD_MASK; 7581 #else 7582 base_op = op; 7583 #endif 7584 switch (base_op) { 7585 case FUTEX_WAIT: 7586 case FUTEX_WAIT_BITSET: 7587 if (timeout) { 7588 pts = &ts; 7589 target_to_host_timespec64(pts, timeout); 7590 } else { 7591 pts = NULL; 7592 } 7593 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3); 7594 case FUTEX_WAKE: 7595 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0); 7596 case FUTEX_FD: 7597 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0); 7598 case FUTEX_REQUEUE: 7599 case FUTEX_CMP_REQUEUE: 7600 case FUTEX_WAKE_OP: 7601 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 7602 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 7603 But the prototype takes a `struct timespec *'; insert casts 7604 to satisfy the compiler. We do not need to tswap TIMEOUT 7605 since it's not compared to guest memory. */ 7606 pts = (struct timespec *)(uintptr_t) timeout; 7607 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2), 7608 (base_op == FUTEX_CMP_REQUEUE 7609 ? tswap32(val3) 7610 : val3)); 7611 default: 7612 return -TARGET_ENOSYS; 7613 } 7614 } 7615 #endif 7616 7617 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7618 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7619 abi_long handle, abi_long mount_id, 7620 abi_long flags) 7621 { 7622 struct file_handle *target_fh; 7623 struct file_handle *fh; 7624 int mid = 0; 7625 abi_long ret; 7626 char *name; 7627 unsigned int size, total_size; 7628 7629 if (get_user_s32(size, handle)) { 7630 return -TARGET_EFAULT; 7631 } 7632 7633 name = lock_user_string(pathname); 7634 if (!name) { 7635 return -TARGET_EFAULT; 7636 } 7637 7638 total_size = sizeof(struct file_handle) + size; 7639 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7640 if (!target_fh) { 7641 unlock_user(name, pathname, 0); 7642 return -TARGET_EFAULT; 7643 } 7644 7645 fh = g_malloc0(total_size); 7646 fh->handle_bytes = size; 7647 7648 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7649 unlock_user(name, pathname, 0); 7650 7651 /* man name_to_handle_at(2): 7652 * Other than the use of the handle_bytes field, the caller should treat 7653 * the file_handle structure as an opaque data type 7654 */ 7655 7656 memcpy(target_fh, fh, total_size); 7657 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7658 target_fh->handle_type = tswap32(fh->handle_type); 7659 g_free(fh); 7660 unlock_user(target_fh, handle, total_size); 7661 7662 if (put_user_s32(mid, mount_id)) { 7663 return -TARGET_EFAULT; 7664 } 7665 7666 return ret; 7667 7668 } 7669 #endif 7670 7671 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7672 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7673 abi_long flags) 7674 { 7675 struct file_handle *target_fh; 7676 struct file_handle *fh; 7677 unsigned int size, total_size; 7678 abi_long ret; 7679 7680 if (get_user_s32(size, handle)) { 7681 return -TARGET_EFAULT; 7682 } 7683 7684 total_size = sizeof(struct file_handle) + size; 7685 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7686 if (!target_fh) { 7687 return -TARGET_EFAULT; 7688 } 7689 7690 fh = g_memdup(target_fh, total_size); 7691 fh->handle_bytes = size; 7692 fh->handle_type = tswap32(target_fh->handle_type); 7693 7694 ret = get_errno(open_by_handle_at(mount_fd, fh, 7695 target_to_host_bitmask(flags, fcntl_flags_tbl))); 7696 7697 g_free(fh); 7698 7699 unlock_user(target_fh, handle, total_size); 7700 7701 return ret; 7702 } 7703 #endif 7704 7705 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7706 7707 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7708 { 7709 int host_flags; 7710 target_sigset_t *target_mask; 7711 sigset_t host_mask; 7712 abi_long ret; 7713 7714 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) { 7715 return -TARGET_EINVAL; 7716 } 7717 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7718 return -TARGET_EFAULT; 7719 } 7720 7721 target_to_host_sigset(&host_mask, target_mask); 7722 7723 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 7724 7725 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 7726 if (ret >= 0) { 7727 fd_trans_register(ret, &target_signalfd_trans); 7728 } 7729 7730 unlock_user_struct(target_mask, mask, 0); 7731 7732 return ret; 7733 } 7734 #endif 7735 7736 /* Map host to target signal numbers for the wait family of syscalls. 7737 Assume all other status bits are the same. */ 7738 int host_to_target_waitstatus(int status) 7739 { 7740 if (WIFSIGNALED(status)) { 7741 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 7742 } 7743 if (WIFSTOPPED(status)) { 7744 return (host_to_target_signal(WSTOPSIG(status)) << 8) 7745 | (status & 0xff); 7746 } 7747 return status; 7748 } 7749 7750 static int open_self_cmdline(void *cpu_env, int fd) 7751 { 7752 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7753 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 7754 int i; 7755 7756 for (i = 0; i < bprm->argc; i++) { 7757 size_t len = strlen(bprm->argv[i]) + 1; 7758 7759 if (write(fd, bprm->argv[i], len) != len) { 7760 return -1; 7761 } 7762 } 7763 7764 return 0; 7765 } 7766 7767 static int open_self_maps(void *cpu_env, int fd) 7768 { 7769 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7770 TaskState *ts = cpu->opaque; 7771 GSList *map_info = read_self_maps(); 7772 GSList *s; 7773 int count; 7774 7775 for (s = map_info; s; s = g_slist_next(s)) { 7776 MapInfo *e = (MapInfo *) s->data; 7777 7778 if (h2g_valid(e->start)) { 7779 unsigned long min = e->start; 7780 unsigned long max = e->end; 7781 int flags = page_get_flags(h2g(min)); 7782 const char *path; 7783 7784 max = h2g_valid(max - 1) ? 7785 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1; 7786 7787 if (page_check_range(h2g(min), max - min, flags) == -1) { 7788 continue; 7789 } 7790 7791 if (h2g(min) == ts->info->stack_limit) { 7792 path = "[stack]"; 7793 } else { 7794 path = e->path; 7795 } 7796 7797 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr 7798 " %c%c%c%c %08" PRIx64 " %s %"PRId64, 7799 h2g(min), h2g(max - 1) + 1, 7800 e->is_read ? 'r' : '-', 7801 e->is_write ? 'w' : '-', 7802 e->is_exec ? 'x' : '-', 7803 e->is_priv ? 'p' : '-', 7804 (uint64_t) e->offset, e->dev, e->inode); 7805 if (path) { 7806 dprintf(fd, "%*s%s\n", 73 - count, "", path); 7807 } else { 7808 dprintf(fd, "\n"); 7809 } 7810 } 7811 } 7812 7813 free_self_maps(map_info); 7814 7815 #ifdef TARGET_VSYSCALL_PAGE 7816 /* 7817 * We only support execution from the vsyscall page. 7818 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3. 7819 */ 7820 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx 7821 " --xp 00000000 00:00 0", 7822 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE); 7823 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]"); 7824 #endif 7825 7826 return 0; 7827 } 7828 7829 static int open_self_stat(void *cpu_env, int fd) 7830 { 7831 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7832 TaskState *ts = cpu->opaque; 7833 g_autoptr(GString) buf = g_string_new(NULL); 7834 int i; 7835 7836 for (i = 0; i < 44; i++) { 7837 if (i == 0) { 7838 /* pid */ 7839 g_string_printf(buf, FMT_pid " ", getpid()); 7840 } else if (i == 1) { 7841 /* app name */ 7842 gchar *bin = g_strrstr(ts->bprm->argv[0], "/"); 7843 bin = bin ? bin + 1 : ts->bprm->argv[0]; 7844 g_string_printf(buf, "(%.15s) ", bin); 7845 } else if (i == 27) { 7846 /* stack bottom */ 7847 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack); 7848 } else { 7849 /* for the rest, there is MasterCard */ 7850 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' '); 7851 } 7852 7853 if (write(fd, buf->str, buf->len) != buf->len) { 7854 return -1; 7855 } 7856 } 7857 7858 return 0; 7859 } 7860 7861 static int open_self_auxv(void *cpu_env, int fd) 7862 { 7863 CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 7864 TaskState *ts = cpu->opaque; 7865 abi_ulong auxv = ts->info->saved_auxv; 7866 abi_ulong len = ts->info->auxv_len; 7867 char *ptr; 7868 7869 /* 7870 * Auxiliary vector is stored in target process stack. 7871 * read in whole auxv vector and copy it to file 7872 */ 7873 ptr = lock_user(VERIFY_READ, auxv, len, 0); 7874 if (ptr != NULL) { 7875 while (len > 0) { 7876 ssize_t r; 7877 r = write(fd, ptr, len); 7878 if (r <= 0) { 7879 break; 7880 } 7881 len -= r; 7882 ptr += r; 7883 } 7884 lseek(fd, 0, SEEK_SET); 7885 unlock_user(ptr, auxv, len); 7886 } 7887 7888 return 0; 7889 } 7890 7891 static int is_proc_myself(const char *filename, const char *entry) 7892 { 7893 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 7894 filename += strlen("/proc/"); 7895 if (!strncmp(filename, "self/", strlen("self/"))) { 7896 filename += strlen("self/"); 7897 } else if (*filename >= '1' && *filename <= '9') { 7898 char myself[80]; 7899 snprintf(myself, sizeof(myself), "%d/", getpid()); 7900 if (!strncmp(filename, myself, strlen(myself))) { 7901 filename += strlen(myself); 7902 } else { 7903 return 0; 7904 } 7905 } else { 7906 return 0; 7907 } 7908 if (!strcmp(filename, entry)) { 7909 return 1; 7910 } 7911 } 7912 return 0; 7913 } 7914 7915 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \ 7916 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) 7917 static int is_proc(const char *filename, const char *entry) 7918 { 7919 return strcmp(filename, entry) == 0; 7920 } 7921 #endif 7922 7923 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 7924 static int open_net_route(void *cpu_env, int fd) 7925 { 7926 FILE *fp; 7927 char *line = NULL; 7928 size_t len = 0; 7929 ssize_t read; 7930 7931 fp = fopen("/proc/net/route", "r"); 7932 if (fp == NULL) { 7933 return -1; 7934 } 7935 7936 /* read header */ 7937 7938 read = getline(&line, &len, fp); 7939 dprintf(fd, "%s", line); 7940 7941 /* read routes */ 7942 7943 while ((read = getline(&line, &len, fp)) != -1) { 7944 char iface[16]; 7945 uint32_t dest, gw, mask; 7946 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 7947 int fields; 7948 7949 fields = sscanf(line, 7950 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7951 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 7952 &mask, &mtu, &window, &irtt); 7953 if (fields != 11) { 7954 continue; 7955 } 7956 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 7957 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 7958 metric, tswap32(mask), mtu, window, irtt); 7959 } 7960 7961 free(line); 7962 fclose(fp); 7963 7964 return 0; 7965 } 7966 #endif 7967 7968 #if defined(TARGET_SPARC) 7969 static int open_cpuinfo(void *cpu_env, int fd) 7970 { 7971 dprintf(fd, "type\t\t: sun4u\n"); 7972 return 0; 7973 } 7974 #endif 7975 7976 #if defined(TARGET_HPPA) 7977 static int open_cpuinfo(void *cpu_env, int fd) 7978 { 7979 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n"); 7980 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n"); 7981 dprintf(fd, "capabilities\t: os32\n"); 7982 dprintf(fd, "model\t\t: 9000/778/B160L\n"); 7983 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n"); 7984 return 0; 7985 } 7986 #endif 7987 7988 #if defined(TARGET_M68K) 7989 static int open_hardware(void *cpu_env, int fd) 7990 { 7991 dprintf(fd, "Model:\t\tqemu-m68k\n"); 7992 return 0; 7993 } 7994 #endif 7995 7996 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 7997 { 7998 struct fake_open { 7999 const char *filename; 8000 int (*fill)(void *cpu_env, int fd); 8001 int (*cmp)(const char *s1, const char *s2); 8002 }; 8003 const struct fake_open *fake_open; 8004 static const struct fake_open fakes[] = { 8005 { "maps", open_self_maps, is_proc_myself }, 8006 { "stat", open_self_stat, is_proc_myself }, 8007 { "auxv", open_self_auxv, is_proc_myself }, 8008 { "cmdline", open_self_cmdline, is_proc_myself }, 8009 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 8010 { "/proc/net/route", open_net_route, is_proc }, 8011 #endif 8012 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) 8013 { "/proc/cpuinfo", open_cpuinfo, is_proc }, 8014 #endif 8015 #if defined(TARGET_M68K) 8016 { "/proc/hardware", open_hardware, is_proc }, 8017 #endif 8018 { NULL, NULL, NULL } 8019 }; 8020 8021 if (is_proc_myself(pathname, "exe")) { 8022 int execfd = qemu_getauxval(AT_EXECFD); 8023 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode); 8024 } 8025 8026 for (fake_open = fakes; fake_open->filename; fake_open++) { 8027 if (fake_open->cmp(pathname, fake_open->filename)) { 8028 break; 8029 } 8030 } 8031 8032 if (fake_open->filename) { 8033 const char *tmpdir; 8034 char filename[PATH_MAX]; 8035 int fd, r; 8036 8037 /* create temporary file to map stat to */ 8038 tmpdir = getenv("TMPDIR"); 8039 if (!tmpdir) 8040 tmpdir = "/tmp"; 8041 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 8042 fd = mkstemp(filename); 8043 if (fd < 0) { 8044 return fd; 8045 } 8046 unlink(filename); 8047 8048 if ((r = fake_open->fill(cpu_env, fd))) { 8049 int e = errno; 8050 close(fd); 8051 errno = e; 8052 return r; 8053 } 8054 lseek(fd, 0, SEEK_SET); 8055 8056 return fd; 8057 } 8058 8059 return safe_openat(dirfd, path(pathname), flags, mode); 8060 } 8061 8062 #define TIMER_MAGIC 0x0caf0000 8063 #define TIMER_MAGIC_MASK 0xffff0000 8064 8065 /* Convert QEMU provided timer ID back to internal 16bit index format */ 8066 static target_timer_t get_timer_id(abi_long arg) 8067 { 8068 target_timer_t timerid = arg; 8069 8070 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 8071 return -TARGET_EINVAL; 8072 } 8073 8074 timerid &= 0xffff; 8075 8076 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 8077 return -TARGET_EINVAL; 8078 } 8079 8080 return timerid; 8081 } 8082 8083 static int target_to_host_cpu_mask(unsigned long *host_mask, 8084 size_t host_size, 8085 abi_ulong target_addr, 8086 size_t target_size) 8087 { 8088 unsigned target_bits = sizeof(abi_ulong) * 8; 8089 unsigned host_bits = sizeof(*host_mask) * 8; 8090 abi_ulong *target_mask; 8091 unsigned i, j; 8092 8093 assert(host_size >= target_size); 8094 8095 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1); 8096 if (!target_mask) { 8097 return -TARGET_EFAULT; 8098 } 8099 memset(host_mask, 0, host_size); 8100 8101 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8102 unsigned bit = i * target_bits; 8103 abi_ulong val; 8104 8105 __get_user(val, &target_mask[i]); 8106 for (j = 0; j < target_bits; j++, bit++) { 8107 if (val & (1UL << j)) { 8108 host_mask[bit / host_bits] |= 1UL << (bit % host_bits); 8109 } 8110 } 8111 } 8112 8113 unlock_user(target_mask, target_addr, 0); 8114 return 0; 8115 } 8116 8117 static int host_to_target_cpu_mask(const unsigned long *host_mask, 8118 size_t host_size, 8119 abi_ulong target_addr, 8120 size_t target_size) 8121 { 8122 unsigned target_bits = sizeof(abi_ulong) * 8; 8123 unsigned host_bits = sizeof(*host_mask) * 8; 8124 abi_ulong *target_mask; 8125 unsigned i, j; 8126 8127 assert(host_size >= target_size); 8128 8129 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0); 8130 if (!target_mask) { 8131 return -TARGET_EFAULT; 8132 } 8133 8134 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8135 unsigned bit = i * target_bits; 8136 abi_ulong val = 0; 8137 8138 for (j = 0; j < target_bits; j++, bit++) { 8139 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 8140 val |= 1UL << j; 8141 } 8142 } 8143 __put_user(val, &target_mask[i]); 8144 } 8145 8146 unlock_user(target_mask, target_addr, target_size); 8147 return 0; 8148 } 8149 8150 /* This is an internal helper for do_syscall so that it is easier 8151 * to have a single return point, so that actions, such as logging 8152 * of syscall results, can be performed. 8153 * All errnos that do_syscall() returns must be -TARGET_<errcode>. 8154 */ 8155 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, 8156 abi_long arg2, abi_long arg3, abi_long arg4, 8157 abi_long arg5, abi_long arg6, abi_long arg7, 8158 abi_long arg8) 8159 { 8160 CPUState *cpu = env_cpu(cpu_env); 8161 abi_long ret; 8162 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \ 8163 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \ 8164 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \ 8165 || defined(TARGET_NR_statx) 8166 struct stat st; 8167 #endif 8168 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \ 8169 || defined(TARGET_NR_fstatfs) 8170 struct statfs stfs; 8171 #endif 8172 void *p; 8173 8174 switch(num) { 8175 case TARGET_NR_exit: 8176 /* In old applications this may be used to implement _exit(2). 8177 However in threaded applictions it is used for thread termination, 8178 and _exit_group is used for application termination. 8179 Do thread termination if we have more then one thread. */ 8180 8181 if (block_signals()) { 8182 return -TARGET_ERESTARTSYS; 8183 } 8184 8185 pthread_mutex_lock(&clone_lock); 8186 8187 if (CPU_NEXT(first_cpu)) { 8188 TaskState *ts = cpu->opaque; 8189 8190 object_property_set_bool(OBJECT(cpu), "realized", false, NULL); 8191 object_unref(OBJECT(cpu)); 8192 /* 8193 * At this point the CPU should be unrealized and removed 8194 * from cpu lists. We can clean-up the rest of the thread 8195 * data without the lock held. 8196 */ 8197 8198 pthread_mutex_unlock(&clone_lock); 8199 8200 if (ts->child_tidptr) { 8201 put_user_u32(0, ts->child_tidptr); 8202 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 8203 NULL, NULL, 0); 8204 } 8205 thread_cpu = NULL; 8206 g_free(ts); 8207 rcu_unregister_thread(); 8208 pthread_exit(NULL); 8209 } 8210 8211 pthread_mutex_unlock(&clone_lock); 8212 preexit_cleanup(cpu_env, arg1); 8213 _exit(arg1); 8214 return 0; /* avoid warning */ 8215 case TARGET_NR_read: 8216 if (arg2 == 0 && arg3 == 0) { 8217 return get_errno(safe_read(arg1, 0, 0)); 8218 } else { 8219 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8220 return -TARGET_EFAULT; 8221 ret = get_errno(safe_read(arg1, p, arg3)); 8222 if (ret >= 0 && 8223 fd_trans_host_to_target_data(arg1)) { 8224 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8225 } 8226 unlock_user(p, arg2, ret); 8227 } 8228 return ret; 8229 case TARGET_NR_write: 8230 if (arg2 == 0 && arg3 == 0) { 8231 return get_errno(safe_write(arg1, 0, 0)); 8232 } 8233 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8234 return -TARGET_EFAULT; 8235 if (fd_trans_target_to_host_data(arg1)) { 8236 void *copy = g_malloc(arg3); 8237 memcpy(copy, p, arg3); 8238 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8239 if (ret >= 0) { 8240 ret = get_errno(safe_write(arg1, copy, ret)); 8241 } 8242 g_free(copy); 8243 } else { 8244 ret = get_errno(safe_write(arg1, p, arg3)); 8245 } 8246 unlock_user(p, arg2, 0); 8247 return ret; 8248 8249 #ifdef TARGET_NR_open 8250 case TARGET_NR_open: 8251 if (!(p = lock_user_string(arg1))) 8252 return -TARGET_EFAULT; 8253 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8254 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8255 arg3)); 8256 fd_trans_unregister(ret); 8257 unlock_user(p, arg1, 0); 8258 return ret; 8259 #endif 8260 case TARGET_NR_openat: 8261 if (!(p = lock_user_string(arg2))) 8262 return -TARGET_EFAULT; 8263 ret = get_errno(do_openat(cpu_env, arg1, p, 8264 target_to_host_bitmask(arg3, fcntl_flags_tbl), 8265 arg4)); 8266 fd_trans_unregister(ret); 8267 unlock_user(p, arg2, 0); 8268 return ret; 8269 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8270 case TARGET_NR_name_to_handle_at: 8271 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8272 return ret; 8273 #endif 8274 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8275 case TARGET_NR_open_by_handle_at: 8276 ret = do_open_by_handle_at(arg1, arg2, arg3); 8277 fd_trans_unregister(ret); 8278 return ret; 8279 #endif 8280 case TARGET_NR_close: 8281 fd_trans_unregister(arg1); 8282 return get_errno(close(arg1)); 8283 8284 case TARGET_NR_brk: 8285 return do_brk(arg1); 8286 #ifdef TARGET_NR_fork 8287 case TARGET_NR_fork: 8288 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8289 #endif 8290 #ifdef TARGET_NR_waitpid 8291 case TARGET_NR_waitpid: 8292 { 8293 int status; 8294 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8295 if (!is_error(ret) && arg2 && ret 8296 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8297 return -TARGET_EFAULT; 8298 } 8299 return ret; 8300 #endif 8301 #ifdef TARGET_NR_waitid 8302 case TARGET_NR_waitid: 8303 { 8304 siginfo_t info; 8305 info.si_pid = 0; 8306 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8307 if (!is_error(ret) && arg3 && info.si_pid != 0) { 8308 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8309 return -TARGET_EFAULT; 8310 host_to_target_siginfo(p, &info); 8311 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8312 } 8313 } 8314 return ret; 8315 #endif 8316 #ifdef TARGET_NR_creat /* not on alpha */ 8317 case TARGET_NR_creat: 8318 if (!(p = lock_user_string(arg1))) 8319 return -TARGET_EFAULT; 8320 ret = get_errno(creat(p, arg2)); 8321 fd_trans_unregister(ret); 8322 unlock_user(p, arg1, 0); 8323 return ret; 8324 #endif 8325 #ifdef TARGET_NR_link 8326 case TARGET_NR_link: 8327 { 8328 void * p2; 8329 p = lock_user_string(arg1); 8330 p2 = lock_user_string(arg2); 8331 if (!p || !p2) 8332 ret = -TARGET_EFAULT; 8333 else 8334 ret = get_errno(link(p, p2)); 8335 unlock_user(p2, arg2, 0); 8336 unlock_user(p, arg1, 0); 8337 } 8338 return ret; 8339 #endif 8340 #if defined(TARGET_NR_linkat) 8341 case TARGET_NR_linkat: 8342 { 8343 void * p2 = NULL; 8344 if (!arg2 || !arg4) 8345 return -TARGET_EFAULT; 8346 p = lock_user_string(arg2); 8347 p2 = lock_user_string(arg4); 8348 if (!p || !p2) 8349 ret = -TARGET_EFAULT; 8350 else 8351 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8352 unlock_user(p, arg2, 0); 8353 unlock_user(p2, arg4, 0); 8354 } 8355 return ret; 8356 #endif 8357 #ifdef TARGET_NR_unlink 8358 case TARGET_NR_unlink: 8359 if (!(p = lock_user_string(arg1))) 8360 return -TARGET_EFAULT; 8361 ret = get_errno(unlink(p)); 8362 unlock_user(p, arg1, 0); 8363 return ret; 8364 #endif 8365 #if defined(TARGET_NR_unlinkat) 8366 case TARGET_NR_unlinkat: 8367 if (!(p = lock_user_string(arg2))) 8368 return -TARGET_EFAULT; 8369 ret = get_errno(unlinkat(arg1, p, arg3)); 8370 unlock_user(p, arg2, 0); 8371 return ret; 8372 #endif 8373 case TARGET_NR_execve: 8374 { 8375 char **argp, **envp; 8376 int argc, envc; 8377 abi_ulong gp; 8378 abi_ulong guest_argp; 8379 abi_ulong guest_envp; 8380 abi_ulong addr; 8381 char **q; 8382 int total_size = 0; 8383 8384 argc = 0; 8385 guest_argp = arg2; 8386 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8387 if (get_user_ual(addr, gp)) 8388 return -TARGET_EFAULT; 8389 if (!addr) 8390 break; 8391 argc++; 8392 } 8393 envc = 0; 8394 guest_envp = arg3; 8395 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8396 if (get_user_ual(addr, gp)) 8397 return -TARGET_EFAULT; 8398 if (!addr) 8399 break; 8400 envc++; 8401 } 8402 8403 argp = g_new0(char *, argc + 1); 8404 envp = g_new0(char *, envc + 1); 8405 8406 for (gp = guest_argp, q = argp; gp; 8407 gp += sizeof(abi_ulong), q++) { 8408 if (get_user_ual(addr, gp)) 8409 goto execve_efault; 8410 if (!addr) 8411 break; 8412 if (!(*q = lock_user_string(addr))) 8413 goto execve_efault; 8414 total_size += strlen(*q) + 1; 8415 } 8416 *q = NULL; 8417 8418 for (gp = guest_envp, q = envp; gp; 8419 gp += sizeof(abi_ulong), q++) { 8420 if (get_user_ual(addr, gp)) 8421 goto execve_efault; 8422 if (!addr) 8423 break; 8424 if (!(*q = lock_user_string(addr))) 8425 goto execve_efault; 8426 total_size += strlen(*q) + 1; 8427 } 8428 *q = NULL; 8429 8430 if (!(p = lock_user_string(arg1))) 8431 goto execve_efault; 8432 /* Although execve() is not an interruptible syscall it is 8433 * a special case where we must use the safe_syscall wrapper: 8434 * if we allow a signal to happen before we make the host 8435 * syscall then we will 'lose' it, because at the point of 8436 * execve the process leaves QEMU's control. So we use the 8437 * safe syscall wrapper to ensure that we either take the 8438 * signal as a guest signal, or else it does not happen 8439 * before the execve completes and makes it the other 8440 * program's problem. 8441 */ 8442 ret = get_errno(safe_execve(p, argp, envp)); 8443 unlock_user(p, arg1, 0); 8444 8445 goto execve_end; 8446 8447 execve_efault: 8448 ret = -TARGET_EFAULT; 8449 8450 execve_end: 8451 for (gp = guest_argp, q = argp; *q; 8452 gp += sizeof(abi_ulong), q++) { 8453 if (get_user_ual(addr, gp) 8454 || !addr) 8455 break; 8456 unlock_user(*q, addr, 0); 8457 } 8458 for (gp = guest_envp, q = envp; *q; 8459 gp += sizeof(abi_ulong), q++) { 8460 if (get_user_ual(addr, gp) 8461 || !addr) 8462 break; 8463 unlock_user(*q, addr, 0); 8464 } 8465 8466 g_free(argp); 8467 g_free(envp); 8468 } 8469 return ret; 8470 case TARGET_NR_chdir: 8471 if (!(p = lock_user_string(arg1))) 8472 return -TARGET_EFAULT; 8473 ret = get_errno(chdir(p)); 8474 unlock_user(p, arg1, 0); 8475 return ret; 8476 #ifdef TARGET_NR_time 8477 case TARGET_NR_time: 8478 { 8479 time_t host_time; 8480 ret = get_errno(time(&host_time)); 8481 if (!is_error(ret) 8482 && arg1 8483 && put_user_sal(host_time, arg1)) 8484 return -TARGET_EFAULT; 8485 } 8486 return ret; 8487 #endif 8488 #ifdef TARGET_NR_mknod 8489 case TARGET_NR_mknod: 8490 if (!(p = lock_user_string(arg1))) 8491 return -TARGET_EFAULT; 8492 ret = get_errno(mknod(p, arg2, arg3)); 8493 unlock_user(p, arg1, 0); 8494 return ret; 8495 #endif 8496 #if defined(TARGET_NR_mknodat) 8497 case TARGET_NR_mknodat: 8498 if (!(p = lock_user_string(arg2))) 8499 return -TARGET_EFAULT; 8500 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8501 unlock_user(p, arg2, 0); 8502 return ret; 8503 #endif 8504 #ifdef TARGET_NR_chmod 8505 case TARGET_NR_chmod: 8506 if (!(p = lock_user_string(arg1))) 8507 return -TARGET_EFAULT; 8508 ret = get_errno(chmod(p, arg2)); 8509 unlock_user(p, arg1, 0); 8510 return ret; 8511 #endif 8512 #ifdef TARGET_NR_lseek 8513 case TARGET_NR_lseek: 8514 return get_errno(lseek(arg1, arg2, arg3)); 8515 #endif 8516 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8517 /* Alpha specific */ 8518 case TARGET_NR_getxpid: 8519 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 8520 return get_errno(getpid()); 8521 #endif 8522 #ifdef TARGET_NR_getpid 8523 case TARGET_NR_getpid: 8524 return get_errno(getpid()); 8525 #endif 8526 case TARGET_NR_mount: 8527 { 8528 /* need to look at the data field */ 8529 void *p2, *p3; 8530 8531 if (arg1) { 8532 p = lock_user_string(arg1); 8533 if (!p) { 8534 return -TARGET_EFAULT; 8535 } 8536 } else { 8537 p = NULL; 8538 } 8539 8540 p2 = lock_user_string(arg2); 8541 if (!p2) { 8542 if (arg1) { 8543 unlock_user(p, arg1, 0); 8544 } 8545 return -TARGET_EFAULT; 8546 } 8547 8548 if (arg3) { 8549 p3 = lock_user_string(arg3); 8550 if (!p3) { 8551 if (arg1) { 8552 unlock_user(p, arg1, 0); 8553 } 8554 unlock_user(p2, arg2, 0); 8555 return -TARGET_EFAULT; 8556 } 8557 } else { 8558 p3 = NULL; 8559 } 8560 8561 /* FIXME - arg5 should be locked, but it isn't clear how to 8562 * do that since it's not guaranteed to be a NULL-terminated 8563 * string. 8564 */ 8565 if (!arg5) { 8566 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8567 } else { 8568 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 8569 } 8570 ret = get_errno(ret); 8571 8572 if (arg1) { 8573 unlock_user(p, arg1, 0); 8574 } 8575 unlock_user(p2, arg2, 0); 8576 if (arg3) { 8577 unlock_user(p3, arg3, 0); 8578 } 8579 } 8580 return ret; 8581 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 8582 #if defined(TARGET_NR_umount) 8583 case TARGET_NR_umount: 8584 #endif 8585 #if defined(TARGET_NR_oldumount) 8586 case TARGET_NR_oldumount: 8587 #endif 8588 if (!(p = lock_user_string(arg1))) 8589 return -TARGET_EFAULT; 8590 ret = get_errno(umount(p)); 8591 unlock_user(p, arg1, 0); 8592 return ret; 8593 #endif 8594 #ifdef TARGET_NR_stime /* not on alpha */ 8595 case TARGET_NR_stime: 8596 { 8597 struct timespec ts; 8598 ts.tv_nsec = 0; 8599 if (get_user_sal(ts.tv_sec, arg1)) { 8600 return -TARGET_EFAULT; 8601 } 8602 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 8603 } 8604 #endif 8605 #ifdef TARGET_NR_alarm /* not on alpha */ 8606 case TARGET_NR_alarm: 8607 return alarm(arg1); 8608 #endif 8609 #ifdef TARGET_NR_pause /* not on alpha */ 8610 case TARGET_NR_pause: 8611 if (!block_signals()) { 8612 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 8613 } 8614 return -TARGET_EINTR; 8615 #endif 8616 #ifdef TARGET_NR_utime 8617 case TARGET_NR_utime: 8618 { 8619 struct utimbuf tbuf, *host_tbuf; 8620 struct target_utimbuf *target_tbuf; 8621 if (arg2) { 8622 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 8623 return -TARGET_EFAULT; 8624 tbuf.actime = tswapal(target_tbuf->actime); 8625 tbuf.modtime = tswapal(target_tbuf->modtime); 8626 unlock_user_struct(target_tbuf, arg2, 0); 8627 host_tbuf = &tbuf; 8628 } else { 8629 host_tbuf = NULL; 8630 } 8631 if (!(p = lock_user_string(arg1))) 8632 return -TARGET_EFAULT; 8633 ret = get_errno(utime(p, host_tbuf)); 8634 unlock_user(p, arg1, 0); 8635 } 8636 return ret; 8637 #endif 8638 #ifdef TARGET_NR_utimes 8639 case TARGET_NR_utimes: 8640 { 8641 struct timeval *tvp, tv[2]; 8642 if (arg2) { 8643 if (copy_from_user_timeval(&tv[0], arg2) 8644 || copy_from_user_timeval(&tv[1], 8645 arg2 + sizeof(struct target_timeval))) 8646 return -TARGET_EFAULT; 8647 tvp = tv; 8648 } else { 8649 tvp = NULL; 8650 } 8651 if (!(p = lock_user_string(arg1))) 8652 return -TARGET_EFAULT; 8653 ret = get_errno(utimes(p, tvp)); 8654 unlock_user(p, arg1, 0); 8655 } 8656 return ret; 8657 #endif 8658 #if defined(TARGET_NR_futimesat) 8659 case TARGET_NR_futimesat: 8660 { 8661 struct timeval *tvp, tv[2]; 8662 if (arg3) { 8663 if (copy_from_user_timeval(&tv[0], arg3) 8664 || copy_from_user_timeval(&tv[1], 8665 arg3 + sizeof(struct target_timeval))) 8666 return -TARGET_EFAULT; 8667 tvp = tv; 8668 } else { 8669 tvp = NULL; 8670 } 8671 if (!(p = lock_user_string(arg2))) { 8672 return -TARGET_EFAULT; 8673 } 8674 ret = get_errno(futimesat(arg1, path(p), tvp)); 8675 unlock_user(p, arg2, 0); 8676 } 8677 return ret; 8678 #endif 8679 #ifdef TARGET_NR_access 8680 case TARGET_NR_access: 8681 if (!(p = lock_user_string(arg1))) { 8682 return -TARGET_EFAULT; 8683 } 8684 ret = get_errno(access(path(p), arg2)); 8685 unlock_user(p, arg1, 0); 8686 return ret; 8687 #endif 8688 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 8689 case TARGET_NR_faccessat: 8690 if (!(p = lock_user_string(arg2))) { 8691 return -TARGET_EFAULT; 8692 } 8693 ret = get_errno(faccessat(arg1, p, arg3, 0)); 8694 unlock_user(p, arg2, 0); 8695 return ret; 8696 #endif 8697 #ifdef TARGET_NR_nice /* not on alpha */ 8698 case TARGET_NR_nice: 8699 return get_errno(nice(arg1)); 8700 #endif 8701 case TARGET_NR_sync: 8702 sync(); 8703 return 0; 8704 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 8705 case TARGET_NR_syncfs: 8706 return get_errno(syncfs(arg1)); 8707 #endif 8708 case TARGET_NR_kill: 8709 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 8710 #ifdef TARGET_NR_rename 8711 case TARGET_NR_rename: 8712 { 8713 void *p2; 8714 p = lock_user_string(arg1); 8715 p2 = lock_user_string(arg2); 8716 if (!p || !p2) 8717 ret = -TARGET_EFAULT; 8718 else 8719 ret = get_errno(rename(p, p2)); 8720 unlock_user(p2, arg2, 0); 8721 unlock_user(p, arg1, 0); 8722 } 8723 return ret; 8724 #endif 8725 #if defined(TARGET_NR_renameat) 8726 case TARGET_NR_renameat: 8727 { 8728 void *p2; 8729 p = lock_user_string(arg2); 8730 p2 = lock_user_string(arg4); 8731 if (!p || !p2) 8732 ret = -TARGET_EFAULT; 8733 else 8734 ret = get_errno(renameat(arg1, p, arg3, p2)); 8735 unlock_user(p2, arg4, 0); 8736 unlock_user(p, arg2, 0); 8737 } 8738 return ret; 8739 #endif 8740 #if defined(TARGET_NR_renameat2) 8741 case TARGET_NR_renameat2: 8742 { 8743 void *p2; 8744 p = lock_user_string(arg2); 8745 p2 = lock_user_string(arg4); 8746 if (!p || !p2) { 8747 ret = -TARGET_EFAULT; 8748 } else { 8749 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 8750 } 8751 unlock_user(p2, arg4, 0); 8752 unlock_user(p, arg2, 0); 8753 } 8754 return ret; 8755 #endif 8756 #ifdef TARGET_NR_mkdir 8757 case TARGET_NR_mkdir: 8758 if (!(p = lock_user_string(arg1))) 8759 return -TARGET_EFAULT; 8760 ret = get_errno(mkdir(p, arg2)); 8761 unlock_user(p, arg1, 0); 8762 return ret; 8763 #endif 8764 #if defined(TARGET_NR_mkdirat) 8765 case TARGET_NR_mkdirat: 8766 if (!(p = lock_user_string(arg2))) 8767 return -TARGET_EFAULT; 8768 ret = get_errno(mkdirat(arg1, p, arg3)); 8769 unlock_user(p, arg2, 0); 8770 return ret; 8771 #endif 8772 #ifdef TARGET_NR_rmdir 8773 case TARGET_NR_rmdir: 8774 if (!(p = lock_user_string(arg1))) 8775 return -TARGET_EFAULT; 8776 ret = get_errno(rmdir(p)); 8777 unlock_user(p, arg1, 0); 8778 return ret; 8779 #endif 8780 case TARGET_NR_dup: 8781 ret = get_errno(dup(arg1)); 8782 if (ret >= 0) { 8783 fd_trans_dup(arg1, ret); 8784 } 8785 return ret; 8786 #ifdef TARGET_NR_pipe 8787 case TARGET_NR_pipe: 8788 return do_pipe(cpu_env, arg1, 0, 0); 8789 #endif 8790 #ifdef TARGET_NR_pipe2 8791 case TARGET_NR_pipe2: 8792 return do_pipe(cpu_env, arg1, 8793 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 8794 #endif 8795 case TARGET_NR_times: 8796 { 8797 struct target_tms *tmsp; 8798 struct tms tms; 8799 ret = get_errno(times(&tms)); 8800 if (arg1) { 8801 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 8802 if (!tmsp) 8803 return -TARGET_EFAULT; 8804 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 8805 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 8806 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 8807 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 8808 } 8809 if (!is_error(ret)) 8810 ret = host_to_target_clock_t(ret); 8811 } 8812 return ret; 8813 case TARGET_NR_acct: 8814 if (arg1 == 0) { 8815 ret = get_errno(acct(NULL)); 8816 } else { 8817 if (!(p = lock_user_string(arg1))) { 8818 return -TARGET_EFAULT; 8819 } 8820 ret = get_errno(acct(path(p))); 8821 unlock_user(p, arg1, 0); 8822 } 8823 return ret; 8824 #ifdef TARGET_NR_umount2 8825 case TARGET_NR_umount2: 8826 if (!(p = lock_user_string(arg1))) 8827 return -TARGET_EFAULT; 8828 ret = get_errno(umount2(p, arg2)); 8829 unlock_user(p, arg1, 0); 8830 return ret; 8831 #endif 8832 case TARGET_NR_ioctl: 8833 return do_ioctl(arg1, arg2, arg3); 8834 #ifdef TARGET_NR_fcntl 8835 case TARGET_NR_fcntl: 8836 return do_fcntl(arg1, arg2, arg3); 8837 #endif 8838 case TARGET_NR_setpgid: 8839 return get_errno(setpgid(arg1, arg2)); 8840 case TARGET_NR_umask: 8841 return get_errno(umask(arg1)); 8842 case TARGET_NR_chroot: 8843 if (!(p = lock_user_string(arg1))) 8844 return -TARGET_EFAULT; 8845 ret = get_errno(chroot(p)); 8846 unlock_user(p, arg1, 0); 8847 return ret; 8848 #ifdef TARGET_NR_dup2 8849 case TARGET_NR_dup2: 8850 ret = get_errno(dup2(arg1, arg2)); 8851 if (ret >= 0) { 8852 fd_trans_dup(arg1, arg2); 8853 } 8854 return ret; 8855 #endif 8856 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 8857 case TARGET_NR_dup3: 8858 { 8859 int host_flags; 8860 8861 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 8862 return -EINVAL; 8863 } 8864 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 8865 ret = get_errno(dup3(arg1, arg2, host_flags)); 8866 if (ret >= 0) { 8867 fd_trans_dup(arg1, arg2); 8868 } 8869 return ret; 8870 } 8871 #endif 8872 #ifdef TARGET_NR_getppid /* not on alpha */ 8873 case TARGET_NR_getppid: 8874 return get_errno(getppid()); 8875 #endif 8876 #ifdef TARGET_NR_getpgrp 8877 case TARGET_NR_getpgrp: 8878 return get_errno(getpgrp()); 8879 #endif 8880 case TARGET_NR_setsid: 8881 return get_errno(setsid()); 8882 #ifdef TARGET_NR_sigaction 8883 case TARGET_NR_sigaction: 8884 { 8885 #if defined(TARGET_ALPHA) 8886 struct target_sigaction act, oact, *pact = 0; 8887 struct target_old_sigaction *old_act; 8888 if (arg2) { 8889 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8890 return -TARGET_EFAULT; 8891 act._sa_handler = old_act->_sa_handler; 8892 target_siginitset(&act.sa_mask, old_act->sa_mask); 8893 act.sa_flags = old_act->sa_flags; 8894 act.sa_restorer = 0; 8895 unlock_user_struct(old_act, arg2, 0); 8896 pact = &act; 8897 } 8898 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8899 if (!is_error(ret) && arg3) { 8900 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8901 return -TARGET_EFAULT; 8902 old_act->_sa_handler = oact._sa_handler; 8903 old_act->sa_mask = oact.sa_mask.sig[0]; 8904 old_act->sa_flags = oact.sa_flags; 8905 unlock_user_struct(old_act, arg3, 1); 8906 } 8907 #elif defined(TARGET_MIPS) 8908 struct target_sigaction act, oact, *pact, *old_act; 8909 8910 if (arg2) { 8911 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8912 return -TARGET_EFAULT; 8913 act._sa_handler = old_act->_sa_handler; 8914 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 8915 act.sa_flags = old_act->sa_flags; 8916 unlock_user_struct(old_act, arg2, 0); 8917 pact = &act; 8918 } else { 8919 pact = NULL; 8920 } 8921 8922 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8923 8924 if (!is_error(ret) && arg3) { 8925 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8926 return -TARGET_EFAULT; 8927 old_act->_sa_handler = oact._sa_handler; 8928 old_act->sa_flags = oact.sa_flags; 8929 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 8930 old_act->sa_mask.sig[1] = 0; 8931 old_act->sa_mask.sig[2] = 0; 8932 old_act->sa_mask.sig[3] = 0; 8933 unlock_user_struct(old_act, arg3, 1); 8934 } 8935 #else 8936 struct target_old_sigaction *old_act; 8937 struct target_sigaction act, oact, *pact; 8938 if (arg2) { 8939 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 8940 return -TARGET_EFAULT; 8941 act._sa_handler = old_act->_sa_handler; 8942 target_siginitset(&act.sa_mask, old_act->sa_mask); 8943 act.sa_flags = old_act->sa_flags; 8944 act.sa_restorer = old_act->sa_restorer; 8945 #ifdef TARGET_ARCH_HAS_KA_RESTORER 8946 act.ka_restorer = 0; 8947 #endif 8948 unlock_user_struct(old_act, arg2, 0); 8949 pact = &act; 8950 } else { 8951 pact = NULL; 8952 } 8953 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8954 if (!is_error(ret) && arg3) { 8955 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 8956 return -TARGET_EFAULT; 8957 old_act->_sa_handler = oact._sa_handler; 8958 old_act->sa_mask = oact.sa_mask.sig[0]; 8959 old_act->sa_flags = oact.sa_flags; 8960 old_act->sa_restorer = oact.sa_restorer; 8961 unlock_user_struct(old_act, arg3, 1); 8962 } 8963 #endif 8964 } 8965 return ret; 8966 #endif 8967 case TARGET_NR_rt_sigaction: 8968 { 8969 #if defined(TARGET_ALPHA) 8970 /* For Alpha and SPARC this is a 5 argument syscall, with 8971 * a 'restorer' parameter which must be copied into the 8972 * sa_restorer field of the sigaction struct. 8973 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 8974 * and arg5 is the sigsetsize. 8975 * Alpha also has a separate rt_sigaction struct that it uses 8976 * here; SPARC uses the usual sigaction struct. 8977 */ 8978 struct target_rt_sigaction *rt_act; 8979 struct target_sigaction act, oact, *pact = 0; 8980 8981 if (arg4 != sizeof(target_sigset_t)) { 8982 return -TARGET_EINVAL; 8983 } 8984 if (arg2) { 8985 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 8986 return -TARGET_EFAULT; 8987 act._sa_handler = rt_act->_sa_handler; 8988 act.sa_mask = rt_act->sa_mask; 8989 act.sa_flags = rt_act->sa_flags; 8990 act.sa_restorer = arg5; 8991 unlock_user_struct(rt_act, arg2, 0); 8992 pact = &act; 8993 } 8994 ret = get_errno(do_sigaction(arg1, pact, &oact)); 8995 if (!is_error(ret) && arg3) { 8996 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 8997 return -TARGET_EFAULT; 8998 rt_act->_sa_handler = oact._sa_handler; 8999 rt_act->sa_mask = oact.sa_mask; 9000 rt_act->sa_flags = oact.sa_flags; 9001 unlock_user_struct(rt_act, arg3, 1); 9002 } 9003 #else 9004 #ifdef TARGET_SPARC 9005 target_ulong restorer = arg4; 9006 target_ulong sigsetsize = arg5; 9007 #else 9008 target_ulong sigsetsize = arg4; 9009 #endif 9010 struct target_sigaction *act; 9011 struct target_sigaction *oact; 9012 9013 if (sigsetsize != sizeof(target_sigset_t)) { 9014 return -TARGET_EINVAL; 9015 } 9016 if (arg2) { 9017 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9018 return -TARGET_EFAULT; 9019 } 9020 #ifdef TARGET_ARCH_HAS_KA_RESTORER 9021 act->ka_restorer = restorer; 9022 #endif 9023 } else { 9024 act = NULL; 9025 } 9026 if (arg3) { 9027 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9028 ret = -TARGET_EFAULT; 9029 goto rt_sigaction_fail; 9030 } 9031 } else 9032 oact = NULL; 9033 ret = get_errno(do_sigaction(arg1, act, oact)); 9034 rt_sigaction_fail: 9035 if (act) 9036 unlock_user_struct(act, arg2, 0); 9037 if (oact) 9038 unlock_user_struct(oact, arg3, 1); 9039 #endif 9040 } 9041 return ret; 9042 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9043 case TARGET_NR_sgetmask: 9044 { 9045 sigset_t cur_set; 9046 abi_ulong target_set; 9047 ret = do_sigprocmask(0, NULL, &cur_set); 9048 if (!ret) { 9049 host_to_target_old_sigset(&target_set, &cur_set); 9050 ret = target_set; 9051 } 9052 } 9053 return ret; 9054 #endif 9055 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9056 case TARGET_NR_ssetmask: 9057 { 9058 sigset_t set, oset; 9059 abi_ulong target_set = arg1; 9060 target_to_host_old_sigset(&set, &target_set); 9061 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9062 if (!ret) { 9063 host_to_target_old_sigset(&target_set, &oset); 9064 ret = target_set; 9065 } 9066 } 9067 return ret; 9068 #endif 9069 #ifdef TARGET_NR_sigprocmask 9070 case TARGET_NR_sigprocmask: 9071 { 9072 #if defined(TARGET_ALPHA) 9073 sigset_t set, oldset; 9074 abi_ulong mask; 9075 int how; 9076 9077 switch (arg1) { 9078 case TARGET_SIG_BLOCK: 9079 how = SIG_BLOCK; 9080 break; 9081 case TARGET_SIG_UNBLOCK: 9082 how = SIG_UNBLOCK; 9083 break; 9084 case TARGET_SIG_SETMASK: 9085 how = SIG_SETMASK; 9086 break; 9087 default: 9088 return -TARGET_EINVAL; 9089 } 9090 mask = arg2; 9091 target_to_host_old_sigset(&set, &mask); 9092 9093 ret = do_sigprocmask(how, &set, &oldset); 9094 if (!is_error(ret)) { 9095 host_to_target_old_sigset(&mask, &oldset); 9096 ret = mask; 9097 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 9098 } 9099 #else 9100 sigset_t set, oldset, *set_ptr; 9101 int how; 9102 9103 if (arg2) { 9104 switch (arg1) { 9105 case TARGET_SIG_BLOCK: 9106 how = SIG_BLOCK; 9107 break; 9108 case TARGET_SIG_UNBLOCK: 9109 how = SIG_UNBLOCK; 9110 break; 9111 case TARGET_SIG_SETMASK: 9112 how = SIG_SETMASK; 9113 break; 9114 default: 9115 return -TARGET_EINVAL; 9116 } 9117 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9118 return -TARGET_EFAULT; 9119 target_to_host_old_sigset(&set, p); 9120 unlock_user(p, arg2, 0); 9121 set_ptr = &set; 9122 } else { 9123 how = 0; 9124 set_ptr = NULL; 9125 } 9126 ret = do_sigprocmask(how, set_ptr, &oldset); 9127 if (!is_error(ret) && arg3) { 9128 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9129 return -TARGET_EFAULT; 9130 host_to_target_old_sigset(p, &oldset); 9131 unlock_user(p, arg3, sizeof(target_sigset_t)); 9132 } 9133 #endif 9134 } 9135 return ret; 9136 #endif 9137 case TARGET_NR_rt_sigprocmask: 9138 { 9139 int how = arg1; 9140 sigset_t set, oldset, *set_ptr; 9141 9142 if (arg4 != sizeof(target_sigset_t)) { 9143 return -TARGET_EINVAL; 9144 } 9145 9146 if (arg2) { 9147 switch(how) { 9148 case TARGET_SIG_BLOCK: 9149 how = SIG_BLOCK; 9150 break; 9151 case TARGET_SIG_UNBLOCK: 9152 how = SIG_UNBLOCK; 9153 break; 9154 case TARGET_SIG_SETMASK: 9155 how = SIG_SETMASK; 9156 break; 9157 default: 9158 return -TARGET_EINVAL; 9159 } 9160 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 9161 return -TARGET_EFAULT; 9162 target_to_host_sigset(&set, p); 9163 unlock_user(p, arg2, 0); 9164 set_ptr = &set; 9165 } else { 9166 how = 0; 9167 set_ptr = NULL; 9168 } 9169 ret = do_sigprocmask(how, set_ptr, &oldset); 9170 if (!is_error(ret) && arg3) { 9171 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9172 return -TARGET_EFAULT; 9173 host_to_target_sigset(p, &oldset); 9174 unlock_user(p, arg3, sizeof(target_sigset_t)); 9175 } 9176 } 9177 return ret; 9178 #ifdef TARGET_NR_sigpending 9179 case TARGET_NR_sigpending: 9180 { 9181 sigset_t set; 9182 ret = get_errno(sigpending(&set)); 9183 if (!is_error(ret)) { 9184 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9185 return -TARGET_EFAULT; 9186 host_to_target_old_sigset(p, &set); 9187 unlock_user(p, arg1, sizeof(target_sigset_t)); 9188 } 9189 } 9190 return ret; 9191 #endif 9192 case TARGET_NR_rt_sigpending: 9193 { 9194 sigset_t set; 9195 9196 /* Yes, this check is >, not != like most. We follow the kernel's 9197 * logic and it does it like this because it implements 9198 * NR_sigpending through the same code path, and in that case 9199 * the old_sigset_t is smaller in size. 9200 */ 9201 if (arg2 > sizeof(target_sigset_t)) { 9202 return -TARGET_EINVAL; 9203 } 9204 9205 ret = get_errno(sigpending(&set)); 9206 if (!is_error(ret)) { 9207 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9208 return -TARGET_EFAULT; 9209 host_to_target_sigset(p, &set); 9210 unlock_user(p, arg1, sizeof(target_sigset_t)); 9211 } 9212 } 9213 return ret; 9214 #ifdef TARGET_NR_sigsuspend 9215 case TARGET_NR_sigsuspend: 9216 { 9217 TaskState *ts = cpu->opaque; 9218 #if defined(TARGET_ALPHA) 9219 abi_ulong mask = arg1; 9220 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); 9221 #else 9222 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9223 return -TARGET_EFAULT; 9224 target_to_host_old_sigset(&ts->sigsuspend_mask, p); 9225 unlock_user(p, arg1, 0); 9226 #endif 9227 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9228 SIGSET_T_SIZE)); 9229 if (ret != -TARGET_ERESTARTSYS) { 9230 ts->in_sigsuspend = 1; 9231 } 9232 } 9233 return ret; 9234 #endif 9235 case TARGET_NR_rt_sigsuspend: 9236 { 9237 TaskState *ts = cpu->opaque; 9238 9239 if (arg2 != sizeof(target_sigset_t)) { 9240 return -TARGET_EINVAL; 9241 } 9242 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9243 return -TARGET_EFAULT; 9244 target_to_host_sigset(&ts->sigsuspend_mask, p); 9245 unlock_user(p, arg1, 0); 9246 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, 9247 SIGSET_T_SIZE)); 9248 if (ret != -TARGET_ERESTARTSYS) { 9249 ts->in_sigsuspend = 1; 9250 } 9251 } 9252 return ret; 9253 #ifdef TARGET_NR_rt_sigtimedwait 9254 case TARGET_NR_rt_sigtimedwait: 9255 { 9256 sigset_t set; 9257 struct timespec uts, *puts; 9258 siginfo_t uinfo; 9259 9260 if (arg4 != sizeof(target_sigset_t)) { 9261 return -TARGET_EINVAL; 9262 } 9263 9264 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9265 return -TARGET_EFAULT; 9266 target_to_host_sigset(&set, p); 9267 unlock_user(p, arg1, 0); 9268 if (arg3) { 9269 puts = &uts; 9270 if (target_to_host_timespec(puts, arg3)) { 9271 return -TARGET_EFAULT; 9272 } 9273 } else { 9274 puts = NULL; 9275 } 9276 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9277 SIGSET_T_SIZE)); 9278 if (!is_error(ret)) { 9279 if (arg2) { 9280 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9281 0); 9282 if (!p) { 9283 return -TARGET_EFAULT; 9284 } 9285 host_to_target_siginfo(p, &uinfo); 9286 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9287 } 9288 ret = host_to_target_signal(ret); 9289 } 9290 } 9291 return ret; 9292 #endif 9293 #ifdef TARGET_NR_rt_sigtimedwait_time64 9294 case TARGET_NR_rt_sigtimedwait_time64: 9295 { 9296 sigset_t set; 9297 struct timespec uts, *puts; 9298 siginfo_t uinfo; 9299 9300 if (arg4 != sizeof(target_sigset_t)) { 9301 return -TARGET_EINVAL; 9302 } 9303 9304 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 9305 if (!p) { 9306 return -TARGET_EFAULT; 9307 } 9308 target_to_host_sigset(&set, p); 9309 unlock_user(p, arg1, 0); 9310 if (arg3) { 9311 puts = &uts; 9312 if (target_to_host_timespec64(puts, arg3)) { 9313 return -TARGET_EFAULT; 9314 } 9315 } else { 9316 puts = NULL; 9317 } 9318 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9319 SIGSET_T_SIZE)); 9320 if (!is_error(ret)) { 9321 if (arg2) { 9322 p = lock_user(VERIFY_WRITE, arg2, 9323 sizeof(target_siginfo_t), 0); 9324 if (!p) { 9325 return -TARGET_EFAULT; 9326 } 9327 host_to_target_siginfo(p, &uinfo); 9328 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9329 } 9330 ret = host_to_target_signal(ret); 9331 } 9332 } 9333 return ret; 9334 #endif 9335 case TARGET_NR_rt_sigqueueinfo: 9336 { 9337 siginfo_t uinfo; 9338 9339 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9340 if (!p) { 9341 return -TARGET_EFAULT; 9342 } 9343 target_to_host_siginfo(&uinfo, p); 9344 unlock_user(p, arg3, 0); 9345 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9346 } 9347 return ret; 9348 case TARGET_NR_rt_tgsigqueueinfo: 9349 { 9350 siginfo_t uinfo; 9351 9352 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9353 if (!p) { 9354 return -TARGET_EFAULT; 9355 } 9356 target_to_host_siginfo(&uinfo, p); 9357 unlock_user(p, arg4, 0); 9358 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo)); 9359 } 9360 return ret; 9361 #ifdef TARGET_NR_sigreturn 9362 case TARGET_NR_sigreturn: 9363 if (block_signals()) { 9364 return -TARGET_ERESTARTSYS; 9365 } 9366 return do_sigreturn(cpu_env); 9367 #endif 9368 case TARGET_NR_rt_sigreturn: 9369 if (block_signals()) { 9370 return -TARGET_ERESTARTSYS; 9371 } 9372 return do_rt_sigreturn(cpu_env); 9373 case TARGET_NR_sethostname: 9374 if (!(p = lock_user_string(arg1))) 9375 return -TARGET_EFAULT; 9376 ret = get_errno(sethostname(p, arg2)); 9377 unlock_user(p, arg1, 0); 9378 return ret; 9379 #ifdef TARGET_NR_setrlimit 9380 case TARGET_NR_setrlimit: 9381 { 9382 int resource = target_to_host_resource(arg1); 9383 struct target_rlimit *target_rlim; 9384 struct rlimit rlim; 9385 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9386 return -TARGET_EFAULT; 9387 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9388 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9389 unlock_user_struct(target_rlim, arg2, 0); 9390 /* 9391 * If we just passed through resource limit settings for memory then 9392 * they would also apply to QEMU's own allocations, and QEMU will 9393 * crash or hang or die if its allocations fail. Ideally we would 9394 * track the guest allocations in QEMU and apply the limits ourselves. 9395 * For now, just tell the guest the call succeeded but don't actually 9396 * limit anything. 9397 */ 9398 if (resource != RLIMIT_AS && 9399 resource != RLIMIT_DATA && 9400 resource != RLIMIT_STACK) { 9401 return get_errno(setrlimit(resource, &rlim)); 9402 } else { 9403 return 0; 9404 } 9405 } 9406 #endif 9407 #ifdef TARGET_NR_getrlimit 9408 case TARGET_NR_getrlimit: 9409 { 9410 int resource = target_to_host_resource(arg1); 9411 struct target_rlimit *target_rlim; 9412 struct rlimit rlim; 9413 9414 ret = get_errno(getrlimit(resource, &rlim)); 9415 if (!is_error(ret)) { 9416 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9417 return -TARGET_EFAULT; 9418 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9419 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9420 unlock_user_struct(target_rlim, arg2, 1); 9421 } 9422 } 9423 return ret; 9424 #endif 9425 case TARGET_NR_getrusage: 9426 { 9427 struct rusage rusage; 9428 ret = get_errno(getrusage(arg1, &rusage)); 9429 if (!is_error(ret)) { 9430 ret = host_to_target_rusage(arg2, &rusage); 9431 } 9432 } 9433 return ret; 9434 #if defined(TARGET_NR_gettimeofday) 9435 case TARGET_NR_gettimeofday: 9436 { 9437 struct timeval tv; 9438 struct timezone tz; 9439 9440 ret = get_errno(gettimeofday(&tv, &tz)); 9441 if (!is_error(ret)) { 9442 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 9443 return -TARGET_EFAULT; 9444 } 9445 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 9446 return -TARGET_EFAULT; 9447 } 9448 } 9449 } 9450 return ret; 9451 #endif 9452 #if defined(TARGET_NR_settimeofday) 9453 case TARGET_NR_settimeofday: 9454 { 9455 struct timeval tv, *ptv = NULL; 9456 struct timezone tz, *ptz = NULL; 9457 9458 if (arg1) { 9459 if (copy_from_user_timeval(&tv, arg1)) { 9460 return -TARGET_EFAULT; 9461 } 9462 ptv = &tv; 9463 } 9464 9465 if (arg2) { 9466 if (copy_from_user_timezone(&tz, arg2)) { 9467 return -TARGET_EFAULT; 9468 } 9469 ptz = &tz; 9470 } 9471 9472 return get_errno(settimeofday(ptv, ptz)); 9473 } 9474 #endif 9475 #if defined(TARGET_NR_select) 9476 case TARGET_NR_select: 9477 #if defined(TARGET_WANT_NI_OLD_SELECT) 9478 /* some architectures used to have old_select here 9479 * but now ENOSYS it. 9480 */ 9481 ret = -TARGET_ENOSYS; 9482 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9483 ret = do_old_select(arg1); 9484 #else 9485 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9486 #endif 9487 return ret; 9488 #endif 9489 #ifdef TARGET_NR_pselect6 9490 case TARGET_NR_pselect6: 9491 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 9492 #endif 9493 #ifdef TARGET_NR_pselect6_time64 9494 case TARGET_NR_pselect6_time64: 9495 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 9496 #endif 9497 #ifdef TARGET_NR_symlink 9498 case TARGET_NR_symlink: 9499 { 9500 void *p2; 9501 p = lock_user_string(arg1); 9502 p2 = lock_user_string(arg2); 9503 if (!p || !p2) 9504 ret = -TARGET_EFAULT; 9505 else 9506 ret = get_errno(symlink(p, p2)); 9507 unlock_user(p2, arg2, 0); 9508 unlock_user(p, arg1, 0); 9509 } 9510 return ret; 9511 #endif 9512 #if defined(TARGET_NR_symlinkat) 9513 case TARGET_NR_symlinkat: 9514 { 9515 void *p2; 9516 p = lock_user_string(arg1); 9517 p2 = lock_user_string(arg3); 9518 if (!p || !p2) 9519 ret = -TARGET_EFAULT; 9520 else 9521 ret = get_errno(symlinkat(p, arg2, p2)); 9522 unlock_user(p2, arg3, 0); 9523 unlock_user(p, arg1, 0); 9524 } 9525 return ret; 9526 #endif 9527 #ifdef TARGET_NR_readlink 9528 case TARGET_NR_readlink: 9529 { 9530 void *p2; 9531 p = lock_user_string(arg1); 9532 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9533 if (!p || !p2) { 9534 ret = -TARGET_EFAULT; 9535 } else if (!arg3) { 9536 /* Short circuit this for the magic exe check. */ 9537 ret = -TARGET_EINVAL; 9538 } else if (is_proc_myself((const char *)p, "exe")) { 9539 char real[PATH_MAX], *temp; 9540 temp = realpath(exec_path, real); 9541 /* Return value is # of bytes that we wrote to the buffer. */ 9542 if (temp == NULL) { 9543 ret = get_errno(-1); 9544 } else { 9545 /* Don't worry about sign mismatch as earlier mapping 9546 * logic would have thrown a bad address error. */ 9547 ret = MIN(strlen(real), arg3); 9548 /* We cannot NUL terminate the string. */ 9549 memcpy(p2, real, ret); 9550 } 9551 } else { 9552 ret = get_errno(readlink(path(p), p2, arg3)); 9553 } 9554 unlock_user(p2, arg2, ret); 9555 unlock_user(p, arg1, 0); 9556 } 9557 return ret; 9558 #endif 9559 #if defined(TARGET_NR_readlinkat) 9560 case TARGET_NR_readlinkat: 9561 { 9562 void *p2; 9563 p = lock_user_string(arg2); 9564 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9565 if (!p || !p2) { 9566 ret = -TARGET_EFAULT; 9567 } else if (is_proc_myself((const char *)p, "exe")) { 9568 char real[PATH_MAX], *temp; 9569 temp = realpath(exec_path, real); 9570 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 9571 snprintf((char *)p2, arg4, "%s", real); 9572 } else { 9573 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9574 } 9575 unlock_user(p2, arg3, ret); 9576 unlock_user(p, arg2, 0); 9577 } 9578 return ret; 9579 #endif 9580 #ifdef TARGET_NR_swapon 9581 case TARGET_NR_swapon: 9582 if (!(p = lock_user_string(arg1))) 9583 return -TARGET_EFAULT; 9584 ret = get_errno(swapon(p, arg2)); 9585 unlock_user(p, arg1, 0); 9586 return ret; 9587 #endif 9588 case TARGET_NR_reboot: 9589 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9590 /* arg4 must be ignored in all other cases */ 9591 p = lock_user_string(arg4); 9592 if (!p) { 9593 return -TARGET_EFAULT; 9594 } 9595 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9596 unlock_user(p, arg4, 0); 9597 } else { 9598 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9599 } 9600 return ret; 9601 #ifdef TARGET_NR_mmap 9602 case TARGET_NR_mmap: 9603 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9604 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9605 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9606 || defined(TARGET_S390X) 9607 { 9608 abi_ulong *v; 9609 abi_ulong v1, v2, v3, v4, v5, v6; 9610 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9611 return -TARGET_EFAULT; 9612 v1 = tswapal(v[0]); 9613 v2 = tswapal(v[1]); 9614 v3 = tswapal(v[2]); 9615 v4 = tswapal(v[3]); 9616 v5 = tswapal(v[4]); 9617 v6 = tswapal(v[5]); 9618 unlock_user(v, arg1, 0); 9619 ret = get_errno(target_mmap(v1, v2, v3, 9620 target_to_host_bitmask(v4, mmap_flags_tbl), 9621 v5, v6)); 9622 } 9623 #else 9624 ret = get_errno(target_mmap(arg1, arg2, arg3, 9625 target_to_host_bitmask(arg4, mmap_flags_tbl), 9626 arg5, 9627 arg6)); 9628 #endif 9629 return ret; 9630 #endif 9631 #ifdef TARGET_NR_mmap2 9632 case TARGET_NR_mmap2: 9633 #ifndef MMAP_SHIFT 9634 #define MMAP_SHIFT 12 9635 #endif 9636 ret = target_mmap(arg1, arg2, arg3, 9637 target_to_host_bitmask(arg4, mmap_flags_tbl), 9638 arg5, arg6 << MMAP_SHIFT); 9639 return get_errno(ret); 9640 #endif 9641 case TARGET_NR_munmap: 9642 return get_errno(target_munmap(arg1, arg2)); 9643 case TARGET_NR_mprotect: 9644 { 9645 TaskState *ts = cpu->opaque; 9646 /* Special hack to detect libc making the stack executable. */ 9647 if ((arg3 & PROT_GROWSDOWN) 9648 && arg1 >= ts->info->stack_limit 9649 && arg1 <= ts->info->start_stack) { 9650 arg3 &= ~PROT_GROWSDOWN; 9651 arg2 = arg2 + arg1 - ts->info->stack_limit; 9652 arg1 = ts->info->stack_limit; 9653 } 9654 } 9655 return get_errno(target_mprotect(arg1, arg2, arg3)); 9656 #ifdef TARGET_NR_mremap 9657 case TARGET_NR_mremap: 9658 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 9659 #endif 9660 /* ??? msync/mlock/munlock are broken for softmmu. */ 9661 #ifdef TARGET_NR_msync 9662 case TARGET_NR_msync: 9663 return get_errno(msync(g2h(arg1), arg2, arg3)); 9664 #endif 9665 #ifdef TARGET_NR_mlock 9666 case TARGET_NR_mlock: 9667 return get_errno(mlock(g2h(arg1), arg2)); 9668 #endif 9669 #ifdef TARGET_NR_munlock 9670 case TARGET_NR_munlock: 9671 return get_errno(munlock(g2h(arg1), arg2)); 9672 #endif 9673 #ifdef TARGET_NR_mlockall 9674 case TARGET_NR_mlockall: 9675 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 9676 #endif 9677 #ifdef TARGET_NR_munlockall 9678 case TARGET_NR_munlockall: 9679 return get_errno(munlockall()); 9680 #endif 9681 #ifdef TARGET_NR_truncate 9682 case TARGET_NR_truncate: 9683 if (!(p = lock_user_string(arg1))) 9684 return -TARGET_EFAULT; 9685 ret = get_errno(truncate(p, arg2)); 9686 unlock_user(p, arg1, 0); 9687 return ret; 9688 #endif 9689 #ifdef TARGET_NR_ftruncate 9690 case TARGET_NR_ftruncate: 9691 return get_errno(ftruncate(arg1, arg2)); 9692 #endif 9693 case TARGET_NR_fchmod: 9694 return get_errno(fchmod(arg1, arg2)); 9695 #if defined(TARGET_NR_fchmodat) 9696 case TARGET_NR_fchmodat: 9697 if (!(p = lock_user_string(arg2))) 9698 return -TARGET_EFAULT; 9699 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 9700 unlock_user(p, arg2, 0); 9701 return ret; 9702 #endif 9703 case TARGET_NR_getpriority: 9704 /* Note that negative values are valid for getpriority, so we must 9705 differentiate based on errno settings. */ 9706 errno = 0; 9707 ret = getpriority(arg1, arg2); 9708 if (ret == -1 && errno != 0) { 9709 return -host_to_target_errno(errno); 9710 } 9711 #ifdef TARGET_ALPHA 9712 /* Return value is the unbiased priority. Signal no error. */ 9713 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 9714 #else 9715 /* Return value is a biased priority to avoid negative numbers. */ 9716 ret = 20 - ret; 9717 #endif 9718 return ret; 9719 case TARGET_NR_setpriority: 9720 return get_errno(setpriority(arg1, arg2, arg3)); 9721 #ifdef TARGET_NR_statfs 9722 case TARGET_NR_statfs: 9723 if (!(p = lock_user_string(arg1))) { 9724 return -TARGET_EFAULT; 9725 } 9726 ret = get_errno(statfs(path(p), &stfs)); 9727 unlock_user(p, arg1, 0); 9728 convert_statfs: 9729 if (!is_error(ret)) { 9730 struct target_statfs *target_stfs; 9731 9732 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 9733 return -TARGET_EFAULT; 9734 __put_user(stfs.f_type, &target_stfs->f_type); 9735 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9736 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9737 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9738 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9739 __put_user(stfs.f_files, &target_stfs->f_files); 9740 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9741 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9742 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9743 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9744 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9745 #ifdef _STATFS_F_FLAGS 9746 __put_user(stfs.f_flags, &target_stfs->f_flags); 9747 #else 9748 __put_user(0, &target_stfs->f_flags); 9749 #endif 9750 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9751 unlock_user_struct(target_stfs, arg2, 1); 9752 } 9753 return ret; 9754 #endif 9755 #ifdef TARGET_NR_fstatfs 9756 case TARGET_NR_fstatfs: 9757 ret = get_errno(fstatfs(arg1, &stfs)); 9758 goto convert_statfs; 9759 #endif 9760 #ifdef TARGET_NR_statfs64 9761 case TARGET_NR_statfs64: 9762 if (!(p = lock_user_string(arg1))) { 9763 return -TARGET_EFAULT; 9764 } 9765 ret = get_errno(statfs(path(p), &stfs)); 9766 unlock_user(p, arg1, 0); 9767 convert_statfs64: 9768 if (!is_error(ret)) { 9769 struct target_statfs64 *target_stfs; 9770 9771 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 9772 return -TARGET_EFAULT; 9773 __put_user(stfs.f_type, &target_stfs->f_type); 9774 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 9775 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 9776 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 9777 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 9778 __put_user(stfs.f_files, &target_stfs->f_files); 9779 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 9780 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 9781 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 9782 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 9783 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 9784 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 9785 unlock_user_struct(target_stfs, arg3, 1); 9786 } 9787 return ret; 9788 case TARGET_NR_fstatfs64: 9789 ret = get_errno(fstatfs(arg1, &stfs)); 9790 goto convert_statfs64; 9791 #endif 9792 #ifdef TARGET_NR_socketcall 9793 case TARGET_NR_socketcall: 9794 return do_socketcall(arg1, arg2); 9795 #endif 9796 #ifdef TARGET_NR_accept 9797 case TARGET_NR_accept: 9798 return do_accept4(arg1, arg2, arg3, 0); 9799 #endif 9800 #ifdef TARGET_NR_accept4 9801 case TARGET_NR_accept4: 9802 return do_accept4(arg1, arg2, arg3, arg4); 9803 #endif 9804 #ifdef TARGET_NR_bind 9805 case TARGET_NR_bind: 9806 return do_bind(arg1, arg2, arg3); 9807 #endif 9808 #ifdef TARGET_NR_connect 9809 case TARGET_NR_connect: 9810 return do_connect(arg1, arg2, arg3); 9811 #endif 9812 #ifdef TARGET_NR_getpeername 9813 case TARGET_NR_getpeername: 9814 return do_getpeername(arg1, arg2, arg3); 9815 #endif 9816 #ifdef TARGET_NR_getsockname 9817 case TARGET_NR_getsockname: 9818 return do_getsockname(arg1, arg2, arg3); 9819 #endif 9820 #ifdef TARGET_NR_getsockopt 9821 case TARGET_NR_getsockopt: 9822 return do_getsockopt(arg1, arg2, arg3, arg4, arg5); 9823 #endif 9824 #ifdef TARGET_NR_listen 9825 case TARGET_NR_listen: 9826 return get_errno(listen(arg1, arg2)); 9827 #endif 9828 #ifdef TARGET_NR_recv 9829 case TARGET_NR_recv: 9830 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 9831 #endif 9832 #ifdef TARGET_NR_recvfrom 9833 case TARGET_NR_recvfrom: 9834 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 9835 #endif 9836 #ifdef TARGET_NR_recvmsg 9837 case TARGET_NR_recvmsg: 9838 return do_sendrecvmsg(arg1, arg2, arg3, 0); 9839 #endif 9840 #ifdef TARGET_NR_send 9841 case TARGET_NR_send: 9842 return do_sendto(arg1, arg2, arg3, arg4, 0, 0); 9843 #endif 9844 #ifdef TARGET_NR_sendmsg 9845 case TARGET_NR_sendmsg: 9846 return do_sendrecvmsg(arg1, arg2, arg3, 1); 9847 #endif 9848 #ifdef TARGET_NR_sendmmsg 9849 case TARGET_NR_sendmmsg: 9850 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 9851 #endif 9852 #ifdef TARGET_NR_recvmmsg 9853 case TARGET_NR_recvmmsg: 9854 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 9855 #endif 9856 #ifdef TARGET_NR_sendto 9857 case TARGET_NR_sendto: 9858 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 9859 #endif 9860 #ifdef TARGET_NR_shutdown 9861 case TARGET_NR_shutdown: 9862 return get_errno(shutdown(arg1, arg2)); 9863 #endif 9864 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 9865 case TARGET_NR_getrandom: 9866 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 9867 if (!p) { 9868 return -TARGET_EFAULT; 9869 } 9870 ret = get_errno(getrandom(p, arg2, arg3)); 9871 unlock_user(p, arg1, ret); 9872 return ret; 9873 #endif 9874 #ifdef TARGET_NR_socket 9875 case TARGET_NR_socket: 9876 return do_socket(arg1, arg2, arg3); 9877 #endif 9878 #ifdef TARGET_NR_socketpair 9879 case TARGET_NR_socketpair: 9880 return do_socketpair(arg1, arg2, arg3, arg4); 9881 #endif 9882 #ifdef TARGET_NR_setsockopt 9883 case TARGET_NR_setsockopt: 9884 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 9885 #endif 9886 #if defined(TARGET_NR_syslog) 9887 case TARGET_NR_syslog: 9888 { 9889 int len = arg2; 9890 9891 switch (arg1) { 9892 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 9893 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 9894 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 9895 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 9896 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 9897 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 9898 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 9899 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 9900 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 9901 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 9902 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 9903 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 9904 { 9905 if (len < 0) { 9906 return -TARGET_EINVAL; 9907 } 9908 if (len == 0) { 9909 return 0; 9910 } 9911 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9912 if (!p) { 9913 return -TARGET_EFAULT; 9914 } 9915 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 9916 unlock_user(p, arg2, arg3); 9917 } 9918 return ret; 9919 default: 9920 return -TARGET_EINVAL; 9921 } 9922 } 9923 break; 9924 #endif 9925 case TARGET_NR_setitimer: 9926 { 9927 struct itimerval value, ovalue, *pvalue; 9928 9929 if (arg2) { 9930 pvalue = &value; 9931 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 9932 || copy_from_user_timeval(&pvalue->it_value, 9933 arg2 + sizeof(struct target_timeval))) 9934 return -TARGET_EFAULT; 9935 } else { 9936 pvalue = NULL; 9937 } 9938 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 9939 if (!is_error(ret) && arg3) { 9940 if (copy_to_user_timeval(arg3, 9941 &ovalue.it_interval) 9942 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 9943 &ovalue.it_value)) 9944 return -TARGET_EFAULT; 9945 } 9946 } 9947 return ret; 9948 case TARGET_NR_getitimer: 9949 { 9950 struct itimerval value; 9951 9952 ret = get_errno(getitimer(arg1, &value)); 9953 if (!is_error(ret) && arg2) { 9954 if (copy_to_user_timeval(arg2, 9955 &value.it_interval) 9956 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 9957 &value.it_value)) 9958 return -TARGET_EFAULT; 9959 } 9960 } 9961 return ret; 9962 #ifdef TARGET_NR_stat 9963 case TARGET_NR_stat: 9964 if (!(p = lock_user_string(arg1))) { 9965 return -TARGET_EFAULT; 9966 } 9967 ret = get_errno(stat(path(p), &st)); 9968 unlock_user(p, arg1, 0); 9969 goto do_stat; 9970 #endif 9971 #ifdef TARGET_NR_lstat 9972 case TARGET_NR_lstat: 9973 if (!(p = lock_user_string(arg1))) { 9974 return -TARGET_EFAULT; 9975 } 9976 ret = get_errno(lstat(path(p), &st)); 9977 unlock_user(p, arg1, 0); 9978 goto do_stat; 9979 #endif 9980 #ifdef TARGET_NR_fstat 9981 case TARGET_NR_fstat: 9982 { 9983 ret = get_errno(fstat(arg1, &st)); 9984 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 9985 do_stat: 9986 #endif 9987 if (!is_error(ret)) { 9988 struct target_stat *target_st; 9989 9990 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 9991 return -TARGET_EFAULT; 9992 memset(target_st, 0, sizeof(*target_st)); 9993 __put_user(st.st_dev, &target_st->st_dev); 9994 __put_user(st.st_ino, &target_st->st_ino); 9995 __put_user(st.st_mode, &target_st->st_mode); 9996 __put_user(st.st_uid, &target_st->st_uid); 9997 __put_user(st.st_gid, &target_st->st_gid); 9998 __put_user(st.st_nlink, &target_st->st_nlink); 9999 __put_user(st.st_rdev, &target_st->st_rdev); 10000 __put_user(st.st_size, &target_st->st_size); 10001 __put_user(st.st_blksize, &target_st->st_blksize); 10002 __put_user(st.st_blocks, &target_st->st_blocks); 10003 __put_user(st.st_atime, &target_st->target_st_atime); 10004 __put_user(st.st_mtime, &target_st->target_st_mtime); 10005 __put_user(st.st_ctime, &target_st->target_st_ctime); 10006 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \ 10007 defined(TARGET_STAT_HAVE_NSEC) 10008 __put_user(st.st_atim.tv_nsec, 10009 &target_st->target_st_atime_nsec); 10010 __put_user(st.st_mtim.tv_nsec, 10011 &target_st->target_st_mtime_nsec); 10012 __put_user(st.st_ctim.tv_nsec, 10013 &target_st->target_st_ctime_nsec); 10014 #endif 10015 unlock_user_struct(target_st, arg2, 1); 10016 } 10017 } 10018 return ret; 10019 #endif 10020 case TARGET_NR_vhangup: 10021 return get_errno(vhangup()); 10022 #ifdef TARGET_NR_syscall 10023 case TARGET_NR_syscall: 10024 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 10025 arg6, arg7, arg8, 0); 10026 #endif 10027 #if defined(TARGET_NR_wait4) 10028 case TARGET_NR_wait4: 10029 { 10030 int status; 10031 abi_long status_ptr = arg2; 10032 struct rusage rusage, *rusage_ptr; 10033 abi_ulong target_rusage = arg4; 10034 abi_long rusage_err; 10035 if (target_rusage) 10036 rusage_ptr = &rusage; 10037 else 10038 rusage_ptr = NULL; 10039 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10040 if (!is_error(ret)) { 10041 if (status_ptr && ret) { 10042 status = host_to_target_waitstatus(status); 10043 if (put_user_s32(status, status_ptr)) 10044 return -TARGET_EFAULT; 10045 } 10046 if (target_rusage) { 10047 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10048 if (rusage_err) { 10049 ret = rusage_err; 10050 } 10051 } 10052 } 10053 } 10054 return ret; 10055 #endif 10056 #ifdef TARGET_NR_swapoff 10057 case TARGET_NR_swapoff: 10058 if (!(p = lock_user_string(arg1))) 10059 return -TARGET_EFAULT; 10060 ret = get_errno(swapoff(p)); 10061 unlock_user(p, arg1, 0); 10062 return ret; 10063 #endif 10064 case TARGET_NR_sysinfo: 10065 { 10066 struct target_sysinfo *target_value; 10067 struct sysinfo value; 10068 ret = get_errno(sysinfo(&value)); 10069 if (!is_error(ret) && arg1) 10070 { 10071 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 10072 return -TARGET_EFAULT; 10073 __put_user(value.uptime, &target_value->uptime); 10074 __put_user(value.loads[0], &target_value->loads[0]); 10075 __put_user(value.loads[1], &target_value->loads[1]); 10076 __put_user(value.loads[2], &target_value->loads[2]); 10077 __put_user(value.totalram, &target_value->totalram); 10078 __put_user(value.freeram, &target_value->freeram); 10079 __put_user(value.sharedram, &target_value->sharedram); 10080 __put_user(value.bufferram, &target_value->bufferram); 10081 __put_user(value.totalswap, &target_value->totalswap); 10082 __put_user(value.freeswap, &target_value->freeswap); 10083 __put_user(value.procs, &target_value->procs); 10084 __put_user(value.totalhigh, &target_value->totalhigh); 10085 __put_user(value.freehigh, &target_value->freehigh); 10086 __put_user(value.mem_unit, &target_value->mem_unit); 10087 unlock_user_struct(target_value, arg1, 1); 10088 } 10089 } 10090 return ret; 10091 #ifdef TARGET_NR_ipc 10092 case TARGET_NR_ipc: 10093 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10094 #endif 10095 #ifdef TARGET_NR_semget 10096 case TARGET_NR_semget: 10097 return get_errno(semget(arg1, arg2, arg3)); 10098 #endif 10099 #ifdef TARGET_NR_semop 10100 case TARGET_NR_semop: 10101 return do_semtimedop(arg1, arg2, arg3, 0, false); 10102 #endif 10103 #ifdef TARGET_NR_semtimedop 10104 case TARGET_NR_semtimedop: 10105 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10106 #endif 10107 #ifdef TARGET_NR_semtimedop_time64 10108 case TARGET_NR_semtimedop_time64: 10109 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10110 #endif 10111 #ifdef TARGET_NR_semctl 10112 case TARGET_NR_semctl: 10113 return do_semctl(arg1, arg2, arg3, arg4); 10114 #endif 10115 #ifdef TARGET_NR_msgctl 10116 case TARGET_NR_msgctl: 10117 return do_msgctl(arg1, arg2, arg3); 10118 #endif 10119 #ifdef TARGET_NR_msgget 10120 case TARGET_NR_msgget: 10121 return get_errno(msgget(arg1, arg2)); 10122 #endif 10123 #ifdef TARGET_NR_msgrcv 10124 case TARGET_NR_msgrcv: 10125 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10126 #endif 10127 #ifdef TARGET_NR_msgsnd 10128 case TARGET_NR_msgsnd: 10129 return do_msgsnd(arg1, arg2, arg3, arg4); 10130 #endif 10131 #ifdef TARGET_NR_shmget 10132 case TARGET_NR_shmget: 10133 return get_errno(shmget(arg1, arg2, arg3)); 10134 #endif 10135 #ifdef TARGET_NR_shmctl 10136 case TARGET_NR_shmctl: 10137 return do_shmctl(arg1, arg2, arg3); 10138 #endif 10139 #ifdef TARGET_NR_shmat 10140 case TARGET_NR_shmat: 10141 return do_shmat(cpu_env, arg1, arg2, arg3); 10142 #endif 10143 #ifdef TARGET_NR_shmdt 10144 case TARGET_NR_shmdt: 10145 return do_shmdt(arg1); 10146 #endif 10147 case TARGET_NR_fsync: 10148 return get_errno(fsync(arg1)); 10149 case TARGET_NR_clone: 10150 /* Linux manages to have three different orderings for its 10151 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10152 * match the kernel's CONFIG_CLONE_* settings. 10153 * Microblaze is further special in that it uses a sixth 10154 * implicit argument to clone for the TLS pointer. 10155 */ 10156 #if defined(TARGET_MICROBLAZE) 10157 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10158 #elif defined(TARGET_CLONE_BACKWARDS) 10159 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10160 #elif defined(TARGET_CLONE_BACKWARDS2) 10161 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10162 #else 10163 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10164 #endif 10165 return ret; 10166 #ifdef __NR_exit_group 10167 /* new thread calls */ 10168 case TARGET_NR_exit_group: 10169 preexit_cleanup(cpu_env, arg1); 10170 return get_errno(exit_group(arg1)); 10171 #endif 10172 case TARGET_NR_setdomainname: 10173 if (!(p = lock_user_string(arg1))) 10174 return -TARGET_EFAULT; 10175 ret = get_errno(setdomainname(p, arg2)); 10176 unlock_user(p, arg1, 0); 10177 return ret; 10178 case TARGET_NR_uname: 10179 /* no need to transcode because we use the linux syscall */ 10180 { 10181 struct new_utsname * buf; 10182 10183 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10184 return -TARGET_EFAULT; 10185 ret = get_errno(sys_uname(buf)); 10186 if (!is_error(ret)) { 10187 /* Overwrite the native machine name with whatever is being 10188 emulated. */ 10189 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10190 sizeof(buf->machine)); 10191 /* Allow the user to override the reported release. */ 10192 if (qemu_uname_release && *qemu_uname_release) { 10193 g_strlcpy(buf->release, qemu_uname_release, 10194 sizeof(buf->release)); 10195 } 10196 } 10197 unlock_user_struct(buf, arg1, 1); 10198 } 10199 return ret; 10200 #ifdef TARGET_I386 10201 case TARGET_NR_modify_ldt: 10202 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10203 #if !defined(TARGET_X86_64) 10204 case TARGET_NR_vm86: 10205 return do_vm86(cpu_env, arg1, arg2); 10206 #endif 10207 #endif 10208 #if defined(TARGET_NR_adjtimex) 10209 case TARGET_NR_adjtimex: 10210 { 10211 struct timex host_buf; 10212 10213 if (target_to_host_timex(&host_buf, arg1) != 0) { 10214 return -TARGET_EFAULT; 10215 } 10216 ret = get_errno(adjtimex(&host_buf)); 10217 if (!is_error(ret)) { 10218 if (host_to_target_timex(arg1, &host_buf) != 0) { 10219 return -TARGET_EFAULT; 10220 } 10221 } 10222 } 10223 return ret; 10224 #endif 10225 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10226 case TARGET_NR_clock_adjtime: 10227 { 10228 struct timex htx, *phtx = &htx; 10229 10230 if (target_to_host_timex(phtx, arg2) != 0) { 10231 return -TARGET_EFAULT; 10232 } 10233 ret = get_errno(clock_adjtime(arg1, phtx)); 10234 if (!is_error(ret) && phtx) { 10235 if (host_to_target_timex(arg2, phtx) != 0) { 10236 return -TARGET_EFAULT; 10237 } 10238 } 10239 } 10240 return ret; 10241 #endif 10242 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10243 case TARGET_NR_clock_adjtime64: 10244 { 10245 struct timex htx; 10246 10247 if (target_to_host_timex64(&htx, arg2) != 0) { 10248 return -TARGET_EFAULT; 10249 } 10250 ret = get_errno(clock_adjtime(arg1, &htx)); 10251 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10252 return -TARGET_EFAULT; 10253 } 10254 } 10255 return ret; 10256 #endif 10257 case TARGET_NR_getpgid: 10258 return get_errno(getpgid(arg1)); 10259 case TARGET_NR_fchdir: 10260 return get_errno(fchdir(arg1)); 10261 case TARGET_NR_personality: 10262 return get_errno(personality(arg1)); 10263 #ifdef TARGET_NR__llseek /* Not on alpha */ 10264 case TARGET_NR__llseek: 10265 { 10266 int64_t res; 10267 #if !defined(__NR_llseek) 10268 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10269 if (res == -1) { 10270 ret = get_errno(res); 10271 } else { 10272 ret = 0; 10273 } 10274 #else 10275 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10276 #endif 10277 if ((ret == 0) && put_user_s64(res, arg4)) { 10278 return -TARGET_EFAULT; 10279 } 10280 } 10281 return ret; 10282 #endif 10283 #ifdef TARGET_NR_getdents 10284 case TARGET_NR_getdents: 10285 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 10286 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 10287 { 10288 struct target_dirent *target_dirp; 10289 struct linux_dirent *dirp; 10290 abi_long count = arg3; 10291 10292 dirp = g_try_malloc(count); 10293 if (!dirp) { 10294 return -TARGET_ENOMEM; 10295 } 10296 10297 ret = get_errno(sys_getdents(arg1, dirp, count)); 10298 if (!is_error(ret)) { 10299 struct linux_dirent *de; 10300 struct target_dirent *tde; 10301 int len = ret; 10302 int reclen, treclen; 10303 int count1, tnamelen; 10304 10305 count1 = 0; 10306 de = dirp; 10307 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10308 return -TARGET_EFAULT; 10309 tde = target_dirp; 10310 while (len > 0) { 10311 reclen = de->d_reclen; 10312 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 10313 assert(tnamelen >= 0); 10314 treclen = tnamelen + offsetof(struct target_dirent, d_name); 10315 assert(count1 + treclen <= count); 10316 tde->d_reclen = tswap16(treclen); 10317 tde->d_ino = tswapal(de->d_ino); 10318 tde->d_off = tswapal(de->d_off); 10319 memcpy(tde->d_name, de->d_name, tnamelen); 10320 de = (struct linux_dirent *)((char *)de + reclen); 10321 len -= reclen; 10322 tde = (struct target_dirent *)((char *)tde + treclen); 10323 count1 += treclen; 10324 } 10325 ret = count1; 10326 unlock_user(target_dirp, arg2, ret); 10327 } 10328 g_free(dirp); 10329 } 10330 #else 10331 { 10332 struct linux_dirent *dirp; 10333 abi_long count = arg3; 10334 10335 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10336 return -TARGET_EFAULT; 10337 ret = get_errno(sys_getdents(arg1, dirp, count)); 10338 if (!is_error(ret)) { 10339 struct linux_dirent *de; 10340 int len = ret; 10341 int reclen; 10342 de = dirp; 10343 while (len > 0) { 10344 reclen = de->d_reclen; 10345 if (reclen > len) 10346 break; 10347 de->d_reclen = tswap16(reclen); 10348 tswapls(&de->d_ino); 10349 tswapls(&de->d_off); 10350 de = (struct linux_dirent *)((char *)de + reclen); 10351 len -= reclen; 10352 } 10353 } 10354 unlock_user(dirp, arg2, ret); 10355 } 10356 #endif 10357 #else 10358 /* Implement getdents in terms of getdents64 */ 10359 { 10360 struct linux_dirent64 *dirp; 10361 abi_long count = arg3; 10362 10363 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 10364 if (!dirp) { 10365 return -TARGET_EFAULT; 10366 } 10367 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10368 if (!is_error(ret)) { 10369 /* Convert the dirent64 structs to target dirent. We do this 10370 * in-place, since we can guarantee that a target_dirent is no 10371 * larger than a dirent64; however this means we have to be 10372 * careful to read everything before writing in the new format. 10373 */ 10374 struct linux_dirent64 *de; 10375 struct target_dirent *tde; 10376 int len = ret; 10377 int tlen = 0; 10378 10379 de = dirp; 10380 tde = (struct target_dirent *)dirp; 10381 while (len > 0) { 10382 int namelen, treclen; 10383 int reclen = de->d_reclen; 10384 uint64_t ino = de->d_ino; 10385 int64_t off = de->d_off; 10386 uint8_t type = de->d_type; 10387 10388 namelen = strlen(de->d_name); 10389 treclen = offsetof(struct target_dirent, d_name) 10390 + namelen + 2; 10391 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 10392 10393 memmove(tde->d_name, de->d_name, namelen + 1); 10394 tde->d_ino = tswapal(ino); 10395 tde->d_off = tswapal(off); 10396 tde->d_reclen = tswap16(treclen); 10397 /* The target_dirent type is in what was formerly a padding 10398 * byte at the end of the structure: 10399 */ 10400 *(((char *)tde) + treclen - 1) = type; 10401 10402 de = (struct linux_dirent64 *)((char *)de + reclen); 10403 tde = (struct target_dirent *)((char *)tde + treclen); 10404 len -= reclen; 10405 tlen += treclen; 10406 } 10407 ret = tlen; 10408 } 10409 unlock_user(dirp, arg2, ret); 10410 } 10411 #endif 10412 return ret; 10413 #endif /* TARGET_NR_getdents */ 10414 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10415 case TARGET_NR_getdents64: 10416 { 10417 struct linux_dirent64 *dirp; 10418 abi_long count = arg3; 10419 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 10420 return -TARGET_EFAULT; 10421 ret = get_errno(sys_getdents64(arg1, dirp, count)); 10422 if (!is_error(ret)) { 10423 struct linux_dirent64 *de; 10424 int len = ret; 10425 int reclen; 10426 de = dirp; 10427 while (len > 0) { 10428 reclen = de->d_reclen; 10429 if (reclen > len) 10430 break; 10431 de->d_reclen = tswap16(reclen); 10432 tswap64s((uint64_t *)&de->d_ino); 10433 tswap64s((uint64_t *)&de->d_off); 10434 de = (struct linux_dirent64 *)((char *)de + reclen); 10435 len -= reclen; 10436 } 10437 } 10438 unlock_user(dirp, arg2, ret); 10439 } 10440 return ret; 10441 #endif /* TARGET_NR_getdents64 */ 10442 #if defined(TARGET_NR__newselect) 10443 case TARGET_NR__newselect: 10444 return do_select(arg1, arg2, arg3, arg4, arg5); 10445 #endif 10446 #ifdef TARGET_NR_poll 10447 case TARGET_NR_poll: 10448 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 10449 #endif 10450 #ifdef TARGET_NR_ppoll 10451 case TARGET_NR_ppoll: 10452 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 10453 #endif 10454 #ifdef TARGET_NR_ppoll_time64 10455 case TARGET_NR_ppoll_time64: 10456 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 10457 #endif 10458 case TARGET_NR_flock: 10459 /* NOTE: the flock constant seems to be the same for every 10460 Linux platform */ 10461 return get_errno(safe_flock(arg1, arg2)); 10462 case TARGET_NR_readv: 10463 { 10464 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10465 if (vec != NULL) { 10466 ret = get_errno(safe_readv(arg1, vec, arg3)); 10467 unlock_iovec(vec, arg2, arg3, 1); 10468 } else { 10469 ret = -host_to_target_errno(errno); 10470 } 10471 } 10472 return ret; 10473 case TARGET_NR_writev: 10474 { 10475 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10476 if (vec != NULL) { 10477 ret = get_errno(safe_writev(arg1, vec, arg3)); 10478 unlock_iovec(vec, arg2, arg3, 0); 10479 } else { 10480 ret = -host_to_target_errno(errno); 10481 } 10482 } 10483 return ret; 10484 #if defined(TARGET_NR_preadv) 10485 case TARGET_NR_preadv: 10486 { 10487 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10488 if (vec != NULL) { 10489 unsigned long low, high; 10490 10491 target_to_host_low_high(arg4, arg5, &low, &high); 10492 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10493 unlock_iovec(vec, arg2, arg3, 1); 10494 } else { 10495 ret = -host_to_target_errno(errno); 10496 } 10497 } 10498 return ret; 10499 #endif 10500 #if defined(TARGET_NR_pwritev) 10501 case TARGET_NR_pwritev: 10502 { 10503 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10504 if (vec != NULL) { 10505 unsigned long low, high; 10506 10507 target_to_host_low_high(arg4, arg5, &low, &high); 10508 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10509 unlock_iovec(vec, arg2, arg3, 0); 10510 } else { 10511 ret = -host_to_target_errno(errno); 10512 } 10513 } 10514 return ret; 10515 #endif 10516 case TARGET_NR_getsid: 10517 return get_errno(getsid(arg1)); 10518 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10519 case TARGET_NR_fdatasync: 10520 return get_errno(fdatasync(arg1)); 10521 #endif 10522 #ifdef TARGET_NR__sysctl 10523 case TARGET_NR__sysctl: 10524 /* We don't implement this, but ENOTDIR is always a safe 10525 return value. */ 10526 return -TARGET_ENOTDIR; 10527 #endif 10528 case TARGET_NR_sched_getaffinity: 10529 { 10530 unsigned int mask_size; 10531 unsigned long *mask; 10532 10533 /* 10534 * sched_getaffinity needs multiples of ulong, so need to take 10535 * care of mismatches between target ulong and host ulong sizes. 10536 */ 10537 if (arg2 & (sizeof(abi_ulong) - 1)) { 10538 return -TARGET_EINVAL; 10539 } 10540 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10541 10542 mask = alloca(mask_size); 10543 memset(mask, 0, mask_size); 10544 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10545 10546 if (!is_error(ret)) { 10547 if (ret > arg2) { 10548 /* More data returned than the caller's buffer will fit. 10549 * This only happens if sizeof(abi_long) < sizeof(long) 10550 * and the caller passed us a buffer holding an odd number 10551 * of abi_longs. If the host kernel is actually using the 10552 * extra 4 bytes then fail EINVAL; otherwise we can just 10553 * ignore them and only copy the interesting part. 10554 */ 10555 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10556 if (numcpus > arg2 * 8) { 10557 return -TARGET_EINVAL; 10558 } 10559 ret = arg2; 10560 } 10561 10562 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10563 return -TARGET_EFAULT; 10564 } 10565 } 10566 } 10567 return ret; 10568 case TARGET_NR_sched_setaffinity: 10569 { 10570 unsigned int mask_size; 10571 unsigned long *mask; 10572 10573 /* 10574 * sched_setaffinity needs multiples of ulong, so need to take 10575 * care of mismatches between target ulong and host ulong sizes. 10576 */ 10577 if (arg2 & (sizeof(abi_ulong) - 1)) { 10578 return -TARGET_EINVAL; 10579 } 10580 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10581 mask = alloca(mask_size); 10582 10583 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10584 if (ret) { 10585 return ret; 10586 } 10587 10588 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10589 } 10590 case TARGET_NR_getcpu: 10591 { 10592 unsigned cpu, node; 10593 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10594 arg2 ? &node : NULL, 10595 NULL)); 10596 if (is_error(ret)) { 10597 return ret; 10598 } 10599 if (arg1 && put_user_u32(cpu, arg1)) { 10600 return -TARGET_EFAULT; 10601 } 10602 if (arg2 && put_user_u32(node, arg2)) { 10603 return -TARGET_EFAULT; 10604 } 10605 } 10606 return ret; 10607 case TARGET_NR_sched_setparam: 10608 { 10609 struct sched_param *target_schp; 10610 struct sched_param schp; 10611 10612 if (arg2 == 0) { 10613 return -TARGET_EINVAL; 10614 } 10615 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 10616 return -TARGET_EFAULT; 10617 schp.sched_priority = tswap32(target_schp->sched_priority); 10618 unlock_user_struct(target_schp, arg2, 0); 10619 return get_errno(sched_setparam(arg1, &schp)); 10620 } 10621 case TARGET_NR_sched_getparam: 10622 { 10623 struct sched_param *target_schp; 10624 struct sched_param schp; 10625 10626 if (arg2 == 0) { 10627 return -TARGET_EINVAL; 10628 } 10629 ret = get_errno(sched_getparam(arg1, &schp)); 10630 if (!is_error(ret)) { 10631 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 10632 return -TARGET_EFAULT; 10633 target_schp->sched_priority = tswap32(schp.sched_priority); 10634 unlock_user_struct(target_schp, arg2, 1); 10635 } 10636 } 10637 return ret; 10638 case TARGET_NR_sched_setscheduler: 10639 { 10640 struct sched_param *target_schp; 10641 struct sched_param schp; 10642 if (arg3 == 0) { 10643 return -TARGET_EINVAL; 10644 } 10645 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 10646 return -TARGET_EFAULT; 10647 schp.sched_priority = tswap32(target_schp->sched_priority); 10648 unlock_user_struct(target_schp, arg3, 0); 10649 return get_errno(sched_setscheduler(arg1, arg2, &schp)); 10650 } 10651 case TARGET_NR_sched_getscheduler: 10652 return get_errno(sched_getscheduler(arg1)); 10653 case TARGET_NR_sched_yield: 10654 return get_errno(sched_yield()); 10655 case TARGET_NR_sched_get_priority_max: 10656 return get_errno(sched_get_priority_max(arg1)); 10657 case TARGET_NR_sched_get_priority_min: 10658 return get_errno(sched_get_priority_min(arg1)); 10659 #ifdef TARGET_NR_sched_rr_get_interval 10660 case TARGET_NR_sched_rr_get_interval: 10661 { 10662 struct timespec ts; 10663 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10664 if (!is_error(ret)) { 10665 ret = host_to_target_timespec(arg2, &ts); 10666 } 10667 } 10668 return ret; 10669 #endif 10670 #ifdef TARGET_NR_sched_rr_get_interval_time64 10671 case TARGET_NR_sched_rr_get_interval_time64: 10672 { 10673 struct timespec ts; 10674 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10675 if (!is_error(ret)) { 10676 ret = host_to_target_timespec64(arg2, &ts); 10677 } 10678 } 10679 return ret; 10680 #endif 10681 #if defined(TARGET_NR_nanosleep) 10682 case TARGET_NR_nanosleep: 10683 { 10684 struct timespec req, rem; 10685 target_to_host_timespec(&req, arg1); 10686 ret = get_errno(safe_nanosleep(&req, &rem)); 10687 if (is_error(ret) && arg2) { 10688 host_to_target_timespec(arg2, &rem); 10689 } 10690 } 10691 return ret; 10692 #endif 10693 case TARGET_NR_prctl: 10694 switch (arg1) { 10695 case PR_GET_PDEATHSIG: 10696 { 10697 int deathsig; 10698 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 10699 if (!is_error(ret) && arg2 10700 && put_user_ual(deathsig, arg2)) { 10701 return -TARGET_EFAULT; 10702 } 10703 return ret; 10704 } 10705 #ifdef PR_GET_NAME 10706 case PR_GET_NAME: 10707 { 10708 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 10709 if (!name) { 10710 return -TARGET_EFAULT; 10711 } 10712 ret = get_errno(prctl(arg1, (unsigned long)name, 10713 arg3, arg4, arg5)); 10714 unlock_user(name, arg2, 16); 10715 return ret; 10716 } 10717 case PR_SET_NAME: 10718 { 10719 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 10720 if (!name) { 10721 return -TARGET_EFAULT; 10722 } 10723 ret = get_errno(prctl(arg1, (unsigned long)name, 10724 arg3, arg4, arg5)); 10725 unlock_user(name, arg2, 0); 10726 return ret; 10727 } 10728 #endif 10729 #ifdef TARGET_MIPS 10730 case TARGET_PR_GET_FP_MODE: 10731 { 10732 CPUMIPSState *env = ((CPUMIPSState *)cpu_env); 10733 ret = 0; 10734 if (env->CP0_Status & (1 << CP0St_FR)) { 10735 ret |= TARGET_PR_FP_MODE_FR; 10736 } 10737 if (env->CP0_Config5 & (1 << CP0C5_FRE)) { 10738 ret |= TARGET_PR_FP_MODE_FRE; 10739 } 10740 return ret; 10741 } 10742 case TARGET_PR_SET_FP_MODE: 10743 { 10744 CPUMIPSState *env = ((CPUMIPSState *)cpu_env); 10745 bool old_fr = env->CP0_Status & (1 << CP0St_FR); 10746 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE); 10747 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR; 10748 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE; 10749 10750 const unsigned int known_bits = TARGET_PR_FP_MODE_FR | 10751 TARGET_PR_FP_MODE_FRE; 10752 10753 /* If nothing to change, return right away, successfully. */ 10754 if (old_fr == new_fr && old_fre == new_fre) { 10755 return 0; 10756 } 10757 /* Check the value is valid */ 10758 if (arg2 & ~known_bits) { 10759 return -TARGET_EOPNOTSUPP; 10760 } 10761 /* Setting FRE without FR is not supported. */ 10762 if (new_fre && !new_fr) { 10763 return -TARGET_EOPNOTSUPP; 10764 } 10765 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) { 10766 /* FR1 is not supported */ 10767 return -TARGET_EOPNOTSUPP; 10768 } 10769 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64)) 10770 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { 10771 /* cannot set FR=0 */ 10772 return -TARGET_EOPNOTSUPP; 10773 } 10774 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) { 10775 /* Cannot set FRE=1 */ 10776 return -TARGET_EOPNOTSUPP; 10777 } 10778 10779 int i; 10780 fpr_t *fpr = env->active_fpu.fpr; 10781 for (i = 0; i < 32 ; i += 2) { 10782 if (!old_fr && new_fr) { 10783 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX]; 10784 } else if (old_fr && !new_fr) { 10785 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX]; 10786 } 10787 } 10788 10789 if (new_fr) { 10790 env->CP0_Status |= (1 << CP0St_FR); 10791 env->hflags |= MIPS_HFLAG_F64; 10792 } else { 10793 env->CP0_Status &= ~(1 << CP0St_FR); 10794 env->hflags &= ~MIPS_HFLAG_F64; 10795 } 10796 if (new_fre) { 10797 env->CP0_Config5 |= (1 << CP0C5_FRE); 10798 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { 10799 env->hflags |= MIPS_HFLAG_FRE; 10800 } 10801 } else { 10802 env->CP0_Config5 &= ~(1 << CP0C5_FRE); 10803 env->hflags &= ~MIPS_HFLAG_FRE; 10804 } 10805 10806 return 0; 10807 } 10808 #endif /* MIPS */ 10809 #ifdef TARGET_AARCH64 10810 case TARGET_PR_SVE_SET_VL: 10811 /* 10812 * We cannot support either PR_SVE_SET_VL_ONEXEC or 10813 * PR_SVE_VL_INHERIT. Note the kernel definition 10814 * of sve_vl_valid allows for VQ=512, i.e. VL=8192, 10815 * even though the current architectural maximum is VQ=16. 10816 */ 10817 ret = -TARGET_EINVAL; 10818 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env)) 10819 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { 10820 CPUARMState *env = cpu_env; 10821 ARMCPU *cpu = env_archcpu(env); 10822 uint32_t vq, old_vq; 10823 10824 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; 10825 vq = MAX(arg2 / 16, 1); 10826 vq = MIN(vq, cpu->sve_max_vq); 10827 10828 if (vq < old_vq) { 10829 aarch64_sve_narrow_vq(env, vq); 10830 } 10831 env->vfp.zcr_el[1] = vq - 1; 10832 arm_rebuild_hflags(env); 10833 ret = vq * 16; 10834 } 10835 return ret; 10836 case TARGET_PR_SVE_GET_VL: 10837 ret = -TARGET_EINVAL; 10838 { 10839 ARMCPU *cpu = env_archcpu(cpu_env); 10840 if (cpu_isar_feature(aa64_sve, cpu)) { 10841 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; 10842 } 10843 } 10844 return ret; 10845 case TARGET_PR_PAC_RESET_KEYS: 10846 { 10847 CPUARMState *env = cpu_env; 10848 ARMCPU *cpu = env_archcpu(env); 10849 10850 if (arg3 || arg4 || arg5) { 10851 return -TARGET_EINVAL; 10852 } 10853 if (cpu_isar_feature(aa64_pauth, cpu)) { 10854 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY | 10855 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY | 10856 TARGET_PR_PAC_APGAKEY); 10857 int ret = 0; 10858 Error *err = NULL; 10859 10860 if (arg2 == 0) { 10861 arg2 = all; 10862 } else if (arg2 & ~all) { 10863 return -TARGET_EINVAL; 10864 } 10865 if (arg2 & TARGET_PR_PAC_APIAKEY) { 10866 ret |= qemu_guest_getrandom(&env->keys.apia, 10867 sizeof(ARMPACKey), &err); 10868 } 10869 if (arg2 & TARGET_PR_PAC_APIBKEY) { 10870 ret |= qemu_guest_getrandom(&env->keys.apib, 10871 sizeof(ARMPACKey), &err); 10872 } 10873 if (arg2 & TARGET_PR_PAC_APDAKEY) { 10874 ret |= qemu_guest_getrandom(&env->keys.apda, 10875 sizeof(ARMPACKey), &err); 10876 } 10877 if (arg2 & TARGET_PR_PAC_APDBKEY) { 10878 ret |= qemu_guest_getrandom(&env->keys.apdb, 10879 sizeof(ARMPACKey), &err); 10880 } 10881 if (arg2 & TARGET_PR_PAC_APGAKEY) { 10882 ret |= qemu_guest_getrandom(&env->keys.apga, 10883 sizeof(ARMPACKey), &err); 10884 } 10885 if (ret != 0) { 10886 /* 10887 * Some unknown failure in the crypto. The best 10888 * we can do is log it and fail the syscall. 10889 * The real syscall cannot fail this way. 10890 */ 10891 qemu_log_mask(LOG_UNIMP, 10892 "PR_PAC_RESET_KEYS: Crypto failure: %s", 10893 error_get_pretty(err)); 10894 error_free(err); 10895 return -TARGET_EIO; 10896 } 10897 return 0; 10898 } 10899 } 10900 return -TARGET_EINVAL; 10901 #endif /* AARCH64 */ 10902 case PR_GET_SECCOMP: 10903 case PR_SET_SECCOMP: 10904 /* Disable seccomp to prevent the target disabling syscalls we 10905 * need. */ 10906 return -TARGET_EINVAL; 10907 default: 10908 /* Most prctl options have no pointer arguments */ 10909 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 10910 } 10911 break; 10912 #ifdef TARGET_NR_arch_prctl 10913 case TARGET_NR_arch_prctl: 10914 return do_arch_prctl(cpu_env, arg1, arg2); 10915 #endif 10916 #ifdef TARGET_NR_pread64 10917 case TARGET_NR_pread64: 10918 if (regpairs_aligned(cpu_env, num)) { 10919 arg4 = arg5; 10920 arg5 = arg6; 10921 } 10922 if (arg2 == 0 && arg3 == 0) { 10923 /* Special-case NULL buffer and zero length, which should succeed */ 10924 p = 0; 10925 } else { 10926 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10927 if (!p) { 10928 return -TARGET_EFAULT; 10929 } 10930 } 10931 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 10932 unlock_user(p, arg2, ret); 10933 return ret; 10934 case TARGET_NR_pwrite64: 10935 if (regpairs_aligned(cpu_env, num)) { 10936 arg4 = arg5; 10937 arg5 = arg6; 10938 } 10939 if (arg2 == 0 && arg3 == 0) { 10940 /* Special-case NULL buffer and zero length, which should succeed */ 10941 p = 0; 10942 } else { 10943 p = lock_user(VERIFY_READ, arg2, arg3, 1); 10944 if (!p) { 10945 return -TARGET_EFAULT; 10946 } 10947 } 10948 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 10949 unlock_user(p, arg2, 0); 10950 return ret; 10951 #endif 10952 case TARGET_NR_getcwd: 10953 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 10954 return -TARGET_EFAULT; 10955 ret = get_errno(sys_getcwd1(p, arg2)); 10956 unlock_user(p, arg1, ret); 10957 return ret; 10958 case TARGET_NR_capget: 10959 case TARGET_NR_capset: 10960 { 10961 struct target_user_cap_header *target_header; 10962 struct target_user_cap_data *target_data = NULL; 10963 struct __user_cap_header_struct header; 10964 struct __user_cap_data_struct data[2]; 10965 struct __user_cap_data_struct *dataptr = NULL; 10966 int i, target_datalen; 10967 int data_items = 1; 10968 10969 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 10970 return -TARGET_EFAULT; 10971 } 10972 header.version = tswap32(target_header->version); 10973 header.pid = tswap32(target_header->pid); 10974 10975 if (header.version != _LINUX_CAPABILITY_VERSION) { 10976 /* Version 2 and up takes pointer to two user_data structs */ 10977 data_items = 2; 10978 } 10979 10980 target_datalen = sizeof(*target_data) * data_items; 10981 10982 if (arg2) { 10983 if (num == TARGET_NR_capget) { 10984 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 10985 } else { 10986 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 10987 } 10988 if (!target_data) { 10989 unlock_user_struct(target_header, arg1, 0); 10990 return -TARGET_EFAULT; 10991 } 10992 10993 if (num == TARGET_NR_capset) { 10994 for (i = 0; i < data_items; i++) { 10995 data[i].effective = tswap32(target_data[i].effective); 10996 data[i].permitted = tswap32(target_data[i].permitted); 10997 data[i].inheritable = tswap32(target_data[i].inheritable); 10998 } 10999 } 11000 11001 dataptr = data; 11002 } 11003 11004 if (num == TARGET_NR_capget) { 11005 ret = get_errno(capget(&header, dataptr)); 11006 } else { 11007 ret = get_errno(capset(&header, dataptr)); 11008 } 11009 11010 /* The kernel always updates version for both capget and capset */ 11011 target_header->version = tswap32(header.version); 11012 unlock_user_struct(target_header, arg1, 1); 11013 11014 if (arg2) { 11015 if (num == TARGET_NR_capget) { 11016 for (i = 0; i < data_items; i++) { 11017 target_data[i].effective = tswap32(data[i].effective); 11018 target_data[i].permitted = tswap32(data[i].permitted); 11019 target_data[i].inheritable = tswap32(data[i].inheritable); 11020 } 11021 unlock_user(target_data, arg2, target_datalen); 11022 } else { 11023 unlock_user(target_data, arg2, 0); 11024 } 11025 } 11026 return ret; 11027 } 11028 case TARGET_NR_sigaltstack: 11029 return do_sigaltstack(arg1, arg2, 11030 get_sp_from_cpustate((CPUArchState *)cpu_env)); 11031 11032 #ifdef CONFIG_SENDFILE 11033 #ifdef TARGET_NR_sendfile 11034 case TARGET_NR_sendfile: 11035 { 11036 off_t *offp = NULL; 11037 off_t off; 11038 if (arg3) { 11039 ret = get_user_sal(off, arg3); 11040 if (is_error(ret)) { 11041 return ret; 11042 } 11043 offp = &off; 11044 } 11045 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11046 if (!is_error(ret) && arg3) { 11047 abi_long ret2 = put_user_sal(off, arg3); 11048 if (is_error(ret2)) { 11049 ret = ret2; 11050 } 11051 } 11052 return ret; 11053 } 11054 #endif 11055 #ifdef TARGET_NR_sendfile64 11056 case TARGET_NR_sendfile64: 11057 { 11058 off_t *offp = NULL; 11059 off_t off; 11060 if (arg3) { 11061 ret = get_user_s64(off, arg3); 11062 if (is_error(ret)) { 11063 return ret; 11064 } 11065 offp = &off; 11066 } 11067 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11068 if (!is_error(ret) && arg3) { 11069 abi_long ret2 = put_user_s64(off, arg3); 11070 if (is_error(ret2)) { 11071 ret = ret2; 11072 } 11073 } 11074 return ret; 11075 } 11076 #endif 11077 #endif 11078 #ifdef TARGET_NR_vfork 11079 case TARGET_NR_vfork: 11080 return get_errno(do_fork(cpu_env, 11081 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11082 0, 0, 0, 0)); 11083 #endif 11084 #ifdef TARGET_NR_ugetrlimit 11085 case TARGET_NR_ugetrlimit: 11086 { 11087 struct rlimit rlim; 11088 int resource = target_to_host_resource(arg1); 11089 ret = get_errno(getrlimit(resource, &rlim)); 11090 if (!is_error(ret)) { 11091 struct target_rlimit *target_rlim; 11092 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11093 return -TARGET_EFAULT; 11094 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11095 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11096 unlock_user_struct(target_rlim, arg2, 1); 11097 } 11098 return ret; 11099 } 11100 #endif 11101 #ifdef TARGET_NR_truncate64 11102 case TARGET_NR_truncate64: 11103 if (!(p = lock_user_string(arg1))) 11104 return -TARGET_EFAULT; 11105 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11106 unlock_user(p, arg1, 0); 11107 return ret; 11108 #endif 11109 #ifdef TARGET_NR_ftruncate64 11110 case TARGET_NR_ftruncate64: 11111 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11112 #endif 11113 #ifdef TARGET_NR_stat64 11114 case TARGET_NR_stat64: 11115 if (!(p = lock_user_string(arg1))) { 11116 return -TARGET_EFAULT; 11117 } 11118 ret = get_errno(stat(path(p), &st)); 11119 unlock_user(p, arg1, 0); 11120 if (!is_error(ret)) 11121 ret = host_to_target_stat64(cpu_env, arg2, &st); 11122 return ret; 11123 #endif 11124 #ifdef TARGET_NR_lstat64 11125 case TARGET_NR_lstat64: 11126 if (!(p = lock_user_string(arg1))) { 11127 return -TARGET_EFAULT; 11128 } 11129 ret = get_errno(lstat(path(p), &st)); 11130 unlock_user(p, arg1, 0); 11131 if (!is_error(ret)) 11132 ret = host_to_target_stat64(cpu_env, arg2, &st); 11133 return ret; 11134 #endif 11135 #ifdef TARGET_NR_fstat64 11136 case TARGET_NR_fstat64: 11137 ret = get_errno(fstat(arg1, &st)); 11138 if (!is_error(ret)) 11139 ret = host_to_target_stat64(cpu_env, arg2, &st); 11140 return ret; 11141 #endif 11142 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11143 #ifdef TARGET_NR_fstatat64 11144 case TARGET_NR_fstatat64: 11145 #endif 11146 #ifdef TARGET_NR_newfstatat 11147 case TARGET_NR_newfstatat: 11148 #endif 11149 if (!(p = lock_user_string(arg2))) { 11150 return -TARGET_EFAULT; 11151 } 11152 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11153 unlock_user(p, arg2, 0); 11154 if (!is_error(ret)) 11155 ret = host_to_target_stat64(cpu_env, arg3, &st); 11156 return ret; 11157 #endif 11158 #if defined(TARGET_NR_statx) 11159 case TARGET_NR_statx: 11160 { 11161 struct target_statx *target_stx; 11162 int dirfd = arg1; 11163 int flags = arg3; 11164 11165 p = lock_user_string(arg2); 11166 if (p == NULL) { 11167 return -TARGET_EFAULT; 11168 } 11169 #if defined(__NR_statx) 11170 { 11171 /* 11172 * It is assumed that struct statx is architecture independent. 11173 */ 11174 struct target_statx host_stx; 11175 int mask = arg4; 11176 11177 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11178 if (!is_error(ret)) { 11179 if (host_to_target_statx(&host_stx, arg5) != 0) { 11180 unlock_user(p, arg2, 0); 11181 return -TARGET_EFAULT; 11182 } 11183 } 11184 11185 if (ret != -TARGET_ENOSYS) { 11186 unlock_user(p, arg2, 0); 11187 return ret; 11188 } 11189 } 11190 #endif 11191 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11192 unlock_user(p, arg2, 0); 11193 11194 if (!is_error(ret)) { 11195 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11196 return -TARGET_EFAULT; 11197 } 11198 memset(target_stx, 0, sizeof(*target_stx)); 11199 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11200 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11201 __put_user(st.st_ino, &target_stx->stx_ino); 11202 __put_user(st.st_mode, &target_stx->stx_mode); 11203 __put_user(st.st_uid, &target_stx->stx_uid); 11204 __put_user(st.st_gid, &target_stx->stx_gid); 11205 __put_user(st.st_nlink, &target_stx->stx_nlink); 11206 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11207 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11208 __put_user(st.st_size, &target_stx->stx_size); 11209 __put_user(st.st_blksize, &target_stx->stx_blksize); 11210 __put_user(st.st_blocks, &target_stx->stx_blocks); 11211 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11212 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11213 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11214 unlock_user_struct(target_stx, arg5, 1); 11215 } 11216 } 11217 return ret; 11218 #endif 11219 #ifdef TARGET_NR_lchown 11220 case TARGET_NR_lchown: 11221 if (!(p = lock_user_string(arg1))) 11222 return -TARGET_EFAULT; 11223 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11224 unlock_user(p, arg1, 0); 11225 return ret; 11226 #endif 11227 #ifdef TARGET_NR_getuid 11228 case TARGET_NR_getuid: 11229 return get_errno(high2lowuid(getuid())); 11230 #endif 11231 #ifdef TARGET_NR_getgid 11232 case TARGET_NR_getgid: 11233 return get_errno(high2lowgid(getgid())); 11234 #endif 11235 #ifdef TARGET_NR_geteuid 11236 case TARGET_NR_geteuid: 11237 return get_errno(high2lowuid(geteuid())); 11238 #endif 11239 #ifdef TARGET_NR_getegid 11240 case TARGET_NR_getegid: 11241 return get_errno(high2lowgid(getegid())); 11242 #endif 11243 case TARGET_NR_setreuid: 11244 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11245 case TARGET_NR_setregid: 11246 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11247 case TARGET_NR_getgroups: 11248 { 11249 int gidsetsize = arg1; 11250 target_id *target_grouplist; 11251 gid_t *grouplist; 11252 int i; 11253 11254 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11255 ret = get_errno(getgroups(gidsetsize, grouplist)); 11256 if (gidsetsize == 0) 11257 return ret; 11258 if (!is_error(ret)) { 11259 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11260 if (!target_grouplist) 11261 return -TARGET_EFAULT; 11262 for(i = 0;i < ret; i++) 11263 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11264 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11265 } 11266 } 11267 return ret; 11268 case TARGET_NR_setgroups: 11269 { 11270 int gidsetsize = arg1; 11271 target_id *target_grouplist; 11272 gid_t *grouplist = NULL; 11273 int i; 11274 if (gidsetsize) { 11275 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11276 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11277 if (!target_grouplist) { 11278 return -TARGET_EFAULT; 11279 } 11280 for (i = 0; i < gidsetsize; i++) { 11281 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11282 } 11283 unlock_user(target_grouplist, arg2, 0); 11284 } 11285 return get_errno(setgroups(gidsetsize, grouplist)); 11286 } 11287 case TARGET_NR_fchown: 11288 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11289 #if defined(TARGET_NR_fchownat) 11290 case TARGET_NR_fchownat: 11291 if (!(p = lock_user_string(arg2))) 11292 return -TARGET_EFAULT; 11293 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11294 low2highgid(arg4), arg5)); 11295 unlock_user(p, arg2, 0); 11296 return ret; 11297 #endif 11298 #ifdef TARGET_NR_setresuid 11299 case TARGET_NR_setresuid: 11300 return get_errno(sys_setresuid(low2highuid(arg1), 11301 low2highuid(arg2), 11302 low2highuid(arg3))); 11303 #endif 11304 #ifdef TARGET_NR_getresuid 11305 case TARGET_NR_getresuid: 11306 { 11307 uid_t ruid, euid, suid; 11308 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11309 if (!is_error(ret)) { 11310 if (put_user_id(high2lowuid(ruid), arg1) 11311 || put_user_id(high2lowuid(euid), arg2) 11312 || put_user_id(high2lowuid(suid), arg3)) 11313 return -TARGET_EFAULT; 11314 } 11315 } 11316 return ret; 11317 #endif 11318 #ifdef TARGET_NR_getresgid 11319 case TARGET_NR_setresgid: 11320 return get_errno(sys_setresgid(low2highgid(arg1), 11321 low2highgid(arg2), 11322 low2highgid(arg3))); 11323 #endif 11324 #ifdef TARGET_NR_getresgid 11325 case TARGET_NR_getresgid: 11326 { 11327 gid_t rgid, egid, sgid; 11328 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11329 if (!is_error(ret)) { 11330 if (put_user_id(high2lowgid(rgid), arg1) 11331 || put_user_id(high2lowgid(egid), arg2) 11332 || put_user_id(high2lowgid(sgid), arg3)) 11333 return -TARGET_EFAULT; 11334 } 11335 } 11336 return ret; 11337 #endif 11338 #ifdef TARGET_NR_chown 11339 case TARGET_NR_chown: 11340 if (!(p = lock_user_string(arg1))) 11341 return -TARGET_EFAULT; 11342 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11343 unlock_user(p, arg1, 0); 11344 return ret; 11345 #endif 11346 case TARGET_NR_setuid: 11347 return get_errno(sys_setuid(low2highuid(arg1))); 11348 case TARGET_NR_setgid: 11349 return get_errno(sys_setgid(low2highgid(arg1))); 11350 case TARGET_NR_setfsuid: 11351 return get_errno(setfsuid(arg1)); 11352 case TARGET_NR_setfsgid: 11353 return get_errno(setfsgid(arg1)); 11354 11355 #ifdef TARGET_NR_lchown32 11356 case TARGET_NR_lchown32: 11357 if (!(p = lock_user_string(arg1))) 11358 return -TARGET_EFAULT; 11359 ret = get_errno(lchown(p, arg2, arg3)); 11360 unlock_user(p, arg1, 0); 11361 return ret; 11362 #endif 11363 #ifdef TARGET_NR_getuid32 11364 case TARGET_NR_getuid32: 11365 return get_errno(getuid()); 11366 #endif 11367 11368 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11369 /* Alpha specific */ 11370 case TARGET_NR_getxuid: 11371 { 11372 uid_t euid; 11373 euid=geteuid(); 11374 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 11375 } 11376 return get_errno(getuid()); 11377 #endif 11378 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11379 /* Alpha specific */ 11380 case TARGET_NR_getxgid: 11381 { 11382 uid_t egid; 11383 egid=getegid(); 11384 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 11385 } 11386 return get_errno(getgid()); 11387 #endif 11388 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11389 /* Alpha specific */ 11390 case TARGET_NR_osf_getsysinfo: 11391 ret = -TARGET_EOPNOTSUPP; 11392 switch (arg1) { 11393 case TARGET_GSI_IEEE_FP_CONTROL: 11394 { 11395 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); 11396 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr; 11397 11398 swcr &= ~SWCR_STATUS_MASK; 11399 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; 11400 11401 if (put_user_u64 (swcr, arg2)) 11402 return -TARGET_EFAULT; 11403 ret = 0; 11404 } 11405 break; 11406 11407 /* case GSI_IEEE_STATE_AT_SIGNAL: 11408 -- Not implemented in linux kernel. 11409 case GSI_UACPROC: 11410 -- Retrieves current unaligned access state; not much used. 11411 case GSI_PROC_TYPE: 11412 -- Retrieves implver information; surely not used. 11413 case GSI_GET_HWRPB: 11414 -- Grabs a copy of the HWRPB; surely not used. 11415 */ 11416 } 11417 return ret; 11418 #endif 11419 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11420 /* Alpha specific */ 11421 case TARGET_NR_osf_setsysinfo: 11422 ret = -TARGET_EOPNOTSUPP; 11423 switch (arg1) { 11424 case TARGET_SSI_IEEE_FP_CONTROL: 11425 { 11426 uint64_t swcr, fpcr; 11427 11428 if (get_user_u64 (swcr, arg2)) { 11429 return -TARGET_EFAULT; 11430 } 11431 11432 /* 11433 * The kernel calls swcr_update_status to update the 11434 * status bits from the fpcr at every point that it 11435 * could be queried. Therefore, we store the status 11436 * bits only in FPCR. 11437 */ 11438 ((CPUAlphaState *)cpu_env)->swcr 11439 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); 11440 11441 fpcr = cpu_alpha_load_fpcr(cpu_env); 11442 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); 11443 fpcr |= alpha_ieee_swcr_to_fpcr(swcr); 11444 cpu_alpha_store_fpcr(cpu_env, fpcr); 11445 ret = 0; 11446 } 11447 break; 11448 11449 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11450 { 11451 uint64_t exc, fpcr, fex; 11452 11453 if (get_user_u64(exc, arg2)) { 11454 return -TARGET_EFAULT; 11455 } 11456 exc &= SWCR_STATUS_MASK; 11457 fpcr = cpu_alpha_load_fpcr(cpu_env); 11458 11459 /* Old exceptions are not signaled. */ 11460 fex = alpha_ieee_fpcr_to_swcr(fpcr); 11461 fex = exc & ~fex; 11462 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; 11463 fex &= ((CPUArchState *)cpu_env)->swcr; 11464 11465 /* Update the hardware fpcr. */ 11466 fpcr |= alpha_ieee_swcr_to_fpcr(exc); 11467 cpu_alpha_store_fpcr(cpu_env, fpcr); 11468 11469 if (fex) { 11470 int si_code = TARGET_FPE_FLTUNK; 11471 target_siginfo_t info; 11472 11473 if (fex & SWCR_TRAP_ENABLE_DNO) { 11474 si_code = TARGET_FPE_FLTUND; 11475 } 11476 if (fex & SWCR_TRAP_ENABLE_INE) { 11477 si_code = TARGET_FPE_FLTRES; 11478 } 11479 if (fex & SWCR_TRAP_ENABLE_UNF) { 11480 si_code = TARGET_FPE_FLTUND; 11481 } 11482 if (fex & SWCR_TRAP_ENABLE_OVF) { 11483 si_code = TARGET_FPE_FLTOVF; 11484 } 11485 if (fex & SWCR_TRAP_ENABLE_DZE) { 11486 si_code = TARGET_FPE_FLTDIV; 11487 } 11488 if (fex & SWCR_TRAP_ENABLE_INV) { 11489 si_code = TARGET_FPE_FLTINV; 11490 } 11491 11492 info.si_signo = SIGFPE; 11493 info.si_errno = 0; 11494 info.si_code = si_code; 11495 info._sifields._sigfault._addr 11496 = ((CPUArchState *)cpu_env)->pc; 11497 queue_signal((CPUArchState *)cpu_env, info.si_signo, 11498 QEMU_SI_FAULT, &info); 11499 } 11500 ret = 0; 11501 } 11502 break; 11503 11504 /* case SSI_NVPAIRS: 11505 -- Used with SSIN_UACPROC to enable unaligned accesses. 11506 case SSI_IEEE_STATE_AT_SIGNAL: 11507 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11508 -- Not implemented in linux kernel 11509 */ 11510 } 11511 return ret; 11512 #endif 11513 #ifdef TARGET_NR_osf_sigprocmask 11514 /* Alpha specific. */ 11515 case TARGET_NR_osf_sigprocmask: 11516 { 11517 abi_ulong mask; 11518 int how; 11519 sigset_t set, oldset; 11520 11521 switch(arg1) { 11522 case TARGET_SIG_BLOCK: 11523 how = SIG_BLOCK; 11524 break; 11525 case TARGET_SIG_UNBLOCK: 11526 how = SIG_UNBLOCK; 11527 break; 11528 case TARGET_SIG_SETMASK: 11529 how = SIG_SETMASK; 11530 break; 11531 default: 11532 return -TARGET_EINVAL; 11533 } 11534 mask = arg2; 11535 target_to_host_old_sigset(&set, &mask); 11536 ret = do_sigprocmask(how, &set, &oldset); 11537 if (!ret) { 11538 host_to_target_old_sigset(&mask, &oldset); 11539 ret = mask; 11540 } 11541 } 11542 return ret; 11543 #endif 11544 11545 #ifdef TARGET_NR_getgid32 11546 case TARGET_NR_getgid32: 11547 return get_errno(getgid()); 11548 #endif 11549 #ifdef TARGET_NR_geteuid32 11550 case TARGET_NR_geteuid32: 11551 return get_errno(geteuid()); 11552 #endif 11553 #ifdef TARGET_NR_getegid32 11554 case TARGET_NR_getegid32: 11555 return get_errno(getegid()); 11556 #endif 11557 #ifdef TARGET_NR_setreuid32 11558 case TARGET_NR_setreuid32: 11559 return get_errno(setreuid(arg1, arg2)); 11560 #endif 11561 #ifdef TARGET_NR_setregid32 11562 case TARGET_NR_setregid32: 11563 return get_errno(setregid(arg1, arg2)); 11564 #endif 11565 #ifdef TARGET_NR_getgroups32 11566 case TARGET_NR_getgroups32: 11567 { 11568 int gidsetsize = arg1; 11569 uint32_t *target_grouplist; 11570 gid_t *grouplist; 11571 int i; 11572 11573 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11574 ret = get_errno(getgroups(gidsetsize, grouplist)); 11575 if (gidsetsize == 0) 11576 return ret; 11577 if (!is_error(ret)) { 11578 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11579 if (!target_grouplist) { 11580 return -TARGET_EFAULT; 11581 } 11582 for(i = 0;i < ret; i++) 11583 target_grouplist[i] = tswap32(grouplist[i]); 11584 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11585 } 11586 } 11587 return ret; 11588 #endif 11589 #ifdef TARGET_NR_setgroups32 11590 case TARGET_NR_setgroups32: 11591 { 11592 int gidsetsize = arg1; 11593 uint32_t *target_grouplist; 11594 gid_t *grouplist; 11595 int i; 11596 11597 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11598 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11599 if (!target_grouplist) { 11600 return -TARGET_EFAULT; 11601 } 11602 for(i = 0;i < gidsetsize; i++) 11603 grouplist[i] = tswap32(target_grouplist[i]); 11604 unlock_user(target_grouplist, arg2, 0); 11605 return get_errno(setgroups(gidsetsize, grouplist)); 11606 } 11607 #endif 11608 #ifdef TARGET_NR_fchown32 11609 case TARGET_NR_fchown32: 11610 return get_errno(fchown(arg1, arg2, arg3)); 11611 #endif 11612 #ifdef TARGET_NR_setresuid32 11613 case TARGET_NR_setresuid32: 11614 return get_errno(sys_setresuid(arg1, arg2, arg3)); 11615 #endif 11616 #ifdef TARGET_NR_getresuid32 11617 case TARGET_NR_getresuid32: 11618 { 11619 uid_t ruid, euid, suid; 11620 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11621 if (!is_error(ret)) { 11622 if (put_user_u32(ruid, arg1) 11623 || put_user_u32(euid, arg2) 11624 || put_user_u32(suid, arg3)) 11625 return -TARGET_EFAULT; 11626 } 11627 } 11628 return ret; 11629 #endif 11630 #ifdef TARGET_NR_setresgid32 11631 case TARGET_NR_setresgid32: 11632 return get_errno(sys_setresgid(arg1, arg2, arg3)); 11633 #endif 11634 #ifdef TARGET_NR_getresgid32 11635 case TARGET_NR_getresgid32: 11636 { 11637 gid_t rgid, egid, sgid; 11638 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11639 if (!is_error(ret)) { 11640 if (put_user_u32(rgid, arg1) 11641 || put_user_u32(egid, arg2) 11642 || put_user_u32(sgid, arg3)) 11643 return -TARGET_EFAULT; 11644 } 11645 } 11646 return ret; 11647 #endif 11648 #ifdef TARGET_NR_chown32 11649 case TARGET_NR_chown32: 11650 if (!(p = lock_user_string(arg1))) 11651 return -TARGET_EFAULT; 11652 ret = get_errno(chown(p, arg2, arg3)); 11653 unlock_user(p, arg1, 0); 11654 return ret; 11655 #endif 11656 #ifdef TARGET_NR_setuid32 11657 case TARGET_NR_setuid32: 11658 return get_errno(sys_setuid(arg1)); 11659 #endif 11660 #ifdef TARGET_NR_setgid32 11661 case TARGET_NR_setgid32: 11662 return get_errno(sys_setgid(arg1)); 11663 #endif 11664 #ifdef TARGET_NR_setfsuid32 11665 case TARGET_NR_setfsuid32: 11666 return get_errno(setfsuid(arg1)); 11667 #endif 11668 #ifdef TARGET_NR_setfsgid32 11669 case TARGET_NR_setfsgid32: 11670 return get_errno(setfsgid(arg1)); 11671 #endif 11672 #ifdef TARGET_NR_mincore 11673 case TARGET_NR_mincore: 11674 { 11675 void *a = lock_user(VERIFY_READ, arg1, arg2, 0); 11676 if (!a) { 11677 return -TARGET_ENOMEM; 11678 } 11679 p = lock_user_string(arg3); 11680 if (!p) { 11681 ret = -TARGET_EFAULT; 11682 } else { 11683 ret = get_errno(mincore(a, arg2, p)); 11684 unlock_user(p, arg3, ret); 11685 } 11686 unlock_user(a, arg1, 0); 11687 } 11688 return ret; 11689 #endif 11690 #ifdef TARGET_NR_arm_fadvise64_64 11691 case TARGET_NR_arm_fadvise64_64: 11692 /* arm_fadvise64_64 looks like fadvise64_64 but 11693 * with different argument order: fd, advice, offset, len 11694 * rather than the usual fd, offset, len, advice. 11695 * Note that offset and len are both 64-bit so appear as 11696 * pairs of 32-bit registers. 11697 */ 11698 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11699 target_offset64(arg5, arg6), arg2); 11700 return -host_to_target_errno(ret); 11701 #endif 11702 11703 #if TARGET_ABI_BITS == 32 11704 11705 #ifdef TARGET_NR_fadvise64_64 11706 case TARGET_NR_fadvise64_64: 11707 #if defined(TARGET_PPC) || defined(TARGET_XTENSA) 11708 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11709 ret = arg2; 11710 arg2 = arg3; 11711 arg3 = arg4; 11712 arg4 = arg5; 11713 arg5 = arg6; 11714 arg6 = ret; 11715 #else 11716 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11717 if (regpairs_aligned(cpu_env, num)) { 11718 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11719 arg2 = arg3; 11720 arg3 = arg4; 11721 arg4 = arg5; 11722 arg5 = arg6; 11723 arg6 = arg7; 11724 } 11725 #endif 11726 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), 11727 target_offset64(arg4, arg5), arg6); 11728 return -host_to_target_errno(ret); 11729 #endif 11730 11731 #ifdef TARGET_NR_fadvise64 11732 case TARGET_NR_fadvise64: 11733 /* 5 args: fd, offset (high, low), len, advice */ 11734 if (regpairs_aligned(cpu_env, num)) { 11735 /* offset is in (3,4), len in 5 and advice in 6 */ 11736 arg2 = arg3; 11737 arg3 = arg4; 11738 arg4 = arg5; 11739 arg5 = arg6; 11740 } 11741 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5); 11742 return -host_to_target_errno(ret); 11743 #endif 11744 11745 #else /* not a 32-bit ABI */ 11746 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11747 #ifdef TARGET_NR_fadvise64_64 11748 case TARGET_NR_fadvise64_64: 11749 #endif 11750 #ifdef TARGET_NR_fadvise64 11751 case TARGET_NR_fadvise64: 11752 #endif 11753 #ifdef TARGET_S390X 11754 switch (arg4) { 11755 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11756 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11757 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11758 case 7: arg4 = POSIX_FADV_NOREUSE; break; 11759 default: break; 11760 } 11761 #endif 11762 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11763 #endif 11764 #endif /* end of 64-bit ABI fadvise handling */ 11765 11766 #ifdef TARGET_NR_madvise 11767 case TARGET_NR_madvise: 11768 /* A straight passthrough may not be safe because qemu sometimes 11769 turns private file-backed mappings into anonymous mappings. 11770 This will break MADV_DONTNEED. 11771 This is a hint, so ignoring and returning success is ok. */ 11772 return 0; 11773 #endif 11774 #ifdef TARGET_NR_fcntl64 11775 case TARGET_NR_fcntl64: 11776 { 11777 int cmd; 11778 struct flock64 fl; 11779 from_flock64_fn *copyfrom = copy_from_user_flock64; 11780 to_flock64_fn *copyto = copy_to_user_flock64; 11781 11782 #ifdef TARGET_ARM 11783 if (!((CPUARMState *)cpu_env)->eabi) { 11784 copyfrom = copy_from_user_oabi_flock64; 11785 copyto = copy_to_user_oabi_flock64; 11786 } 11787 #endif 11788 11789 cmd = target_to_host_fcntl_cmd(arg2); 11790 if (cmd == -TARGET_EINVAL) { 11791 return cmd; 11792 } 11793 11794 switch(arg2) { 11795 case TARGET_F_GETLK64: 11796 ret = copyfrom(&fl, arg3); 11797 if (ret) { 11798 break; 11799 } 11800 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11801 if (ret == 0) { 11802 ret = copyto(arg3, &fl); 11803 } 11804 break; 11805 11806 case TARGET_F_SETLK64: 11807 case TARGET_F_SETLKW64: 11808 ret = copyfrom(&fl, arg3); 11809 if (ret) { 11810 break; 11811 } 11812 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11813 break; 11814 default: 11815 ret = do_fcntl(arg1, arg2, arg3); 11816 break; 11817 } 11818 return ret; 11819 } 11820 #endif 11821 #ifdef TARGET_NR_cacheflush 11822 case TARGET_NR_cacheflush: 11823 /* self-modifying code is handled automatically, so nothing needed */ 11824 return 0; 11825 #endif 11826 #ifdef TARGET_NR_getpagesize 11827 case TARGET_NR_getpagesize: 11828 return TARGET_PAGE_SIZE; 11829 #endif 11830 case TARGET_NR_gettid: 11831 return get_errno(sys_gettid()); 11832 #ifdef TARGET_NR_readahead 11833 case TARGET_NR_readahead: 11834 #if TARGET_ABI_BITS == 32 11835 if (regpairs_aligned(cpu_env, num)) { 11836 arg2 = arg3; 11837 arg3 = arg4; 11838 arg4 = arg5; 11839 } 11840 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 11841 #else 11842 ret = get_errno(readahead(arg1, arg2, arg3)); 11843 #endif 11844 return ret; 11845 #endif 11846 #ifdef CONFIG_ATTR 11847 #ifdef TARGET_NR_setxattr 11848 case TARGET_NR_listxattr: 11849 case TARGET_NR_llistxattr: 11850 { 11851 void *p, *b = 0; 11852 if (arg2) { 11853 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11854 if (!b) { 11855 return -TARGET_EFAULT; 11856 } 11857 } 11858 p = lock_user_string(arg1); 11859 if (p) { 11860 if (num == TARGET_NR_listxattr) { 11861 ret = get_errno(listxattr(p, b, arg3)); 11862 } else { 11863 ret = get_errno(llistxattr(p, b, arg3)); 11864 } 11865 } else { 11866 ret = -TARGET_EFAULT; 11867 } 11868 unlock_user(p, arg1, 0); 11869 unlock_user(b, arg2, arg3); 11870 return ret; 11871 } 11872 case TARGET_NR_flistxattr: 11873 { 11874 void *b = 0; 11875 if (arg2) { 11876 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11877 if (!b) { 11878 return -TARGET_EFAULT; 11879 } 11880 } 11881 ret = get_errno(flistxattr(arg1, b, arg3)); 11882 unlock_user(b, arg2, arg3); 11883 return ret; 11884 } 11885 case TARGET_NR_setxattr: 11886 case TARGET_NR_lsetxattr: 11887 { 11888 void *p, *n, *v = 0; 11889 if (arg3) { 11890 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11891 if (!v) { 11892 return -TARGET_EFAULT; 11893 } 11894 } 11895 p = lock_user_string(arg1); 11896 n = lock_user_string(arg2); 11897 if (p && n) { 11898 if (num == TARGET_NR_setxattr) { 11899 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11900 } else { 11901 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11902 } 11903 } else { 11904 ret = -TARGET_EFAULT; 11905 } 11906 unlock_user(p, arg1, 0); 11907 unlock_user(n, arg2, 0); 11908 unlock_user(v, arg3, 0); 11909 } 11910 return ret; 11911 case TARGET_NR_fsetxattr: 11912 { 11913 void *n, *v = 0; 11914 if (arg3) { 11915 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11916 if (!v) { 11917 return -TARGET_EFAULT; 11918 } 11919 } 11920 n = lock_user_string(arg2); 11921 if (n) { 11922 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 11923 } else { 11924 ret = -TARGET_EFAULT; 11925 } 11926 unlock_user(n, arg2, 0); 11927 unlock_user(v, arg3, 0); 11928 } 11929 return ret; 11930 case TARGET_NR_getxattr: 11931 case TARGET_NR_lgetxattr: 11932 { 11933 void *p, *n, *v = 0; 11934 if (arg3) { 11935 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11936 if (!v) { 11937 return -TARGET_EFAULT; 11938 } 11939 } 11940 p = lock_user_string(arg1); 11941 n = lock_user_string(arg2); 11942 if (p && n) { 11943 if (num == TARGET_NR_getxattr) { 11944 ret = get_errno(getxattr(p, n, v, arg4)); 11945 } else { 11946 ret = get_errno(lgetxattr(p, n, v, arg4)); 11947 } 11948 } else { 11949 ret = -TARGET_EFAULT; 11950 } 11951 unlock_user(p, arg1, 0); 11952 unlock_user(n, arg2, 0); 11953 unlock_user(v, arg3, arg4); 11954 } 11955 return ret; 11956 case TARGET_NR_fgetxattr: 11957 { 11958 void *n, *v = 0; 11959 if (arg3) { 11960 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 11961 if (!v) { 11962 return -TARGET_EFAULT; 11963 } 11964 } 11965 n = lock_user_string(arg2); 11966 if (n) { 11967 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 11968 } else { 11969 ret = -TARGET_EFAULT; 11970 } 11971 unlock_user(n, arg2, 0); 11972 unlock_user(v, arg3, arg4); 11973 } 11974 return ret; 11975 case TARGET_NR_removexattr: 11976 case TARGET_NR_lremovexattr: 11977 { 11978 void *p, *n; 11979 p = lock_user_string(arg1); 11980 n = lock_user_string(arg2); 11981 if (p && n) { 11982 if (num == TARGET_NR_removexattr) { 11983 ret = get_errno(removexattr(p, n)); 11984 } else { 11985 ret = get_errno(lremovexattr(p, n)); 11986 } 11987 } else { 11988 ret = -TARGET_EFAULT; 11989 } 11990 unlock_user(p, arg1, 0); 11991 unlock_user(n, arg2, 0); 11992 } 11993 return ret; 11994 case TARGET_NR_fremovexattr: 11995 { 11996 void *n; 11997 n = lock_user_string(arg2); 11998 if (n) { 11999 ret = get_errno(fremovexattr(arg1, n)); 12000 } else { 12001 ret = -TARGET_EFAULT; 12002 } 12003 unlock_user(n, arg2, 0); 12004 } 12005 return ret; 12006 #endif 12007 #endif /* CONFIG_ATTR */ 12008 #ifdef TARGET_NR_set_thread_area 12009 case TARGET_NR_set_thread_area: 12010 #if defined(TARGET_MIPS) 12011 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 12012 return 0; 12013 #elif defined(TARGET_CRIS) 12014 if (arg1 & 0xff) 12015 ret = -TARGET_EINVAL; 12016 else { 12017 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 12018 ret = 0; 12019 } 12020 return ret; 12021 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 12022 return do_set_thread_area(cpu_env, arg1); 12023 #elif defined(TARGET_M68K) 12024 { 12025 TaskState *ts = cpu->opaque; 12026 ts->tp_value = arg1; 12027 return 0; 12028 } 12029 #else 12030 return -TARGET_ENOSYS; 12031 #endif 12032 #endif 12033 #ifdef TARGET_NR_get_thread_area 12034 case TARGET_NR_get_thread_area: 12035 #if defined(TARGET_I386) && defined(TARGET_ABI32) 12036 return do_get_thread_area(cpu_env, arg1); 12037 #elif defined(TARGET_M68K) 12038 { 12039 TaskState *ts = cpu->opaque; 12040 return ts->tp_value; 12041 } 12042 #else 12043 return -TARGET_ENOSYS; 12044 #endif 12045 #endif 12046 #ifdef TARGET_NR_getdomainname 12047 case TARGET_NR_getdomainname: 12048 return -TARGET_ENOSYS; 12049 #endif 12050 12051 #ifdef TARGET_NR_clock_settime 12052 case TARGET_NR_clock_settime: 12053 { 12054 struct timespec ts; 12055 12056 ret = target_to_host_timespec(&ts, arg2); 12057 if (!is_error(ret)) { 12058 ret = get_errno(clock_settime(arg1, &ts)); 12059 } 12060 return ret; 12061 } 12062 #endif 12063 #ifdef TARGET_NR_clock_settime64 12064 case TARGET_NR_clock_settime64: 12065 { 12066 struct timespec ts; 12067 12068 ret = target_to_host_timespec64(&ts, arg2); 12069 if (!is_error(ret)) { 12070 ret = get_errno(clock_settime(arg1, &ts)); 12071 } 12072 return ret; 12073 } 12074 #endif 12075 #ifdef TARGET_NR_clock_gettime 12076 case TARGET_NR_clock_gettime: 12077 { 12078 struct timespec ts; 12079 ret = get_errno(clock_gettime(arg1, &ts)); 12080 if (!is_error(ret)) { 12081 ret = host_to_target_timespec(arg2, &ts); 12082 } 12083 return ret; 12084 } 12085 #endif 12086 #ifdef TARGET_NR_clock_gettime64 12087 case TARGET_NR_clock_gettime64: 12088 { 12089 struct timespec ts; 12090 ret = get_errno(clock_gettime(arg1, &ts)); 12091 if (!is_error(ret)) { 12092 ret = host_to_target_timespec64(arg2, &ts); 12093 } 12094 return ret; 12095 } 12096 #endif 12097 #ifdef TARGET_NR_clock_getres 12098 case TARGET_NR_clock_getres: 12099 { 12100 struct timespec ts; 12101 ret = get_errno(clock_getres(arg1, &ts)); 12102 if (!is_error(ret)) { 12103 host_to_target_timespec(arg2, &ts); 12104 } 12105 return ret; 12106 } 12107 #endif 12108 #ifdef TARGET_NR_clock_getres_time64 12109 case TARGET_NR_clock_getres_time64: 12110 { 12111 struct timespec ts; 12112 ret = get_errno(clock_getres(arg1, &ts)); 12113 if (!is_error(ret)) { 12114 host_to_target_timespec64(arg2, &ts); 12115 } 12116 return ret; 12117 } 12118 #endif 12119 #ifdef TARGET_NR_clock_nanosleep 12120 case TARGET_NR_clock_nanosleep: 12121 { 12122 struct timespec ts; 12123 if (target_to_host_timespec(&ts, arg3)) { 12124 return -TARGET_EFAULT; 12125 } 12126 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12127 &ts, arg4 ? &ts : NULL)); 12128 /* 12129 * if the call is interrupted by a signal handler, it fails 12130 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not 12131 * TIMER_ABSTIME, it returns the remaining unslept time in arg4. 12132 */ 12133 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12134 host_to_target_timespec(arg4, &ts)) { 12135 return -TARGET_EFAULT; 12136 } 12137 12138 return ret; 12139 } 12140 #endif 12141 #ifdef TARGET_NR_clock_nanosleep_time64 12142 case TARGET_NR_clock_nanosleep_time64: 12143 { 12144 struct timespec ts; 12145 12146 if (target_to_host_timespec64(&ts, arg3)) { 12147 return -TARGET_EFAULT; 12148 } 12149 12150 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12151 &ts, arg4 ? &ts : NULL)); 12152 12153 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12154 host_to_target_timespec64(arg4, &ts)) { 12155 return -TARGET_EFAULT; 12156 } 12157 return ret; 12158 } 12159 #endif 12160 12161 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 12162 case TARGET_NR_set_tid_address: 12163 return get_errno(set_tid_address((int *)g2h(arg1))); 12164 #endif 12165 12166 case TARGET_NR_tkill: 12167 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12168 12169 case TARGET_NR_tgkill: 12170 return get_errno(safe_tgkill((int)arg1, (int)arg2, 12171 target_to_host_signal(arg3))); 12172 12173 #ifdef TARGET_NR_set_robust_list 12174 case TARGET_NR_set_robust_list: 12175 case TARGET_NR_get_robust_list: 12176 /* The ABI for supporting robust futexes has userspace pass 12177 * the kernel a pointer to a linked list which is updated by 12178 * userspace after the syscall; the list is walked by the kernel 12179 * when the thread exits. Since the linked list in QEMU guest 12180 * memory isn't a valid linked list for the host and we have 12181 * no way to reliably intercept the thread-death event, we can't 12182 * support these. Silently return ENOSYS so that guest userspace 12183 * falls back to a non-robust futex implementation (which should 12184 * be OK except in the corner case of the guest crashing while 12185 * holding a mutex that is shared with another process via 12186 * shared memory). 12187 */ 12188 return -TARGET_ENOSYS; 12189 #endif 12190 12191 #if defined(TARGET_NR_utimensat) 12192 case TARGET_NR_utimensat: 12193 { 12194 struct timespec *tsp, ts[2]; 12195 if (!arg3) { 12196 tsp = NULL; 12197 } else { 12198 if (target_to_host_timespec(ts, arg3)) { 12199 return -TARGET_EFAULT; 12200 } 12201 if (target_to_host_timespec(ts + 1, arg3 + 12202 sizeof(struct target_timespec))) { 12203 return -TARGET_EFAULT; 12204 } 12205 tsp = ts; 12206 } 12207 if (!arg2) 12208 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12209 else { 12210 if (!(p = lock_user_string(arg2))) { 12211 return -TARGET_EFAULT; 12212 } 12213 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12214 unlock_user(p, arg2, 0); 12215 } 12216 } 12217 return ret; 12218 #endif 12219 #ifdef TARGET_NR_utimensat_time64 12220 case TARGET_NR_utimensat_time64: 12221 { 12222 struct timespec *tsp, ts[2]; 12223 if (!arg3) { 12224 tsp = NULL; 12225 } else { 12226 if (target_to_host_timespec64(ts, arg3)) { 12227 return -TARGET_EFAULT; 12228 } 12229 if (target_to_host_timespec64(ts + 1, arg3 + 12230 sizeof(struct target__kernel_timespec))) { 12231 return -TARGET_EFAULT; 12232 } 12233 tsp = ts; 12234 } 12235 if (!arg2) 12236 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12237 else { 12238 p = lock_user_string(arg2); 12239 if (!p) { 12240 return -TARGET_EFAULT; 12241 } 12242 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12243 unlock_user(p, arg2, 0); 12244 } 12245 } 12246 return ret; 12247 #endif 12248 #ifdef TARGET_NR_futex 12249 case TARGET_NR_futex: 12250 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 12251 #endif 12252 #ifdef TARGET_NR_futex_time64 12253 case TARGET_NR_futex_time64: 12254 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6); 12255 #endif 12256 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 12257 case TARGET_NR_inotify_init: 12258 ret = get_errno(sys_inotify_init()); 12259 if (ret >= 0) { 12260 fd_trans_register(ret, &target_inotify_trans); 12261 } 12262 return ret; 12263 #endif 12264 #ifdef CONFIG_INOTIFY1 12265 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 12266 case TARGET_NR_inotify_init1: 12267 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1, 12268 fcntl_flags_tbl))); 12269 if (ret >= 0) { 12270 fd_trans_register(ret, &target_inotify_trans); 12271 } 12272 return ret; 12273 #endif 12274 #endif 12275 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 12276 case TARGET_NR_inotify_add_watch: 12277 p = lock_user_string(arg2); 12278 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 12279 unlock_user(p, arg2, 0); 12280 return ret; 12281 #endif 12282 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 12283 case TARGET_NR_inotify_rm_watch: 12284 return get_errno(sys_inotify_rm_watch(arg1, arg2)); 12285 #endif 12286 12287 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12288 case TARGET_NR_mq_open: 12289 { 12290 struct mq_attr posix_mq_attr; 12291 struct mq_attr *pposix_mq_attr; 12292 int host_flags; 12293 12294 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12295 pposix_mq_attr = NULL; 12296 if (arg4) { 12297 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12298 return -TARGET_EFAULT; 12299 } 12300 pposix_mq_attr = &posix_mq_attr; 12301 } 12302 p = lock_user_string(arg1 - 1); 12303 if (!p) { 12304 return -TARGET_EFAULT; 12305 } 12306 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12307 unlock_user (p, arg1, 0); 12308 } 12309 return ret; 12310 12311 case TARGET_NR_mq_unlink: 12312 p = lock_user_string(arg1 - 1); 12313 if (!p) { 12314 return -TARGET_EFAULT; 12315 } 12316 ret = get_errno(mq_unlink(p)); 12317 unlock_user (p, arg1, 0); 12318 return ret; 12319 12320 #ifdef TARGET_NR_mq_timedsend 12321 case TARGET_NR_mq_timedsend: 12322 { 12323 struct timespec ts; 12324 12325 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12326 if (arg5 != 0) { 12327 if (target_to_host_timespec(&ts, arg5)) { 12328 return -TARGET_EFAULT; 12329 } 12330 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12331 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12332 return -TARGET_EFAULT; 12333 } 12334 } else { 12335 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12336 } 12337 unlock_user (p, arg2, arg3); 12338 } 12339 return ret; 12340 #endif 12341 #ifdef TARGET_NR_mq_timedsend_time64 12342 case TARGET_NR_mq_timedsend_time64: 12343 { 12344 struct timespec ts; 12345 12346 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12347 if (arg5 != 0) { 12348 if (target_to_host_timespec64(&ts, arg5)) { 12349 return -TARGET_EFAULT; 12350 } 12351 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12352 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12353 return -TARGET_EFAULT; 12354 } 12355 } else { 12356 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12357 } 12358 unlock_user(p, arg2, arg3); 12359 } 12360 return ret; 12361 #endif 12362 12363 #ifdef TARGET_NR_mq_timedreceive 12364 case TARGET_NR_mq_timedreceive: 12365 { 12366 struct timespec ts; 12367 unsigned int prio; 12368 12369 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12370 if (arg5 != 0) { 12371 if (target_to_host_timespec(&ts, arg5)) { 12372 return -TARGET_EFAULT; 12373 } 12374 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12375 &prio, &ts)); 12376 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12377 return -TARGET_EFAULT; 12378 } 12379 } else { 12380 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12381 &prio, NULL)); 12382 } 12383 unlock_user (p, arg2, arg3); 12384 if (arg4 != 0) 12385 put_user_u32(prio, arg4); 12386 } 12387 return ret; 12388 #endif 12389 #ifdef TARGET_NR_mq_timedreceive_time64 12390 case TARGET_NR_mq_timedreceive_time64: 12391 { 12392 struct timespec ts; 12393 unsigned int prio; 12394 12395 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12396 if (arg5 != 0) { 12397 if (target_to_host_timespec64(&ts, arg5)) { 12398 return -TARGET_EFAULT; 12399 } 12400 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12401 &prio, &ts)); 12402 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12403 return -TARGET_EFAULT; 12404 } 12405 } else { 12406 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12407 &prio, NULL)); 12408 } 12409 unlock_user(p, arg2, arg3); 12410 if (arg4 != 0) { 12411 put_user_u32(prio, arg4); 12412 } 12413 } 12414 return ret; 12415 #endif 12416 12417 /* Not implemented for now... */ 12418 /* case TARGET_NR_mq_notify: */ 12419 /* break; */ 12420 12421 case TARGET_NR_mq_getsetattr: 12422 { 12423 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12424 ret = 0; 12425 if (arg2 != 0) { 12426 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12427 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, 12428 &posix_mq_attr_out)); 12429 } else if (arg3 != 0) { 12430 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out)); 12431 } 12432 if (ret == 0 && arg3 != 0) { 12433 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12434 } 12435 } 12436 return ret; 12437 #endif 12438 12439 #ifdef CONFIG_SPLICE 12440 #ifdef TARGET_NR_tee 12441 case TARGET_NR_tee: 12442 { 12443 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12444 } 12445 return ret; 12446 #endif 12447 #ifdef TARGET_NR_splice 12448 case TARGET_NR_splice: 12449 { 12450 loff_t loff_in, loff_out; 12451 loff_t *ploff_in = NULL, *ploff_out = NULL; 12452 if (arg2) { 12453 if (get_user_u64(loff_in, arg2)) { 12454 return -TARGET_EFAULT; 12455 } 12456 ploff_in = &loff_in; 12457 } 12458 if (arg4) { 12459 if (get_user_u64(loff_out, arg4)) { 12460 return -TARGET_EFAULT; 12461 } 12462 ploff_out = &loff_out; 12463 } 12464 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12465 if (arg2) { 12466 if (put_user_u64(loff_in, arg2)) { 12467 return -TARGET_EFAULT; 12468 } 12469 } 12470 if (arg4) { 12471 if (put_user_u64(loff_out, arg4)) { 12472 return -TARGET_EFAULT; 12473 } 12474 } 12475 } 12476 return ret; 12477 #endif 12478 #ifdef TARGET_NR_vmsplice 12479 case TARGET_NR_vmsplice: 12480 { 12481 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12482 if (vec != NULL) { 12483 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12484 unlock_iovec(vec, arg2, arg3, 0); 12485 } else { 12486 ret = -host_to_target_errno(errno); 12487 } 12488 } 12489 return ret; 12490 #endif 12491 #endif /* CONFIG_SPLICE */ 12492 #ifdef CONFIG_EVENTFD 12493 #if defined(TARGET_NR_eventfd) 12494 case TARGET_NR_eventfd: 12495 ret = get_errno(eventfd(arg1, 0)); 12496 if (ret >= 0) { 12497 fd_trans_register(ret, &target_eventfd_trans); 12498 } 12499 return ret; 12500 #endif 12501 #if defined(TARGET_NR_eventfd2) 12502 case TARGET_NR_eventfd2: 12503 { 12504 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 12505 if (arg2 & TARGET_O_NONBLOCK) { 12506 host_flags |= O_NONBLOCK; 12507 } 12508 if (arg2 & TARGET_O_CLOEXEC) { 12509 host_flags |= O_CLOEXEC; 12510 } 12511 ret = get_errno(eventfd(arg1, host_flags)); 12512 if (ret >= 0) { 12513 fd_trans_register(ret, &target_eventfd_trans); 12514 } 12515 return ret; 12516 } 12517 #endif 12518 #endif /* CONFIG_EVENTFD */ 12519 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12520 case TARGET_NR_fallocate: 12521 #if TARGET_ABI_BITS == 32 12522 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12523 target_offset64(arg5, arg6))); 12524 #else 12525 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12526 #endif 12527 return ret; 12528 #endif 12529 #if defined(CONFIG_SYNC_FILE_RANGE) 12530 #if defined(TARGET_NR_sync_file_range) 12531 case TARGET_NR_sync_file_range: 12532 #if TARGET_ABI_BITS == 32 12533 #if defined(TARGET_MIPS) 12534 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12535 target_offset64(arg5, arg6), arg7)); 12536 #else 12537 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12538 target_offset64(arg4, arg5), arg6)); 12539 #endif /* !TARGET_MIPS */ 12540 #else 12541 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 12542 #endif 12543 return ret; 12544 #endif 12545 #if defined(TARGET_NR_sync_file_range2) || \ 12546 defined(TARGET_NR_arm_sync_file_range) 12547 #if defined(TARGET_NR_sync_file_range2) 12548 case TARGET_NR_sync_file_range2: 12549 #endif 12550 #if defined(TARGET_NR_arm_sync_file_range) 12551 case TARGET_NR_arm_sync_file_range: 12552 #endif 12553 /* This is like sync_file_range but the arguments are reordered */ 12554 #if TARGET_ABI_BITS == 32 12555 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12556 target_offset64(arg5, arg6), arg2)); 12557 #else 12558 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12559 #endif 12560 return ret; 12561 #endif 12562 #endif 12563 #if defined(TARGET_NR_signalfd4) 12564 case TARGET_NR_signalfd4: 12565 return do_signalfd4(arg1, arg2, arg4); 12566 #endif 12567 #if defined(TARGET_NR_signalfd) 12568 case TARGET_NR_signalfd: 12569 return do_signalfd4(arg1, arg2, 0); 12570 #endif 12571 #if defined(CONFIG_EPOLL) 12572 #if defined(TARGET_NR_epoll_create) 12573 case TARGET_NR_epoll_create: 12574 return get_errno(epoll_create(arg1)); 12575 #endif 12576 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12577 case TARGET_NR_epoll_create1: 12578 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 12579 #endif 12580 #if defined(TARGET_NR_epoll_ctl) 12581 case TARGET_NR_epoll_ctl: 12582 { 12583 struct epoll_event ep; 12584 struct epoll_event *epp = 0; 12585 if (arg4) { 12586 struct target_epoll_event *target_ep; 12587 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12588 return -TARGET_EFAULT; 12589 } 12590 ep.events = tswap32(target_ep->events); 12591 /* The epoll_data_t union is just opaque data to the kernel, 12592 * so we transfer all 64 bits across and need not worry what 12593 * actual data type it is. 12594 */ 12595 ep.data.u64 = tswap64(target_ep->data.u64); 12596 unlock_user_struct(target_ep, arg4, 0); 12597 epp = &ep; 12598 } 12599 return get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12600 } 12601 #endif 12602 12603 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12604 #if defined(TARGET_NR_epoll_wait) 12605 case TARGET_NR_epoll_wait: 12606 #endif 12607 #if defined(TARGET_NR_epoll_pwait) 12608 case TARGET_NR_epoll_pwait: 12609 #endif 12610 { 12611 struct target_epoll_event *target_ep; 12612 struct epoll_event *ep; 12613 int epfd = arg1; 12614 int maxevents = arg3; 12615 int timeout = arg4; 12616 12617 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12618 return -TARGET_EINVAL; 12619 } 12620 12621 target_ep = lock_user(VERIFY_WRITE, arg2, 12622 maxevents * sizeof(struct target_epoll_event), 1); 12623 if (!target_ep) { 12624 return -TARGET_EFAULT; 12625 } 12626 12627 ep = g_try_new(struct epoll_event, maxevents); 12628 if (!ep) { 12629 unlock_user(target_ep, arg2, 0); 12630 return -TARGET_ENOMEM; 12631 } 12632 12633 switch (num) { 12634 #if defined(TARGET_NR_epoll_pwait) 12635 case TARGET_NR_epoll_pwait: 12636 { 12637 target_sigset_t *target_set; 12638 sigset_t _set, *set = &_set; 12639 12640 if (arg5) { 12641 if (arg6 != sizeof(target_sigset_t)) { 12642 ret = -TARGET_EINVAL; 12643 break; 12644 } 12645 12646 target_set = lock_user(VERIFY_READ, arg5, 12647 sizeof(target_sigset_t), 1); 12648 if (!target_set) { 12649 ret = -TARGET_EFAULT; 12650 break; 12651 } 12652 target_to_host_sigset(set, target_set); 12653 unlock_user(target_set, arg5, 0); 12654 } else { 12655 set = NULL; 12656 } 12657 12658 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12659 set, SIGSET_T_SIZE)); 12660 break; 12661 } 12662 #endif 12663 #if defined(TARGET_NR_epoll_wait) 12664 case TARGET_NR_epoll_wait: 12665 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12666 NULL, 0)); 12667 break; 12668 #endif 12669 default: 12670 ret = -TARGET_ENOSYS; 12671 } 12672 if (!is_error(ret)) { 12673 int i; 12674 for (i = 0; i < ret; i++) { 12675 target_ep[i].events = tswap32(ep[i].events); 12676 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12677 } 12678 unlock_user(target_ep, arg2, 12679 ret * sizeof(struct target_epoll_event)); 12680 } else { 12681 unlock_user(target_ep, arg2, 0); 12682 } 12683 g_free(ep); 12684 return ret; 12685 } 12686 #endif 12687 #endif 12688 #ifdef TARGET_NR_prlimit64 12689 case TARGET_NR_prlimit64: 12690 { 12691 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12692 struct target_rlimit64 *target_rnew, *target_rold; 12693 struct host_rlimit64 rnew, rold, *rnewp = 0; 12694 int resource = target_to_host_resource(arg2); 12695 12696 if (arg3 && (resource != RLIMIT_AS && 12697 resource != RLIMIT_DATA && 12698 resource != RLIMIT_STACK)) { 12699 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12700 return -TARGET_EFAULT; 12701 } 12702 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12703 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12704 unlock_user_struct(target_rnew, arg3, 0); 12705 rnewp = &rnew; 12706 } 12707 12708 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0)); 12709 if (!is_error(ret) && arg4) { 12710 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 12711 return -TARGET_EFAULT; 12712 } 12713 target_rold->rlim_cur = tswap64(rold.rlim_cur); 12714 target_rold->rlim_max = tswap64(rold.rlim_max); 12715 unlock_user_struct(target_rold, arg4, 1); 12716 } 12717 return ret; 12718 } 12719 #endif 12720 #ifdef TARGET_NR_gethostname 12721 case TARGET_NR_gethostname: 12722 { 12723 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 12724 if (name) { 12725 ret = get_errno(gethostname(name, arg2)); 12726 unlock_user(name, arg1, arg2); 12727 } else { 12728 ret = -TARGET_EFAULT; 12729 } 12730 return ret; 12731 } 12732 #endif 12733 #ifdef TARGET_NR_atomic_cmpxchg_32 12734 case TARGET_NR_atomic_cmpxchg_32: 12735 { 12736 /* should use start_exclusive from main.c */ 12737 abi_ulong mem_value; 12738 if (get_user_u32(mem_value, arg6)) { 12739 target_siginfo_t info; 12740 info.si_signo = SIGSEGV; 12741 info.si_errno = 0; 12742 info.si_code = TARGET_SEGV_MAPERR; 12743 info._sifields._sigfault._addr = arg6; 12744 queue_signal((CPUArchState *)cpu_env, info.si_signo, 12745 QEMU_SI_FAULT, &info); 12746 ret = 0xdeadbeef; 12747 12748 } 12749 if (mem_value == arg2) 12750 put_user_u32(arg1, arg6); 12751 return mem_value; 12752 } 12753 #endif 12754 #ifdef TARGET_NR_atomic_barrier 12755 case TARGET_NR_atomic_barrier: 12756 /* Like the kernel implementation and the 12757 qemu arm barrier, no-op this? */ 12758 return 0; 12759 #endif 12760 12761 #ifdef TARGET_NR_timer_create 12762 case TARGET_NR_timer_create: 12763 { 12764 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 12765 12766 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 12767 12768 int clkid = arg1; 12769 int timer_index = next_free_host_timer(); 12770 12771 if (timer_index < 0) { 12772 ret = -TARGET_EAGAIN; 12773 } else { 12774 timer_t *phtimer = g_posix_timers + timer_index; 12775 12776 if (arg2) { 12777 phost_sevp = &host_sevp; 12778 ret = target_to_host_sigevent(phost_sevp, arg2); 12779 if (ret != 0) { 12780 return ret; 12781 } 12782 } 12783 12784 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 12785 if (ret) { 12786 phtimer = NULL; 12787 } else { 12788 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 12789 return -TARGET_EFAULT; 12790 } 12791 } 12792 } 12793 return ret; 12794 } 12795 #endif 12796 12797 #ifdef TARGET_NR_timer_settime 12798 case TARGET_NR_timer_settime: 12799 { 12800 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 12801 * struct itimerspec * old_value */ 12802 target_timer_t timerid = get_timer_id(arg1); 12803 12804 if (timerid < 0) { 12805 ret = timerid; 12806 } else if (arg3 == 0) { 12807 ret = -TARGET_EINVAL; 12808 } else { 12809 timer_t htimer = g_posix_timers[timerid]; 12810 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12811 12812 if (target_to_host_itimerspec(&hspec_new, arg3)) { 12813 return -TARGET_EFAULT; 12814 } 12815 ret = get_errno( 12816 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12817 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 12818 return -TARGET_EFAULT; 12819 } 12820 } 12821 return ret; 12822 } 12823 #endif 12824 12825 #ifdef TARGET_NR_timer_settime64 12826 case TARGET_NR_timer_settime64: 12827 { 12828 target_timer_t timerid = get_timer_id(arg1); 12829 12830 if (timerid < 0) { 12831 ret = timerid; 12832 } else if (arg3 == 0) { 12833 ret = -TARGET_EINVAL; 12834 } else { 12835 timer_t htimer = g_posix_timers[timerid]; 12836 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 12837 12838 if (target_to_host_itimerspec64(&hspec_new, arg3)) { 12839 return -TARGET_EFAULT; 12840 } 12841 ret = get_errno( 12842 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 12843 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) { 12844 return -TARGET_EFAULT; 12845 } 12846 } 12847 return ret; 12848 } 12849 #endif 12850 12851 #ifdef TARGET_NR_timer_gettime 12852 case TARGET_NR_timer_gettime: 12853 { 12854 /* args: timer_t timerid, struct itimerspec *curr_value */ 12855 target_timer_t timerid = get_timer_id(arg1); 12856 12857 if (timerid < 0) { 12858 ret = timerid; 12859 } else if (!arg2) { 12860 ret = -TARGET_EFAULT; 12861 } else { 12862 timer_t htimer = g_posix_timers[timerid]; 12863 struct itimerspec hspec; 12864 ret = get_errno(timer_gettime(htimer, &hspec)); 12865 12866 if (host_to_target_itimerspec(arg2, &hspec)) { 12867 ret = -TARGET_EFAULT; 12868 } 12869 } 12870 return ret; 12871 } 12872 #endif 12873 12874 #ifdef TARGET_NR_timer_gettime64 12875 case TARGET_NR_timer_gettime64: 12876 { 12877 /* args: timer_t timerid, struct itimerspec64 *curr_value */ 12878 target_timer_t timerid = get_timer_id(arg1); 12879 12880 if (timerid < 0) { 12881 ret = timerid; 12882 } else if (!arg2) { 12883 ret = -TARGET_EFAULT; 12884 } else { 12885 timer_t htimer = g_posix_timers[timerid]; 12886 struct itimerspec hspec; 12887 ret = get_errno(timer_gettime(htimer, &hspec)); 12888 12889 if (host_to_target_itimerspec64(arg2, &hspec)) { 12890 ret = -TARGET_EFAULT; 12891 } 12892 } 12893 return ret; 12894 } 12895 #endif 12896 12897 #ifdef TARGET_NR_timer_getoverrun 12898 case TARGET_NR_timer_getoverrun: 12899 { 12900 /* args: timer_t timerid */ 12901 target_timer_t timerid = get_timer_id(arg1); 12902 12903 if (timerid < 0) { 12904 ret = timerid; 12905 } else { 12906 timer_t htimer = g_posix_timers[timerid]; 12907 ret = get_errno(timer_getoverrun(htimer)); 12908 } 12909 return ret; 12910 } 12911 #endif 12912 12913 #ifdef TARGET_NR_timer_delete 12914 case TARGET_NR_timer_delete: 12915 { 12916 /* args: timer_t timerid */ 12917 target_timer_t timerid = get_timer_id(arg1); 12918 12919 if (timerid < 0) { 12920 ret = timerid; 12921 } else { 12922 timer_t htimer = g_posix_timers[timerid]; 12923 ret = get_errno(timer_delete(htimer)); 12924 g_posix_timers[timerid] = 0; 12925 } 12926 return ret; 12927 } 12928 #endif 12929 12930 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 12931 case TARGET_NR_timerfd_create: 12932 return get_errno(timerfd_create(arg1, 12933 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 12934 #endif 12935 12936 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 12937 case TARGET_NR_timerfd_gettime: 12938 { 12939 struct itimerspec its_curr; 12940 12941 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12942 12943 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 12944 return -TARGET_EFAULT; 12945 } 12946 } 12947 return ret; 12948 #endif 12949 12950 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD) 12951 case TARGET_NR_timerfd_gettime64: 12952 { 12953 struct itimerspec its_curr; 12954 12955 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 12956 12957 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) { 12958 return -TARGET_EFAULT; 12959 } 12960 } 12961 return ret; 12962 #endif 12963 12964 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 12965 case TARGET_NR_timerfd_settime: 12966 { 12967 struct itimerspec its_new, its_old, *p_new; 12968 12969 if (arg3) { 12970 if (target_to_host_itimerspec(&its_new, arg3)) { 12971 return -TARGET_EFAULT; 12972 } 12973 p_new = &its_new; 12974 } else { 12975 p_new = NULL; 12976 } 12977 12978 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 12979 12980 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 12981 return -TARGET_EFAULT; 12982 } 12983 } 12984 return ret; 12985 #endif 12986 12987 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD) 12988 case TARGET_NR_timerfd_settime64: 12989 { 12990 struct itimerspec its_new, its_old, *p_new; 12991 12992 if (arg3) { 12993 if (target_to_host_itimerspec64(&its_new, arg3)) { 12994 return -TARGET_EFAULT; 12995 } 12996 p_new = &its_new; 12997 } else { 12998 p_new = NULL; 12999 } 13000 13001 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 13002 13003 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) { 13004 return -TARGET_EFAULT; 13005 } 13006 } 13007 return ret; 13008 #endif 13009 13010 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 13011 case TARGET_NR_ioprio_get: 13012 return get_errno(ioprio_get(arg1, arg2)); 13013 #endif 13014 13015 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 13016 case TARGET_NR_ioprio_set: 13017 return get_errno(ioprio_set(arg1, arg2, arg3)); 13018 #endif 13019 13020 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 13021 case TARGET_NR_setns: 13022 return get_errno(setns(arg1, arg2)); 13023 #endif 13024 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 13025 case TARGET_NR_unshare: 13026 return get_errno(unshare(arg1)); 13027 #endif 13028 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 13029 case TARGET_NR_kcmp: 13030 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 13031 #endif 13032 #ifdef TARGET_NR_swapcontext 13033 case TARGET_NR_swapcontext: 13034 /* PowerPC specific. */ 13035 return do_swapcontext(cpu_env, arg1, arg2, arg3); 13036 #endif 13037 #ifdef TARGET_NR_memfd_create 13038 case TARGET_NR_memfd_create: 13039 p = lock_user_string(arg1); 13040 if (!p) { 13041 return -TARGET_EFAULT; 13042 } 13043 ret = get_errno(memfd_create(p, arg2)); 13044 fd_trans_unregister(ret); 13045 unlock_user(p, arg1, 0); 13046 return ret; 13047 #endif 13048 #if defined TARGET_NR_membarrier && defined __NR_membarrier 13049 case TARGET_NR_membarrier: 13050 return get_errno(membarrier(arg1, arg2)); 13051 #endif 13052 13053 default: 13054 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); 13055 return -TARGET_ENOSYS; 13056 } 13057 return ret; 13058 } 13059 13060 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 13061 abi_long arg2, abi_long arg3, abi_long arg4, 13062 abi_long arg5, abi_long arg6, abi_long arg7, 13063 abi_long arg8) 13064 { 13065 CPUState *cpu = env_cpu(cpu_env); 13066 abi_long ret; 13067 13068 #ifdef DEBUG_ERESTARTSYS 13069 /* Debug-only code for exercising the syscall-restart code paths 13070 * in the per-architecture cpu main loops: restart every syscall 13071 * the guest makes once before letting it through. 13072 */ 13073 { 13074 static bool flag; 13075 flag = !flag; 13076 if (flag) { 13077 return -TARGET_ERESTARTSYS; 13078 } 13079 } 13080 #endif 13081 13082 record_syscall_start(cpu, num, arg1, 13083 arg2, arg3, arg4, arg5, arg6, arg7, arg8); 13084 13085 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { 13086 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6); 13087 } 13088 13089 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4, 13090 arg5, arg6, arg7, arg8); 13091 13092 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { 13093 print_syscall_ret(cpu_env, num, ret, arg1, arg2, 13094 arg3, arg4, arg5, arg6); 13095 } 13096 13097 record_syscall_return(cpu, num, ret); 13098 return ret; 13099 } 13100