/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "target_mman.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#include "cpu_loop-common.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                       \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
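
/* Purely illustrative (not used by the code): a typical glibc
 * pthread_create() issues clone() with CLONE_VM | CLONE_FS | CLONE_FILES |
 * CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, i.e. exactly
 * CLONE_THREAD_FLAGS plus a subset of CLONE_OPTIONAL_THREAD_FLAGS, so it
 * passes the CLONE_INVALID_THREAD_FLAGS check above.
 */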

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
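
/* For example, the _syscall0() use above expands to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * i.e. a direct host syscall, independent of whatever wrapper (if any)
 * the host libc provides.
 */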

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
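
/* Note on the table below: each bitmask_transtbl entry is
 * { target_mask, target_bits, host_mask, host_bits }. A flag value matches
 * an entry when (value & mask) == bits for the side being translated, in
 * which case the other side's bits are OR'ed into the result.
 */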
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.  */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
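
/* Each safe_syscallN() invocation in this file defines a safe_NAME()
 * wrapper, e.g. safe_read(fd, buff, count). Unlike a plain syscall(),
 * safe_syscall() returns -1 with errno == QEMU_ERESTARTSYS instead of
 * blocking when a guest signal is already pending, closing the race
 * between the signal check and the host syscall entry (see
 * user/safe-syscall.h).
 */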
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and
 * 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = new_brk;
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
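
/* Note: target_brk is the guest's current program break, while brk_page is
 * the host-page-aligned top of the heap mapping; the two differ whenever
 * host pages are larger than target pages.
 */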
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                            PROT_READ|PROT_WRITE,
                                            MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
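
/* e.g. for a target with TARGET_HZ 1024 (such as Alpha) on a HOST_HZ 100
 * host, 250 host ticks scale to 250 * 1024 / 100 = 2560 target ticks.
 */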

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
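
/* do_ppoll() below backs both poll() and ppoll(): for plain poll() the
 * millisecond timeout is converted to a host timespec (e.g. 1500 ms
 * becomes { .tv_sec = 1, .tv_nsec = 500000000 }) and no sigmask is passed.
 */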

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
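
/* Ancillary data (SCM_RIGHTS, SCM_CREDENTIALS, ...) cannot simply be
 * byte-copied between guest and host: cmsghdr layout, alignment and some
 * payload formats are ABI-dependent, so the two helpers below walk the
 * control buffer and convert each header and payload individually.
 */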
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          "Unsupported ancillary data %d/%d: "
                          "unhandled msg size\n",
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
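         * For example (illustrative): if the guest supplies a control
         * buffer with room for only two file descriptors but the host
         * kernel returns an SCM_RIGHTS message carrying four, only the
         * descriptors that fit are converted back and MSG_CTRUNC is
         * set in the returned msg_flags, matching native behaviour.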
1898 */ 1899 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1900 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1901 break; 1902 } 1903 1904 if (cmsg->cmsg_level == SOL_SOCKET) { 1905 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1906 } else { 1907 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1908 } 1909 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1910 1911 /* Payload types which need a different size of payload on 1912 * the target must adjust tgt_len here. 1913 */ 1914 tgt_len = len; 1915 switch (cmsg->cmsg_level) { 1916 case SOL_SOCKET: 1917 switch (cmsg->cmsg_type) { 1918 case SO_TIMESTAMP: 1919 tgt_len = sizeof(struct target_timeval); 1920 break; 1921 default: 1922 break; 1923 } 1924 break; 1925 default: 1926 break; 1927 } 1928 1929 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1930 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1931 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1932 } 1933 1934 /* We must now copy-and-convert len bytes of payload 1935 * into tgt_len bytes of destination space. Bear in mind 1936 * that in both source and destination we may be dealing 1937 * with a truncated value! 1938 */ 1939 switch (cmsg->cmsg_level) { 1940 case SOL_SOCKET: 1941 switch (cmsg->cmsg_type) { 1942 case SCM_RIGHTS: 1943 { 1944 int *fd = (int *)data; 1945 int *target_fd = (int *)target_data; 1946 int i, numfds = tgt_len / sizeof(int); 1947 1948 for (i = 0; i < numfds; i++) { 1949 __put_user(fd[i], target_fd + i); 1950 } 1951 break; 1952 } 1953 case SO_TIMESTAMP: 1954 { 1955 struct timeval *tv = (struct timeval *)data; 1956 struct target_timeval *target_tv = 1957 (struct target_timeval *)target_data; 1958 1959 if (len != sizeof(struct timeval) || 1960 tgt_len != sizeof(struct target_timeval)) { 1961 goto unimplemented; 1962 } 1963 1964 /* copy struct timeval to target */ 1965 __put_user(tv->tv_sec, &target_tv->tv_sec); 1966 __put_user(tv->tv_usec, &target_tv->tv_usec); 1967 break; 1968 } 1969 case SCM_CREDENTIALS: 1970 { 1971 struct ucred *cred = (struct ucred *)data; 1972 struct target_ucred *target_cred = 1973 (struct target_ucred *)target_data; 1974 1975 __put_user(cred->pid, &target_cred->pid); 1976 __put_user(cred->uid, &target_cred->uid); 1977 __put_user(cred->gid, &target_cred->gid); 1978 break; 1979 } 1980 default: 1981 goto unimplemented; 1982 } 1983 break; 1984 1985 case SOL_IP: 1986 switch (cmsg->cmsg_type) { 1987 case IP_TTL: 1988 { 1989 uint32_t *v = (uint32_t *)data; 1990 uint32_t *t_int = (uint32_t *)target_data; 1991 1992 if (len != sizeof(uint32_t) || 1993 tgt_len != sizeof(uint32_t)) { 1994 goto unimplemented; 1995 } 1996 __put_user(*v, t_int); 1997 break; 1998 } 1999 case IP_RECVERR: 2000 { 2001 struct errhdr_t { 2002 struct sock_extended_err ee; 2003 struct sockaddr_in offender; 2004 }; 2005 struct errhdr_t *errh = (struct errhdr_t *)data; 2006 struct errhdr_t *target_errh = 2007 (struct errhdr_t *)target_data; 2008 2009 if (len != sizeof(struct errhdr_t) || 2010 tgt_len != sizeof(struct errhdr_t)) { 2011 goto unimplemented; 2012 } 2013 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2014 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2015 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2016 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2017 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2018 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2019 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2020 host_to_target_sockaddr((unsigned long) &target_errh->offender, 
2021 (void *) &errh->offender, sizeof(errh->offender)); 2022 break; 2023 } 2024 default: 2025 goto unimplemented; 2026 } 2027 break; 2028 2029 case SOL_IPV6: 2030 switch (cmsg->cmsg_type) { 2031 case IPV6_HOPLIMIT: 2032 { 2033 uint32_t *v = (uint32_t *)data; 2034 uint32_t *t_int = (uint32_t *)target_data; 2035 2036 if (len != sizeof(uint32_t) || 2037 tgt_len != sizeof(uint32_t)) { 2038 goto unimplemented; 2039 } 2040 __put_user(*v, t_int); 2041 break; 2042 } 2043 case IPV6_RECVERR: 2044 { 2045 struct errhdr6_t { 2046 struct sock_extended_err ee; 2047 struct sockaddr_in6 offender; 2048 }; 2049 struct errhdr6_t *errh = (struct errhdr6_t *)data; 2050 struct errhdr6_t *target_errh = 2051 (struct errhdr6_t *)target_data; 2052 2053 if (len != sizeof(struct errhdr6_t) || 2054 tgt_len != sizeof(struct errhdr6_t)) { 2055 goto unimplemented; 2056 } 2057 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2058 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2059 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2060 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2061 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2062 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2063 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2064 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2065 (void *) &errh->offender, sizeof(errh->offender)); 2066 break; 2067 } 2068 default: 2069 goto unimplemented; 2070 } 2071 break; 2072 2073 default: 2074 unimplemented: 2075 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 2076 cmsg->cmsg_level, cmsg->cmsg_type); 2077 memcpy(target_data, data, MIN(len, tgt_len)); 2078 if (tgt_len > len) { 2079 memset(target_data + len, 0, tgt_len - len); 2080 } 2081 } 2082 2083 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 2084 tgt_space = TARGET_CMSG_SPACE(tgt_len); 2085 if (msg_controllen < tgt_space) { 2086 tgt_space = msg_controllen; 2087 } 2088 msg_controllen -= tgt_space; 2089 space += tgt_space; 2090 cmsg = CMSG_NXTHDR(msgh, cmsg); 2091 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 2092 target_cmsg_start); 2093 } 2094 unlock_user(target_cmsg, target_cmsg_addr, space); 2095 the_end: 2096 target_msgh->msg_controllen = tswapal(space); 2097 return 0; 2098 } 2099 2100 /* do_setsockopt() Must return target values and target errnos. */ 2101 static abi_long do_setsockopt(int sockfd, int level, int optname, 2102 abi_ulong optval_addr, socklen_t optlen) 2103 { 2104 abi_long ret; 2105 int val; 2106 struct ip_mreqn *ip_mreq; 2107 struct ip_mreq_source *ip_mreq_source; 2108 2109 switch(level) { 2110 case SOL_TCP: 2111 case SOL_UDP: 2112 /* TCP and UDP options all take an 'int' value. 
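         * A guest call such as
         *     setsockopt(fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
         * therefore needs no structure conversion here, only the optlen
         * width check and the byte-order conversion that get_user_u32()
         * performs below.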
*/ 2113 if (optlen < sizeof(uint32_t)) 2114 return -TARGET_EINVAL; 2115 2116 if (get_user_u32(val, optval_addr)) 2117 return -TARGET_EFAULT; 2118 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2119 break; 2120 case SOL_IP: 2121 switch(optname) { 2122 case IP_TOS: 2123 case IP_TTL: 2124 case IP_HDRINCL: 2125 case IP_ROUTER_ALERT: 2126 case IP_RECVOPTS: 2127 case IP_RETOPTS: 2128 case IP_PKTINFO: 2129 case IP_MTU_DISCOVER: 2130 case IP_RECVERR: 2131 case IP_RECVTTL: 2132 case IP_RECVTOS: 2133 #ifdef IP_FREEBIND 2134 case IP_FREEBIND: 2135 #endif 2136 case IP_MULTICAST_TTL: 2137 case IP_MULTICAST_LOOP: 2138 val = 0; 2139 if (optlen >= sizeof(uint32_t)) { 2140 if (get_user_u32(val, optval_addr)) 2141 return -TARGET_EFAULT; 2142 } else if (optlen >= 1) { 2143 if (get_user_u8(val, optval_addr)) 2144 return -TARGET_EFAULT; 2145 } 2146 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2147 break; 2148 case IP_ADD_MEMBERSHIP: 2149 case IP_DROP_MEMBERSHIP: 2150 if (optlen < sizeof (struct target_ip_mreq) || 2151 optlen > sizeof (struct target_ip_mreqn)) 2152 return -TARGET_EINVAL; 2153 2154 ip_mreq = (struct ip_mreqn *) alloca(optlen); 2155 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 2156 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 2157 break; 2158 2159 case IP_BLOCK_SOURCE: 2160 case IP_UNBLOCK_SOURCE: 2161 case IP_ADD_SOURCE_MEMBERSHIP: 2162 case IP_DROP_SOURCE_MEMBERSHIP: 2163 if (optlen != sizeof (struct target_ip_mreq_source)) 2164 return -TARGET_EINVAL; 2165 2166 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2167 if (!ip_mreq_source) { 2168 return -TARGET_EFAULT; 2169 } 2170 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 2171 unlock_user (ip_mreq_source, optval_addr, 0); 2172 break; 2173 2174 default: 2175 goto unimplemented; 2176 } 2177 break; 2178 case SOL_IPV6: 2179 switch (optname) { 2180 case IPV6_MTU_DISCOVER: 2181 case IPV6_MTU: 2182 case IPV6_V6ONLY: 2183 case IPV6_RECVPKTINFO: 2184 case IPV6_UNICAST_HOPS: 2185 case IPV6_MULTICAST_HOPS: 2186 case IPV6_MULTICAST_LOOP: 2187 case IPV6_RECVERR: 2188 case IPV6_RECVHOPLIMIT: 2189 case IPV6_2292HOPLIMIT: 2190 case IPV6_CHECKSUM: 2191 case IPV6_ADDRFORM: 2192 case IPV6_2292PKTINFO: 2193 case IPV6_RECVTCLASS: 2194 case IPV6_RECVRTHDR: 2195 case IPV6_2292RTHDR: 2196 case IPV6_RECVHOPOPTS: 2197 case IPV6_2292HOPOPTS: 2198 case IPV6_RECVDSTOPTS: 2199 case IPV6_2292DSTOPTS: 2200 case IPV6_TCLASS: 2201 case IPV6_ADDR_PREFERENCES: 2202 #ifdef IPV6_RECVPATHMTU 2203 case IPV6_RECVPATHMTU: 2204 #endif 2205 #ifdef IPV6_TRANSPARENT 2206 case IPV6_TRANSPARENT: 2207 #endif 2208 #ifdef IPV6_FREEBIND 2209 case IPV6_FREEBIND: 2210 #endif 2211 #ifdef IPV6_RECVORIGDSTADDR 2212 case IPV6_RECVORIGDSTADDR: 2213 #endif 2214 val = 0; 2215 if (optlen < sizeof(uint32_t)) { 2216 return -TARGET_EINVAL; 2217 } 2218 if (get_user_u32(val, optval_addr)) { 2219 return -TARGET_EFAULT; 2220 } 2221 ret = get_errno(setsockopt(sockfd, level, optname, 2222 &val, sizeof(val))); 2223 break; 2224 case IPV6_PKTINFO: 2225 { 2226 struct in6_pktinfo pki; 2227 2228 if (optlen < sizeof(pki)) { 2229 return -TARGET_EINVAL; 2230 } 2231 2232 if (copy_from_user(&pki, optval_addr, sizeof(pki))) { 2233 return -TARGET_EFAULT; 2234 } 2235 2236 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); 2237 2238 ret = get_errno(setsockopt(sockfd, level, optname, 2239 &pki, sizeof(pki))); 2240 break; 2241 } 2242 case IPV6_ADD_MEMBERSHIP: 2243 case IPV6_DROP_MEMBERSHIP: 2244 { 2245 
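            /*
             * struct ipv6_mreq is a 16-byte IPv6 address (a byte array,
             * so it has no endianness issue) followed by a 32-bit
             * interface index; only ipv6mr_interface needs swapping
             * below.
             */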
struct ipv6_mreq ipv6mreq; 2246 2247 if (optlen < sizeof(ipv6mreq)) { 2248 return -TARGET_EINVAL; 2249 } 2250 2251 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) { 2252 return -TARGET_EFAULT; 2253 } 2254 2255 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface); 2256 2257 ret = get_errno(setsockopt(sockfd, level, optname, 2258 &ipv6mreq, sizeof(ipv6mreq))); 2259 break; 2260 } 2261 default: 2262 goto unimplemented; 2263 } 2264 break; 2265 case SOL_ICMPV6: 2266 switch (optname) { 2267 case ICMPV6_FILTER: 2268 { 2269 struct icmp6_filter icmp6f; 2270 2271 if (optlen > sizeof(icmp6f)) { 2272 optlen = sizeof(icmp6f); 2273 } 2274 2275 if (copy_from_user(&icmp6f, optval_addr, optlen)) { 2276 return -TARGET_EFAULT; 2277 } 2278 2279 for (val = 0; val < 8; val++) { 2280 icmp6f.data[val] = tswap32(icmp6f.data[val]); 2281 } 2282 2283 ret = get_errno(setsockopt(sockfd, level, optname, 2284 &icmp6f, optlen)); 2285 break; 2286 } 2287 default: 2288 goto unimplemented; 2289 } 2290 break; 2291 case SOL_RAW: 2292 switch (optname) { 2293 case ICMP_FILTER: 2294 case IPV6_CHECKSUM: 2295 /* those take an u32 value */ 2296 if (optlen < sizeof(uint32_t)) { 2297 return -TARGET_EINVAL; 2298 } 2299 2300 if (get_user_u32(val, optval_addr)) { 2301 return -TARGET_EFAULT; 2302 } 2303 ret = get_errno(setsockopt(sockfd, level, optname, 2304 &val, sizeof(val))); 2305 break; 2306 2307 default: 2308 goto unimplemented; 2309 } 2310 break; 2311 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE) 2312 case SOL_ALG: 2313 switch (optname) { 2314 case ALG_SET_KEY: 2315 { 2316 char *alg_key = g_malloc(optlen); 2317 2318 if (!alg_key) { 2319 return -TARGET_ENOMEM; 2320 } 2321 if (copy_from_user(alg_key, optval_addr, optlen)) { 2322 g_free(alg_key); 2323 return -TARGET_EFAULT; 2324 } 2325 ret = get_errno(setsockopt(sockfd, level, optname, 2326 alg_key, optlen)); 2327 g_free(alg_key); 2328 break; 2329 } 2330 case ALG_SET_AEAD_AUTHSIZE: 2331 { 2332 ret = get_errno(setsockopt(sockfd, level, optname, 2333 NULL, optlen)); 2334 break; 2335 } 2336 default: 2337 goto unimplemented; 2338 } 2339 break; 2340 #endif 2341 case TARGET_SOL_SOCKET: 2342 switch (optname) { 2343 case TARGET_SO_RCVTIMEO: 2344 { 2345 struct timeval tv; 2346 2347 optname = SO_RCVTIMEO; 2348 2349 set_timeout: 2350 if (optlen != sizeof(struct target_timeval)) { 2351 return -TARGET_EINVAL; 2352 } 2353 2354 if (copy_from_user_timeval(&tv, optval_addr)) { 2355 return -TARGET_EFAULT; 2356 } 2357 2358 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2359 &tv, sizeof(tv))); 2360 return ret; 2361 } 2362 case TARGET_SO_SNDTIMEO: 2363 optname = SO_SNDTIMEO; 2364 goto set_timeout; 2365 case TARGET_SO_ATTACH_FILTER: 2366 { 2367 struct target_sock_fprog *tfprog; 2368 struct target_sock_filter *tfilter; 2369 struct sock_fprog fprog; 2370 struct sock_filter *filter; 2371 int i; 2372 2373 if (optlen != sizeof(*tfprog)) { 2374 return -TARGET_EINVAL; 2375 } 2376 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 2377 return -TARGET_EFAULT; 2378 } 2379 if (!lock_user_struct(VERIFY_READ, tfilter, 2380 tswapal(tfprog->filter), 0)) { 2381 unlock_user_struct(tfprog, optval_addr, 1); 2382 return -TARGET_EFAULT; 2383 } 2384 2385 fprog.len = tswap16(tfprog->len); 2386 filter = g_try_new(struct sock_filter, fprog.len); 2387 if (filter == NULL) { 2388 unlock_user_struct(tfilter, tfprog->filter, 1); 2389 unlock_user_struct(tfprog, optval_addr, 1); 2390 return -TARGET_ENOMEM; 2391 } 2392 for (i = 0; i < fprog.len; i++) { 2393 
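                /*
                 * struct sock_filter is { __u16 code; __u8 jt; __u8 jf;
                 * __u32 k; }: the one-byte jump offsets need no
                 * conversion, only the 16-bit opcode and the 32-bit
                 * operand do.
                 */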
filter[i].code = tswap16(tfilter[i].code); 2394 filter[i].jt = tfilter[i].jt; 2395 filter[i].jf = tfilter[i].jf; 2396 filter[i].k = tswap32(tfilter[i].k); 2397 } 2398 fprog.filter = filter; 2399 2400 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 2401 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 2402 g_free(filter); 2403 2404 unlock_user_struct(tfilter, tfprog->filter, 1); 2405 unlock_user_struct(tfprog, optval_addr, 1); 2406 return ret; 2407 } 2408 case TARGET_SO_BINDTODEVICE: 2409 { 2410 char *dev_ifname, *addr_ifname; 2411 2412 if (optlen > IFNAMSIZ - 1) { 2413 optlen = IFNAMSIZ - 1; 2414 } 2415 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2416 if (!dev_ifname) { 2417 return -TARGET_EFAULT; 2418 } 2419 optname = SO_BINDTODEVICE; 2420 addr_ifname = alloca(IFNAMSIZ); 2421 memcpy(addr_ifname, dev_ifname, optlen); 2422 addr_ifname[optlen] = 0; 2423 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2424 addr_ifname, optlen)); 2425 unlock_user (dev_ifname, optval_addr, 0); 2426 return ret; 2427 } 2428 case TARGET_SO_LINGER: 2429 { 2430 struct linger lg; 2431 struct target_linger *tlg; 2432 2433 if (optlen != sizeof(struct target_linger)) { 2434 return -TARGET_EINVAL; 2435 } 2436 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) { 2437 return -TARGET_EFAULT; 2438 } 2439 __get_user(lg.l_onoff, &tlg->l_onoff); 2440 __get_user(lg.l_linger, &tlg->l_linger); 2441 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER, 2442 &lg, sizeof(lg))); 2443 unlock_user_struct(tlg, optval_addr, 0); 2444 return ret; 2445 } 2446 /* Options with 'int' argument. */ 2447 case TARGET_SO_DEBUG: 2448 optname = SO_DEBUG; 2449 break; 2450 case TARGET_SO_REUSEADDR: 2451 optname = SO_REUSEADDR; 2452 break; 2453 #ifdef SO_REUSEPORT 2454 case TARGET_SO_REUSEPORT: 2455 optname = SO_REUSEPORT; 2456 break; 2457 #endif 2458 case TARGET_SO_TYPE: 2459 optname = SO_TYPE; 2460 break; 2461 case TARGET_SO_ERROR: 2462 optname = SO_ERROR; 2463 break; 2464 case TARGET_SO_DONTROUTE: 2465 optname = SO_DONTROUTE; 2466 break; 2467 case TARGET_SO_BROADCAST: 2468 optname = SO_BROADCAST; 2469 break; 2470 case TARGET_SO_SNDBUF: 2471 optname = SO_SNDBUF; 2472 break; 2473 case TARGET_SO_SNDBUFFORCE: 2474 optname = SO_SNDBUFFORCE; 2475 break; 2476 case TARGET_SO_RCVBUF: 2477 optname = SO_RCVBUF; 2478 break; 2479 case TARGET_SO_RCVBUFFORCE: 2480 optname = SO_RCVBUFFORCE; 2481 break; 2482 case TARGET_SO_KEEPALIVE: 2483 optname = SO_KEEPALIVE; 2484 break; 2485 case TARGET_SO_OOBINLINE: 2486 optname = SO_OOBINLINE; 2487 break; 2488 case TARGET_SO_NO_CHECK: 2489 optname = SO_NO_CHECK; 2490 break; 2491 case TARGET_SO_PRIORITY: 2492 optname = SO_PRIORITY; 2493 break; 2494 #ifdef SO_BSDCOMPAT 2495 case TARGET_SO_BSDCOMPAT: 2496 optname = SO_BSDCOMPAT; 2497 break; 2498 #endif 2499 case TARGET_SO_PASSCRED: 2500 optname = SO_PASSCRED; 2501 break; 2502 case TARGET_SO_PASSSEC: 2503 optname = SO_PASSSEC; 2504 break; 2505 case TARGET_SO_TIMESTAMP: 2506 optname = SO_TIMESTAMP; 2507 break; 2508 case TARGET_SO_RCVLOWAT: 2509 optname = SO_RCVLOWAT; 2510 break; 2511 default: 2512 goto unimplemented; 2513 } 2514 if (optlen < sizeof(uint32_t)) 2515 return -TARGET_EINVAL; 2516 2517 if (get_user_u32(val, optval_addr)) 2518 return -TARGET_EFAULT; 2519 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 2520 break; 2521 #ifdef SOL_NETLINK 2522 case SOL_NETLINK: 2523 switch (optname) { 2524 case NETLINK_PKTINFO: 2525 case NETLINK_ADD_MEMBERSHIP: 2526 case NETLINK_DROP_MEMBERSHIP: 2527 case NETLINK_BROADCAST_ERROR: 2528 
case NETLINK_NO_ENOBUFS: 2529 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2530 case NETLINK_LISTEN_ALL_NSID: 2531 case NETLINK_CAP_ACK: 2532 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2533 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2534 case NETLINK_EXT_ACK: 2535 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2536 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2537 case NETLINK_GET_STRICT_CHK: 2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2539 break; 2540 default: 2541 goto unimplemented; 2542 } 2543 val = 0; 2544 if (optlen < sizeof(uint32_t)) { 2545 return -TARGET_EINVAL; 2546 } 2547 if (get_user_u32(val, optval_addr)) { 2548 return -TARGET_EFAULT; 2549 } 2550 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val, 2551 sizeof(val))); 2552 break; 2553 #endif /* SOL_NETLINK */ 2554 default: 2555 unimplemented: 2556 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n", 2557 level, optname); 2558 ret = -TARGET_ENOPROTOOPT; 2559 } 2560 return ret; 2561 } 2562 2563 /* do_getsockopt() Must return target values and target errnos. */ 2564 static abi_long do_getsockopt(int sockfd, int level, int optname, 2565 abi_ulong optval_addr, abi_ulong optlen) 2566 { 2567 abi_long ret; 2568 int len, val; 2569 socklen_t lv; 2570 2571 switch(level) { 2572 case TARGET_SOL_SOCKET: 2573 level = SOL_SOCKET; 2574 switch (optname) { 2575 /* These don't just return a single integer */ 2576 case TARGET_SO_PEERNAME: 2577 goto unimplemented; 2578 case TARGET_SO_RCVTIMEO: { 2579 struct timeval tv; 2580 socklen_t tvlen; 2581 2582 optname = SO_RCVTIMEO; 2583 2584 get_timeout: 2585 if (get_user_u32(len, optlen)) { 2586 return -TARGET_EFAULT; 2587 } 2588 if (len < 0) { 2589 return -TARGET_EINVAL; 2590 } 2591 2592 tvlen = sizeof(tv); 2593 ret = get_errno(getsockopt(sockfd, level, optname, 2594 &tv, &tvlen)); 2595 if (ret < 0) { 2596 return ret; 2597 } 2598 if (len > sizeof(struct target_timeval)) { 2599 len = sizeof(struct target_timeval); 2600 } 2601 if (copy_to_user_timeval(optval_addr, &tv)) { 2602 return -TARGET_EFAULT; 2603 } 2604 if (put_user_u32(len, optlen)) { 2605 return -TARGET_EFAULT; 2606 } 2607 break; 2608 } 2609 case TARGET_SO_SNDTIMEO: 2610 optname = SO_SNDTIMEO; 2611 goto get_timeout; 2612 case TARGET_SO_PEERCRED: { 2613 struct ucred cr; 2614 socklen_t crlen; 2615 struct target_ucred *tcr; 2616 2617 if (get_user_u32(len, optlen)) { 2618 return -TARGET_EFAULT; 2619 } 2620 if (len < 0) { 2621 return -TARGET_EINVAL; 2622 } 2623 2624 crlen = sizeof(cr); 2625 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 2626 &cr, &crlen)); 2627 if (ret < 0) { 2628 return ret; 2629 } 2630 if (len > crlen) { 2631 len = crlen; 2632 } 2633 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 2634 return -TARGET_EFAULT; 2635 } 2636 __put_user(cr.pid, &tcr->pid); 2637 __put_user(cr.uid, &tcr->uid); 2638 __put_user(cr.gid, &tcr->gid); 2639 unlock_user_struct(tcr, optval_addr, 1); 2640 if (put_user_u32(len, optlen)) { 2641 return -TARGET_EFAULT; 2642 } 2643 break; 2644 } 2645 case TARGET_SO_PEERSEC: { 2646 char *name; 2647 2648 if (get_user_u32(len, optlen)) { 2649 return -TARGET_EFAULT; 2650 } 2651 if (len < 0) { 2652 return -TARGET_EINVAL; 2653 } 2654 name = lock_user(VERIFY_WRITE, optval_addr, len, 0); 2655 if (!name) { 2656 return -TARGET_EFAULT; 2657 } 2658 lv = len; 2659 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC, 2660 name, &lv)); 2661 if (put_user_u32(lv, optlen)) { 2662 ret = -TARGET_EFAULT; 2663 } 2664 
unlock_user(name, optval_addr, lv); 2665 break; 2666 } 2667 case TARGET_SO_LINGER: 2668 { 2669 struct linger lg; 2670 socklen_t lglen; 2671 struct target_linger *tlg; 2672 2673 if (get_user_u32(len, optlen)) { 2674 return -TARGET_EFAULT; 2675 } 2676 if (len < 0) { 2677 return -TARGET_EINVAL; 2678 } 2679 2680 lglen = sizeof(lg); 2681 ret = get_errno(getsockopt(sockfd, level, SO_LINGER, 2682 &lg, &lglen)); 2683 if (ret < 0) { 2684 return ret; 2685 } 2686 if (len > lglen) { 2687 len = lglen; 2688 } 2689 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) { 2690 return -TARGET_EFAULT; 2691 } 2692 __put_user(lg.l_onoff, &tlg->l_onoff); 2693 __put_user(lg.l_linger, &tlg->l_linger); 2694 unlock_user_struct(tlg, optval_addr, 1); 2695 if (put_user_u32(len, optlen)) { 2696 return -TARGET_EFAULT; 2697 } 2698 break; 2699 } 2700 /* Options with 'int' argument. */ 2701 case TARGET_SO_DEBUG: 2702 optname = SO_DEBUG; 2703 goto int_case; 2704 case TARGET_SO_REUSEADDR: 2705 optname = SO_REUSEADDR; 2706 goto int_case; 2707 #ifdef SO_REUSEPORT 2708 case TARGET_SO_REUSEPORT: 2709 optname = SO_REUSEPORT; 2710 goto int_case; 2711 #endif 2712 case TARGET_SO_TYPE: 2713 optname = SO_TYPE; 2714 goto int_case; 2715 case TARGET_SO_ERROR: 2716 optname = SO_ERROR; 2717 goto int_case; 2718 case TARGET_SO_DONTROUTE: 2719 optname = SO_DONTROUTE; 2720 goto int_case; 2721 case TARGET_SO_BROADCAST: 2722 optname = SO_BROADCAST; 2723 goto int_case; 2724 case TARGET_SO_SNDBUF: 2725 optname = SO_SNDBUF; 2726 goto int_case; 2727 case TARGET_SO_RCVBUF: 2728 optname = SO_RCVBUF; 2729 goto int_case; 2730 case TARGET_SO_KEEPALIVE: 2731 optname = SO_KEEPALIVE; 2732 goto int_case; 2733 case TARGET_SO_OOBINLINE: 2734 optname = SO_OOBINLINE; 2735 goto int_case; 2736 case TARGET_SO_NO_CHECK: 2737 optname = SO_NO_CHECK; 2738 goto int_case; 2739 case TARGET_SO_PRIORITY: 2740 optname = SO_PRIORITY; 2741 goto int_case; 2742 #ifdef SO_BSDCOMPAT 2743 case TARGET_SO_BSDCOMPAT: 2744 optname = SO_BSDCOMPAT; 2745 goto int_case; 2746 #endif 2747 case TARGET_SO_PASSCRED: 2748 optname = SO_PASSCRED; 2749 goto int_case; 2750 case TARGET_SO_TIMESTAMP: 2751 optname = SO_TIMESTAMP; 2752 goto int_case; 2753 case TARGET_SO_RCVLOWAT: 2754 optname = SO_RCVLOWAT; 2755 goto int_case; 2756 case TARGET_SO_ACCEPTCONN: 2757 optname = SO_ACCEPTCONN; 2758 goto int_case; 2759 case TARGET_SO_PROTOCOL: 2760 optname = SO_PROTOCOL; 2761 goto int_case; 2762 case TARGET_SO_DOMAIN: 2763 optname = SO_DOMAIN; 2764 goto int_case; 2765 default: 2766 goto int_case; 2767 } 2768 break; 2769 case SOL_TCP: 2770 case SOL_UDP: 2771 /* TCP and UDP options all take an 'int' value. 
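         * The int_case label below is also the landing point for the
         * TARGET_SO_* socket options above that return a plain integer.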
*/ 2772 int_case: 2773 if (get_user_u32(len, optlen)) 2774 return -TARGET_EFAULT; 2775 if (len < 0) 2776 return -TARGET_EINVAL; 2777 lv = sizeof(lv); 2778 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2779 if (ret < 0) 2780 return ret; 2781 switch (optname) { 2782 case SO_TYPE: 2783 val = host_to_target_sock_type(val); 2784 break; 2785 case SO_ERROR: 2786 val = host_to_target_errno(val); 2787 break; 2788 } 2789 if (len > lv) 2790 len = lv; 2791 if (len == 4) { 2792 if (put_user_u32(val, optval_addr)) 2793 return -TARGET_EFAULT; 2794 } else { 2795 if (put_user_u8(val, optval_addr)) 2796 return -TARGET_EFAULT; 2797 } 2798 if (put_user_u32(len, optlen)) 2799 return -TARGET_EFAULT; 2800 break; 2801 case SOL_IP: 2802 switch(optname) { 2803 case IP_TOS: 2804 case IP_TTL: 2805 case IP_HDRINCL: 2806 case IP_ROUTER_ALERT: 2807 case IP_RECVOPTS: 2808 case IP_RETOPTS: 2809 case IP_PKTINFO: 2810 case IP_MTU_DISCOVER: 2811 case IP_RECVERR: 2812 case IP_RECVTOS: 2813 #ifdef IP_FREEBIND 2814 case IP_FREEBIND: 2815 #endif 2816 case IP_MULTICAST_TTL: 2817 case IP_MULTICAST_LOOP: 2818 if (get_user_u32(len, optlen)) 2819 return -TARGET_EFAULT; 2820 if (len < 0) 2821 return -TARGET_EINVAL; 2822 lv = sizeof(lv); 2823 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2824 if (ret < 0) 2825 return ret; 2826 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2827 len = 1; 2828 if (put_user_u32(len, optlen) 2829 || put_user_u8(val, optval_addr)) 2830 return -TARGET_EFAULT; 2831 } else { 2832 if (len > sizeof(int)) 2833 len = sizeof(int); 2834 if (put_user_u32(len, optlen) 2835 || put_user_u32(val, optval_addr)) 2836 return -TARGET_EFAULT; 2837 } 2838 break; 2839 default: 2840 ret = -TARGET_ENOPROTOOPT; 2841 break; 2842 } 2843 break; 2844 case SOL_IPV6: 2845 switch (optname) { 2846 case IPV6_MTU_DISCOVER: 2847 case IPV6_MTU: 2848 case IPV6_V6ONLY: 2849 case IPV6_RECVPKTINFO: 2850 case IPV6_UNICAST_HOPS: 2851 case IPV6_MULTICAST_HOPS: 2852 case IPV6_MULTICAST_LOOP: 2853 case IPV6_RECVERR: 2854 case IPV6_RECVHOPLIMIT: 2855 case IPV6_2292HOPLIMIT: 2856 case IPV6_CHECKSUM: 2857 case IPV6_ADDRFORM: 2858 case IPV6_2292PKTINFO: 2859 case IPV6_RECVTCLASS: 2860 case IPV6_RECVRTHDR: 2861 case IPV6_2292RTHDR: 2862 case IPV6_RECVHOPOPTS: 2863 case IPV6_2292HOPOPTS: 2864 case IPV6_RECVDSTOPTS: 2865 case IPV6_2292DSTOPTS: 2866 case IPV6_TCLASS: 2867 case IPV6_ADDR_PREFERENCES: 2868 #ifdef IPV6_RECVPATHMTU 2869 case IPV6_RECVPATHMTU: 2870 #endif 2871 #ifdef IPV6_TRANSPARENT 2872 case IPV6_TRANSPARENT: 2873 #endif 2874 #ifdef IPV6_FREEBIND 2875 case IPV6_FREEBIND: 2876 #endif 2877 #ifdef IPV6_RECVORIGDSTADDR 2878 case IPV6_RECVORIGDSTADDR: 2879 #endif 2880 if (get_user_u32(len, optlen)) 2881 return -TARGET_EFAULT; 2882 if (len < 0) 2883 return -TARGET_EINVAL; 2884 lv = sizeof(lv); 2885 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2886 if (ret < 0) 2887 return ret; 2888 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2889 len = 1; 2890 if (put_user_u32(len, optlen) 2891 || put_user_u8(val, optval_addr)) 2892 return -TARGET_EFAULT; 2893 } else { 2894 if (len > sizeof(int)) 2895 len = sizeof(int); 2896 if (put_user_u32(len, optlen) 2897 || put_user_u32(val, optval_addr)) 2898 return -TARGET_EFAULT; 2899 } 2900 break; 2901 default: 2902 ret = -TARGET_ENOPROTOOPT; 2903 break; 2904 } 2905 break; 2906 #ifdef SOL_NETLINK 2907 case SOL_NETLINK: 2908 switch (optname) { 2909 case NETLINK_PKTINFO: 2910 case NETLINK_BROADCAST_ERROR: 2911 case NETLINK_NO_ENOBUFS: 2912 #if 
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            unlock_user(results, optval_addr, len);
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}

static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.
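       (Illustrative: with 4K target pages max_len works out to
       0x7ffff000; the loop below clamps each iovec length so the
       running total never exceeds it.)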
*/ 3037 max_len = 0x7fffffff & TARGET_PAGE_MASK; 3038 total_len = 0; 3039 3040 for (i = 0; i < count; i++) { 3041 abi_ulong base = tswapal(target_vec[i].iov_base); 3042 abi_long len = tswapal(target_vec[i].iov_len); 3043 3044 if (len < 0) { 3045 err = EINVAL; 3046 goto fail; 3047 } else if (len == 0) { 3048 /* Zero length pointer is ignored. */ 3049 vec[i].iov_base = 0; 3050 } else { 3051 vec[i].iov_base = lock_user(type, base, len, copy); 3052 /* If the first buffer pointer is bad, this is a fault. But 3053 * subsequent bad buffers will result in a partial write; this 3054 * is realized by filling the vector with null pointers and 3055 * zero lengths. */ 3056 if (!vec[i].iov_base) { 3057 if (i == 0) { 3058 err = EFAULT; 3059 goto fail; 3060 } else { 3061 bad_address = true; 3062 } 3063 } 3064 if (bad_address) { 3065 len = 0; 3066 } 3067 if (len > max_len - total_len) { 3068 len = max_len - total_len; 3069 } 3070 } 3071 vec[i].iov_len = len; 3072 total_len += len; 3073 } 3074 3075 unlock_user(target_vec, target_addr, 0); 3076 return vec; 3077 3078 fail: 3079 while (--i >= 0) { 3080 if (tswapal(target_vec[i].iov_len) > 0) { 3081 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 3082 } 3083 } 3084 unlock_user(target_vec, target_addr, 0); 3085 fail2: 3086 g_free(vec); 3087 errno = err; 3088 return NULL; 3089 } 3090 3091 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 3092 abi_ulong count, int copy) 3093 { 3094 struct target_iovec *target_vec; 3095 int i; 3096 3097 target_vec = lock_user(VERIFY_READ, target_addr, 3098 count * sizeof(struct target_iovec), 1); 3099 if (target_vec) { 3100 for (i = 0; i < count; i++) { 3101 abi_ulong base = tswapal(target_vec[i].iov_base); 3102 abi_long len = tswapal(target_vec[i].iov_len); 3103 if (len < 0) { 3104 break; 3105 } 3106 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 3107 } 3108 unlock_user(target_vec, target_addr, 0); 3109 } 3110 3111 g_free(vec); 3112 } 3113 3114 static inline int target_to_host_sock_type(int *type) 3115 { 3116 int host_type = 0; 3117 int target_type = *type; 3118 3119 switch (target_type & TARGET_SOCK_TYPE_MASK) { 3120 case TARGET_SOCK_DGRAM: 3121 host_type = SOCK_DGRAM; 3122 break; 3123 case TARGET_SOCK_STREAM: 3124 host_type = SOCK_STREAM; 3125 break; 3126 default: 3127 host_type = target_type & TARGET_SOCK_TYPE_MASK; 3128 break; 3129 } 3130 if (target_type & TARGET_SOCK_CLOEXEC) { 3131 #if defined(SOCK_CLOEXEC) 3132 host_type |= SOCK_CLOEXEC; 3133 #else 3134 return -TARGET_EINVAL; 3135 #endif 3136 } 3137 if (target_type & TARGET_SOCK_NONBLOCK) { 3138 #if defined(SOCK_NONBLOCK) 3139 host_type |= SOCK_NONBLOCK; 3140 #elif !defined(O_NONBLOCK) 3141 return -TARGET_EINVAL; 3142 #endif 3143 } 3144 *type = host_type; 3145 return 0; 3146 } 3147 3148 /* Try to emulate socket type flags after socket creation. */ 3149 static int sock_flags_fixup(int fd, int target_type) 3150 { 3151 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 3152 if (target_type & TARGET_SOCK_NONBLOCK) { 3153 int flags = fcntl(fd, F_GETFL); 3154 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 3155 close(fd); 3156 return -TARGET_EINVAL; 3157 } 3158 } 3159 #endif 3160 return fd; 3161 } 3162 3163 /* do_socket() Must return target values and target errnos. 
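 * Only a small allowlist of PF_NETLINK protocols is accepted, since
 * anything else would need an fd_trans conversion handler for its
 * message format that QEMU does not provide.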
*/ 3164 static abi_long do_socket(int domain, int type, int protocol) 3165 { 3166 int target_type = type; 3167 int ret; 3168 3169 ret = target_to_host_sock_type(&type); 3170 if (ret) { 3171 return ret; 3172 } 3173 3174 if (domain == PF_NETLINK && !( 3175 #ifdef CONFIG_RTNETLINK 3176 protocol == NETLINK_ROUTE || 3177 #endif 3178 protocol == NETLINK_KOBJECT_UEVENT || 3179 protocol == NETLINK_AUDIT)) { 3180 return -TARGET_EPROTONOSUPPORT; 3181 } 3182 3183 if (domain == AF_PACKET || 3184 (domain == AF_INET && type == SOCK_PACKET)) { 3185 protocol = tswap16(protocol); 3186 } 3187 3188 ret = get_errno(socket(domain, type, protocol)); 3189 if (ret >= 0) { 3190 ret = sock_flags_fixup(ret, target_type); 3191 if (type == SOCK_PACKET) { 3192 /* Manage an obsolete case : 3193 * if socket type is SOCK_PACKET, bind by name 3194 */ 3195 fd_trans_register(ret, &target_packet_trans); 3196 } else if (domain == PF_NETLINK) { 3197 switch (protocol) { 3198 #ifdef CONFIG_RTNETLINK 3199 case NETLINK_ROUTE: 3200 fd_trans_register(ret, &target_netlink_route_trans); 3201 break; 3202 #endif 3203 case NETLINK_KOBJECT_UEVENT: 3204 /* nothing to do: messages are strings */ 3205 break; 3206 case NETLINK_AUDIT: 3207 fd_trans_register(ret, &target_netlink_audit_trans); 3208 break; 3209 default: 3210 g_assert_not_reached(); 3211 } 3212 } 3213 } 3214 return ret; 3215 } 3216 3217 /* do_bind() Must return target values and target errnos. */ 3218 static abi_long do_bind(int sockfd, abi_ulong target_addr, 3219 socklen_t addrlen) 3220 { 3221 void *addr; 3222 abi_long ret; 3223 3224 if ((int)addrlen < 0) { 3225 return -TARGET_EINVAL; 3226 } 3227 3228 addr = alloca(addrlen+1); 3229 3230 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3231 if (ret) 3232 return ret; 3233 3234 return get_errno(bind(sockfd, addr, addrlen)); 3235 } 3236 3237 /* do_connect() Must return target values and target errnos. */ 3238 static abi_long do_connect(int sockfd, abi_ulong target_addr, 3239 socklen_t addrlen) 3240 { 3241 void *addr; 3242 abi_long ret; 3243 3244 if ((int)addrlen < 0) { 3245 return -TARGET_EINVAL; 3246 } 3247 3248 addr = alloca(addrlen+1); 3249 3250 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 3251 if (ret) 3252 return ret; 3253 3254 return get_errno(safe_connect(sockfd, addr, addrlen)); 3255 } 3256 3257 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 3258 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 3259 int flags, int send) 3260 { 3261 abi_long ret, len; 3262 struct msghdr msg; 3263 abi_ulong count; 3264 struct iovec *vec; 3265 abi_ulong target_vec; 3266 3267 if (msgp->msg_name) { 3268 msg.msg_namelen = tswap32(msgp->msg_namelen); 3269 msg.msg_name = alloca(msg.msg_namelen+1); 3270 ret = target_to_host_sockaddr(fd, msg.msg_name, 3271 tswapal(msgp->msg_name), 3272 msg.msg_namelen); 3273 if (ret == -TARGET_EFAULT) { 3274 /* For connected sockets msg_name and msg_namelen must 3275 * be ignored, so returning EFAULT immediately is wrong. 3276 * Instead, pass a bad msg_name to the host kernel, and 3277 * let it decide whether to return EFAULT or not. 
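         * ((void *)-1 is used because it cannot be a valid host buffer:
         * a kernel that does examine msg_name will then fail with
         * EFAULT, while one that ignores it for a connected socket
         * will succeed.)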
3278 */ 3279 msg.msg_name = (void *)-1; 3280 } else if (ret) { 3281 goto out2; 3282 } 3283 } else { 3284 msg.msg_name = NULL; 3285 msg.msg_namelen = 0; 3286 } 3287 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 3288 msg.msg_control = alloca(msg.msg_controllen); 3289 memset(msg.msg_control, 0, msg.msg_controllen); 3290 3291 msg.msg_flags = tswap32(msgp->msg_flags); 3292 3293 count = tswapal(msgp->msg_iovlen); 3294 target_vec = tswapal(msgp->msg_iov); 3295 3296 if (count > IOV_MAX) { 3297 /* sendrcvmsg returns a different errno for this condition than 3298 * readv/writev, so we must catch it here before lock_iovec() does. 3299 */ 3300 ret = -TARGET_EMSGSIZE; 3301 goto out2; 3302 } 3303 3304 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, 3305 target_vec, count, send); 3306 if (vec == NULL) { 3307 ret = -host_to_target_errno(errno); 3308 /* allow sending packet without any iov, e.g. with MSG_MORE flag */ 3309 if (!send || ret) { 3310 goto out2; 3311 } 3312 } 3313 msg.msg_iovlen = count; 3314 msg.msg_iov = vec; 3315 3316 if (send) { 3317 if (fd_trans_target_to_host_data(fd)) { 3318 void *host_msg; 3319 3320 host_msg = g_malloc(msg.msg_iov->iov_len); 3321 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len); 3322 ret = fd_trans_target_to_host_data(fd)(host_msg, 3323 msg.msg_iov->iov_len); 3324 if (ret >= 0) { 3325 msg.msg_iov->iov_base = host_msg; 3326 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3327 } 3328 g_free(host_msg); 3329 } else { 3330 ret = target_to_host_cmsg(&msg, msgp); 3331 if (ret == 0) { 3332 ret = get_errno(safe_sendmsg(fd, &msg, flags)); 3333 } 3334 } 3335 } else { 3336 ret = get_errno(safe_recvmsg(fd, &msg, flags)); 3337 if (!is_error(ret)) { 3338 len = ret; 3339 if (fd_trans_host_to_target_data(fd)) { 3340 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, 3341 MIN(msg.msg_iov->iov_len, len)); 3342 } 3343 if (!is_error(ret)) { 3344 ret = host_to_target_cmsg(msgp, &msg); 3345 } 3346 if (!is_error(ret)) { 3347 msgp->msg_namelen = tswap32(msg.msg_namelen); 3348 msgp->msg_flags = tswap32(msg.msg_flags); 3349 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) { 3350 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 3351 msg.msg_name, msg.msg_namelen); 3352 if (ret) { 3353 goto out; 3354 } 3355 } 3356 3357 ret = len; 3358 } 3359 } 3360 } 3361 3362 out: 3363 if (vec) { 3364 unlock_iovec(vec, target_vec, count, !send); 3365 } 3366 out2: 3367 return ret; 3368 } 3369 3370 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 3371 int flags, int send) 3372 { 3373 abi_long ret; 3374 struct target_msghdr *msgp; 3375 3376 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 3377 msgp, 3378 target_msg, 3379 send ? 1 : 0)) { 3380 return -TARGET_EFAULT; 3381 } 3382 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 3383 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 3384 return ret; 3385 } 3386 3387 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 3388 * so it might not have this *mmsg-specific flag either. 
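 * The value 0x10000 below matches the kernel ABI definition; the loop
 * in do_sendrecvmmsg() implements its semantics by OR-ing in
 * MSG_DONTWAIT once the first datagram has been handled.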
3389 */ 3390 #ifndef MSG_WAITFORONE 3391 #define MSG_WAITFORONE 0x10000 3392 #endif 3393 3394 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 3395 unsigned int vlen, unsigned int flags, 3396 int send) 3397 { 3398 struct target_mmsghdr *mmsgp; 3399 abi_long ret = 0; 3400 int i; 3401 3402 if (vlen > UIO_MAXIOV) { 3403 vlen = UIO_MAXIOV; 3404 } 3405 3406 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 3407 if (!mmsgp) { 3408 return -TARGET_EFAULT; 3409 } 3410 3411 for (i = 0; i < vlen; i++) { 3412 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 3413 if (is_error(ret)) { 3414 break; 3415 } 3416 mmsgp[i].msg_len = tswap32(ret); 3417 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 3418 if (flags & MSG_WAITFORONE) { 3419 flags |= MSG_DONTWAIT; 3420 } 3421 } 3422 3423 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 3424 3425 /* Return number of datagrams sent if we sent any at all; 3426 * otherwise return the error. 3427 */ 3428 if (i) { 3429 return i; 3430 } 3431 return ret; 3432 } 3433 3434 /* do_accept4() Must return target values and target errnos. */ 3435 static abi_long do_accept4(int fd, abi_ulong target_addr, 3436 abi_ulong target_addrlen_addr, int flags) 3437 { 3438 socklen_t addrlen, ret_addrlen; 3439 void *addr; 3440 abi_long ret; 3441 int host_flags; 3442 3443 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 3444 3445 if (target_addr == 0) { 3446 return get_errno(safe_accept4(fd, NULL, NULL, host_flags)); 3447 } 3448 3449 /* linux returns EFAULT if addrlen pointer is invalid */ 3450 if (get_user_u32(addrlen, target_addrlen_addr)) 3451 return -TARGET_EFAULT; 3452 3453 if ((int)addrlen < 0) { 3454 return -TARGET_EINVAL; 3455 } 3456 3457 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3458 return -TARGET_EFAULT; 3459 } 3460 3461 addr = alloca(addrlen); 3462 3463 ret_addrlen = addrlen; 3464 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags)); 3465 if (!is_error(ret)) { 3466 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3467 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3468 ret = -TARGET_EFAULT; 3469 } 3470 } 3471 return ret; 3472 } 3473 3474 /* do_getpeername() Must return target values and target errnos. */ 3475 static abi_long do_getpeername(int fd, abi_ulong target_addr, 3476 abi_ulong target_addrlen_addr) 3477 { 3478 socklen_t addrlen, ret_addrlen; 3479 void *addr; 3480 abi_long ret; 3481 3482 if (get_user_u32(addrlen, target_addrlen_addr)) 3483 return -TARGET_EFAULT; 3484 3485 if ((int)addrlen < 0) { 3486 return -TARGET_EINVAL; 3487 } 3488 3489 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3490 return -TARGET_EFAULT; 3491 } 3492 3493 addr = alloca(addrlen); 3494 3495 ret_addrlen = addrlen; 3496 ret = get_errno(getpeername(fd, addr, &ret_addrlen)); 3497 if (!is_error(ret)) { 3498 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3499 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3500 ret = -TARGET_EFAULT; 3501 } 3502 } 3503 return ret; 3504 } 3505 3506 /* do_getsockname() Must return target values and target errnos. 
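 * As with do_getpeername() above, the kernel may report an address
 * larger than the guest's buffer; only MIN(addrlen, ret_addrlen)
 * bytes are converted back.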
*/ 3507 static abi_long do_getsockname(int fd, abi_ulong target_addr, 3508 abi_ulong target_addrlen_addr) 3509 { 3510 socklen_t addrlen, ret_addrlen; 3511 void *addr; 3512 abi_long ret; 3513 3514 if (get_user_u32(addrlen, target_addrlen_addr)) 3515 return -TARGET_EFAULT; 3516 3517 if ((int)addrlen < 0) { 3518 return -TARGET_EINVAL; 3519 } 3520 3521 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) { 3522 return -TARGET_EFAULT; 3523 } 3524 3525 addr = alloca(addrlen); 3526 3527 ret_addrlen = addrlen; 3528 ret = get_errno(getsockname(fd, addr, &ret_addrlen)); 3529 if (!is_error(ret)) { 3530 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen)); 3531 if (put_user_u32(ret_addrlen, target_addrlen_addr)) { 3532 ret = -TARGET_EFAULT; 3533 } 3534 } 3535 return ret; 3536 } 3537 3538 /* do_socketpair() Must return target values and target errnos. */ 3539 static abi_long do_socketpair(int domain, int type, int protocol, 3540 abi_ulong target_tab_addr) 3541 { 3542 int tab[2]; 3543 abi_long ret; 3544 3545 target_to_host_sock_type(&type); 3546 3547 ret = get_errno(socketpair(domain, type, protocol, tab)); 3548 if (!is_error(ret)) { 3549 if (put_user_s32(tab[0], target_tab_addr) 3550 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 3551 ret = -TARGET_EFAULT; 3552 } 3553 return ret; 3554 } 3555 3556 /* do_sendto() Must return target values and target errnos. */ 3557 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 3558 abi_ulong target_addr, socklen_t addrlen) 3559 { 3560 void *addr; 3561 void *host_msg; 3562 void *copy_msg = NULL; 3563 abi_long ret; 3564 3565 if ((int)addrlen < 0) { 3566 return -TARGET_EINVAL; 3567 } 3568 3569 host_msg = lock_user(VERIFY_READ, msg, len, 1); 3570 if (!host_msg) 3571 return -TARGET_EFAULT; 3572 if (fd_trans_target_to_host_data(fd)) { 3573 copy_msg = host_msg; 3574 host_msg = g_malloc(len); 3575 memcpy(host_msg, copy_msg, len); 3576 ret = fd_trans_target_to_host_data(fd)(host_msg, len); 3577 if (ret < 0) { 3578 goto fail; 3579 } 3580 } 3581 if (target_addr) { 3582 addr = alloca(addrlen+1); 3583 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 3584 if (ret) { 3585 goto fail; 3586 } 3587 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen)); 3588 } else { 3589 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0)); 3590 } 3591 fail: 3592 if (copy_msg) { 3593 g_free(host_msg); 3594 host_msg = copy_msg; 3595 } 3596 unlock_user(host_msg, msg, 0); 3597 return ret; 3598 } 3599 3600 /* do_recvfrom() Must return target values and target errnos. */ 3601 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 3602 abi_ulong target_addr, 3603 abi_ulong target_addrlen) 3604 { 3605 socklen_t addrlen, ret_addrlen; 3606 void *addr; 3607 void *host_msg; 3608 abi_long ret; 3609 3610 if (!msg) { 3611 host_msg = NULL; 3612 } else { 3613 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 3614 if (!host_msg) { 3615 return -TARGET_EFAULT; 3616 } 3617 } 3618 if (target_addr) { 3619 if (get_user_u32(addrlen, target_addrlen)) { 3620 ret = -TARGET_EFAULT; 3621 goto fail; 3622 } 3623 if ((int)addrlen < 0) { 3624 ret = -TARGET_EINVAL; 3625 goto fail; 3626 } 3627 addr = alloca(addrlen); 3628 ret_addrlen = addrlen; 3629 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, 3630 addr, &ret_addrlen)); 3631 } else { 3632 addr = NULL; /* To keep compiler quiet. */ 3633 addrlen = 0; /* To keep compiler quiet. 
*/ 3634 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0)); 3635 } 3636 if (!is_error(ret)) { 3637 if (fd_trans_host_to_target_data(fd)) { 3638 abi_long trans; 3639 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len)); 3640 if (is_error(trans)) { 3641 ret = trans; 3642 goto fail; 3643 } 3644 } 3645 if (target_addr) { 3646 host_to_target_sockaddr(target_addr, addr, 3647 MIN(addrlen, ret_addrlen)); 3648 if (put_user_u32(ret_addrlen, target_addrlen)) { 3649 ret = -TARGET_EFAULT; 3650 goto fail; 3651 } 3652 } 3653 unlock_user(host_msg, msg, len); 3654 } else { 3655 fail: 3656 unlock_user(host_msg, msg, 0); 3657 } 3658 return ret; 3659 } 3660 3661 #ifdef TARGET_NR_socketcall 3662 /* do_socketcall() must return target values and target errnos. */ 3663 static abi_long do_socketcall(int num, abi_ulong vptr) 3664 { 3665 static const unsigned nargs[] = { /* number of arguments per operation */ 3666 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */ 3667 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */ 3668 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */ 3669 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */ 3670 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */ 3671 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */ 3672 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */ 3673 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */ 3674 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */ 3675 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */ 3676 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */ 3677 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */ 3678 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */ 3679 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3680 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */ 3681 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */ 3682 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */ 3683 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */ 3684 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */ 3685 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */ 3686 }; 3687 abi_long a[6]; /* max 6 args */ 3688 unsigned i; 3689 3690 /* check the range of the first argument num */ 3691 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */ 3692 if (num < 1 || num > TARGET_SYS_SENDMMSG) { 3693 return -TARGET_EINVAL; 3694 } 3695 /* ensure we have space for args */ 3696 if (nargs[num] > ARRAY_SIZE(a)) { 3697 return -TARGET_EINVAL; 3698 } 3699 /* collect the arguments in a[] according to nargs[] */ 3700 for (i = 0; i < nargs[num]; ++i) { 3701 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 3702 return -TARGET_EFAULT; 3703 } 3704 } 3705 /* now when we have the args, invoke the appropriate underlying function */ 3706 switch (num) { 3707 case TARGET_SYS_SOCKET: /* domain, type, protocol */ 3708 return do_socket(a[0], a[1], a[2]); 3709 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */ 3710 return do_bind(a[0], a[1], a[2]); 3711 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */ 3712 return do_connect(a[0], a[1], a[2]); 3713 case TARGET_SYS_LISTEN: /* sockfd, backlog */ 3714 return get_errno(listen(a[0], a[1])); 3715 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */ 3716 return do_accept4(a[0], a[1], a[2], 0); 3717 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */ 3718 return do_getsockname(a[0], a[1], a[2]); 3719 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */ 3720 return do_getpeername(a[0], a[1], a[2]); 3721 case TARGET_SYS_SOCKETPAIR: /* 
domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif

#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
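    /*
     * Field widths below are target dependent: ipc_perm.mode is 32-bit
     * on Alpha, MIPS and PPC (16-bit elsewhere), and __seq is 32-bit
     * only on PPC, mirroring target_to_host_ipc_perm() above.
     */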
target_ip = &(target_sd->sem_perm); 3818 target_ip->__key = tswap32(host_ip->__key); 3819 target_ip->uid = tswap32(host_ip->uid); 3820 target_ip->gid = tswap32(host_ip->gid); 3821 target_ip->cuid = tswap32(host_ip->cuid); 3822 target_ip->cgid = tswap32(host_ip->cgid); 3823 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 3824 target_ip->mode = tswap32(host_ip->mode); 3825 #else 3826 target_ip->mode = tswap16(host_ip->mode); 3827 #endif 3828 #if defined(TARGET_PPC) 3829 target_ip->__seq = tswap32(host_ip->__seq); 3830 #else 3831 target_ip->__seq = tswap16(host_ip->__seq); 3832 #endif 3833 unlock_user_struct(target_sd, target_addr, 1); 3834 return 0; 3835 } 3836 3837 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 3838 abi_ulong target_addr) 3839 { 3840 struct target_semid64_ds *target_sd; 3841 3842 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3843 return -TARGET_EFAULT; 3844 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 3845 return -TARGET_EFAULT; 3846 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 3847 host_sd->sem_otime = tswapal(target_sd->sem_otime); 3848 host_sd->sem_ctime = tswapal(target_sd->sem_ctime); 3849 unlock_user_struct(target_sd, target_addr, 0); 3850 return 0; 3851 } 3852 3853 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 3854 struct semid_ds *host_sd) 3855 { 3856 struct target_semid64_ds *target_sd; 3857 3858 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3859 return -TARGET_EFAULT; 3860 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 3861 return -TARGET_EFAULT; 3862 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 3863 target_sd->sem_otime = tswapal(host_sd->sem_otime); 3864 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 3865 unlock_user_struct(target_sd, target_addr, 1); 3866 return 0; 3867 } 3868 3869 struct target_seminfo { 3870 int semmap; 3871 int semmni; 3872 int semmns; 3873 int semmnu; 3874 int semmsl; 3875 int semopm; 3876 int semume; 3877 int semusz; 3878 int semvmx; 3879 int semaem; 3880 }; 3881 3882 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 3883 struct seminfo *host_seminfo) 3884 { 3885 struct target_seminfo *target_seminfo; 3886 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 3887 return -TARGET_EFAULT; 3888 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 3889 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 3890 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 3891 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 3892 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 3893 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 3894 __put_user(host_seminfo->semume, &target_seminfo->semume); 3895 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 3896 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 3897 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 3898 unlock_user_struct(target_seminfo, target_addr, 1); 3899 return 0; 3900 } 3901 3902 union semun { 3903 int val; 3904 struct semid_ds *buf; 3905 unsigned short *array; 3906 struct seminfo *__buf; 3907 }; 3908 3909 union target_semun { 3910 int val; 3911 abi_ulong buf; 3912 abi_ulong array; 3913 abi_ulong __buf; 3914 }; 3915 3916 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 3917 abi_ulong target_addr) 3918 { 3919 int nsems; 3920 unsigned short *array; 3921 union semun semun; 3922 struct 
semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1) {
        return get_errno(ret);
    }

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems * sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1) {
        return get_errno(ret);
    }

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems * sizeof(unsigned short), 0);
    if (!array) {
        /* don't leak the host copy on the error path */
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}

static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order.
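         *
         * As a concrete sketch: a big-endian 64-bit guest storing val = 1
         * places those four bytes in the first half of the 8-byte union;
         * the generic argument fetch byteswaps all 8 bytes, leaving them
         * in the wrong half, so the tswapal() below restores the guest
         * byte layout and the following tswap32() then extracts val
         * correctly.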
         */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err) {
            return err;
        }
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err) {
            return err;
        }
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err) {
            return err;
        }
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err) {
            return err;
        }
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err) {
            return err;
        }
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
    if (!target_sembuf) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variant, which passes the
 * arguments in a different order from the default.
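 *
 * As a sketch, SEMTIMEDOP_IPC_ARGS(nsops, sops, ts) makes the host call
 * below expand to
 *   s390x:   safe_ipc(IPCOP_semtimedop, semid, nsops, ts, sops)
 *   generic: safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, ts)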
4088 */ 4089 #ifdef __s390x__ 4090 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4091 (__nsops), (__timeout), (__sops) 4092 #else 4093 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \ 4094 (__nsops), 0, (__sops), (__timeout) 4095 #endif 4096 4097 static inline abi_long do_semtimedop(int semid, 4098 abi_long ptr, 4099 unsigned nsops, 4100 abi_long timeout, bool time64) 4101 { 4102 struct sembuf *sops; 4103 struct timespec ts, *pts = NULL; 4104 abi_long ret; 4105 4106 if (timeout) { 4107 pts = &ts; 4108 if (time64) { 4109 if (target_to_host_timespec64(pts, timeout)) { 4110 return -TARGET_EFAULT; 4111 } 4112 } else { 4113 if (target_to_host_timespec(pts, timeout)) { 4114 return -TARGET_EFAULT; 4115 } 4116 } 4117 } 4118 4119 if (nsops > TARGET_SEMOPM) { 4120 return -TARGET_E2BIG; 4121 } 4122 4123 sops = g_new(struct sembuf, nsops); 4124 4125 if (target_to_host_sembuf(sops, ptr, nsops)) { 4126 g_free(sops); 4127 return -TARGET_EFAULT; 4128 } 4129 4130 ret = -TARGET_ENOSYS; 4131 #ifdef __NR_semtimedop 4132 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts)); 4133 #endif 4134 #ifdef __NR_ipc 4135 if (ret == -TARGET_ENOSYS) { 4136 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, 4137 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))); 4138 } 4139 #endif 4140 g_free(sops); 4141 return ret; 4142 } 4143 #endif 4144 4145 struct target_msqid_ds 4146 { 4147 struct target_ipc_perm msg_perm; 4148 abi_ulong msg_stime; 4149 #if TARGET_ABI_BITS == 32 4150 abi_ulong __unused1; 4151 #endif 4152 abi_ulong msg_rtime; 4153 #if TARGET_ABI_BITS == 32 4154 abi_ulong __unused2; 4155 #endif 4156 abi_ulong msg_ctime; 4157 #if TARGET_ABI_BITS == 32 4158 abi_ulong __unused3; 4159 #endif 4160 abi_ulong __msg_cbytes; 4161 abi_ulong msg_qnum; 4162 abi_ulong msg_qbytes; 4163 abi_ulong msg_lspid; 4164 abi_ulong msg_lrpid; 4165 abi_ulong __unused4; 4166 abi_ulong __unused5; 4167 }; 4168 4169 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 4170 abi_ulong target_addr) 4171 { 4172 struct target_msqid_ds *target_md; 4173 4174 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 4175 return -TARGET_EFAULT; 4176 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 4177 return -TARGET_EFAULT; 4178 host_md->msg_stime = tswapal(target_md->msg_stime); 4179 host_md->msg_rtime = tswapal(target_md->msg_rtime); 4180 host_md->msg_ctime = tswapal(target_md->msg_ctime); 4181 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 4182 host_md->msg_qnum = tswapal(target_md->msg_qnum); 4183 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 4184 host_md->msg_lspid = tswapal(target_md->msg_lspid); 4185 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 4186 unlock_user_struct(target_md, target_addr, 0); 4187 return 0; 4188 } 4189 4190 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 4191 struct msqid_ds *host_md) 4192 { 4193 struct target_msqid_ds *target_md; 4194 4195 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 4196 return -TARGET_EFAULT; 4197 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 4198 return -TARGET_EFAULT; 4199 target_md->msg_stime = tswapal(host_md->msg_stime); 4200 target_md->msg_rtime = tswapal(host_md->msg_rtime); 4201 target_md->msg_ctime = tswapal(host_md->msg_ctime); 4202 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 4203 target_md->msg_qnum = tswapal(host_md->msg_qnum); 4204 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 4205 target_md->msg_lspid = tswapal(host_md->msg_lspid); 
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo)) {
            return -TARGET_EFAULT;
        }
        break;
    }

    return ret;
}

struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) {
        return -TARGET_EFAULT;
    }
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC's msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters,
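   so msgp and msgtyp are packed into an anonymous long[2] compound
   literal passed through the single pointer slot; the generic variant
   below builds the same array but appends a trailing dummy argument.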
*/ 4324 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4325 ((long int[]){(long int)__msgp, __msgtyp}) 4326 #else 4327 #define MSGRCV_ARGS(__msgp, __msgtyp) \ 4328 ((long int[]){(long int)__msgp, __msgtyp}), 0 4329 #endif 4330 #endif 4331 4332 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 4333 ssize_t msgsz, abi_long msgtyp, 4334 int msgflg) 4335 { 4336 struct target_msgbuf *target_mb; 4337 char *target_mtext; 4338 struct msgbuf *host_mb; 4339 abi_long ret = 0; 4340 4341 if (msgsz < 0) { 4342 return -TARGET_EINVAL; 4343 } 4344 4345 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 4346 return -TARGET_EFAULT; 4347 4348 host_mb = g_try_malloc(msgsz + sizeof(long)); 4349 if (!host_mb) { 4350 ret = -TARGET_ENOMEM; 4351 goto end; 4352 } 4353 ret = -TARGET_ENOSYS; 4354 #ifdef __NR_msgrcv 4355 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 4356 #endif 4357 #ifdef __NR_ipc 4358 if (ret == -TARGET_ENOSYS) { 4359 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, 4360 msgflg, MSGRCV_ARGS(host_mb, msgtyp))); 4361 } 4362 #endif 4363 4364 if (ret > 0) { 4365 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 4366 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 4367 if (!target_mtext) { 4368 ret = -TARGET_EFAULT; 4369 goto end; 4370 } 4371 memcpy(target_mb->mtext, host_mb->mtext, ret); 4372 unlock_user(target_mtext, target_mtext_addr, ret); 4373 } 4374 4375 target_mb->mtype = tswapal(host_mb->mtype); 4376 4377 end: 4378 if (target_mb) 4379 unlock_user_struct(target_mb, msgp, 1); 4380 g_free(host_mb); 4381 return ret; 4382 } 4383 4384 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 4385 abi_ulong target_addr) 4386 { 4387 struct target_shmid_ds *target_sd; 4388 4389 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 4390 return -TARGET_EFAULT; 4391 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 4392 return -TARGET_EFAULT; 4393 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4394 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 4395 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4396 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4397 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4398 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4399 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4400 unlock_user_struct(target_sd, target_addr, 0); 4401 return 0; 4402 } 4403 4404 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 4405 struct shmid_ds *host_sd) 4406 { 4407 struct target_shmid_ds *target_sd; 4408 4409 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 4410 return -TARGET_EFAULT; 4411 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 4412 return -TARGET_EFAULT; 4413 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 4414 __put_user(host_sd->shm_atime, &target_sd->shm_atime); 4415 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 4416 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 4417 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 4418 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 4419 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 4420 unlock_user_struct(target_sd, target_addr, 1); 4421 return 0; 4422 } 4423 4424 struct target_shminfo { 4425 abi_ulong shmmax; 4426 abi_ulong shmmin; 4427 abi_ulong shmmni; 4428 abi_ulong shmseg; 4429 abi_ulong shmall; 4430 }; 4431 4432 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 4433 
struct shminfo *host_shminfo) 4434 { 4435 struct target_shminfo *target_shminfo; 4436 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 4437 return -TARGET_EFAULT; 4438 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 4439 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 4440 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 4441 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 4442 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 4443 unlock_user_struct(target_shminfo, target_addr, 1); 4444 return 0; 4445 } 4446 4447 struct target_shm_info { 4448 int used_ids; 4449 abi_ulong shm_tot; 4450 abi_ulong shm_rss; 4451 abi_ulong shm_swp; 4452 abi_ulong swap_attempts; 4453 abi_ulong swap_successes; 4454 }; 4455 4456 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 4457 struct shm_info *host_shm_info) 4458 { 4459 struct target_shm_info *target_shm_info; 4460 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 4461 return -TARGET_EFAULT; 4462 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 4463 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 4464 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 4465 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 4466 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 4467 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 4468 unlock_user_struct(target_shm_info, target_addr, 1); 4469 return 0; 4470 } 4471 4472 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 4473 { 4474 struct shmid_ds dsarg; 4475 struct shminfo shminfo; 4476 struct shm_info shm_info; 4477 abi_long ret = -TARGET_EINVAL; 4478 4479 cmd &= 0xff; 4480 4481 switch(cmd) { 4482 case IPC_STAT: 4483 case IPC_SET: 4484 case SHM_STAT: 4485 if (target_to_host_shmid_ds(&dsarg, buf)) 4486 return -TARGET_EFAULT; 4487 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 4488 if (host_to_target_shmid_ds(buf, &dsarg)) 4489 return -TARGET_EFAULT; 4490 break; 4491 case IPC_INFO: 4492 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 4493 if (host_to_target_shminfo(buf, &shminfo)) 4494 return -TARGET_EFAULT; 4495 break; 4496 case SHM_INFO: 4497 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 4498 if (host_to_target_shm_info(buf, &shm_info)) 4499 return -TARGET_EFAULT; 4500 break; 4501 case IPC_RMID: 4502 case SHM_LOCK: 4503 case SHM_UNLOCK: 4504 ret = get_errno(shmctl(shmid, cmd, NULL)); 4505 break; 4506 } 4507 4508 return ret; 4509 } 4510 4511 #ifndef TARGET_FORCE_SHMLBA 4512 /* For most architectures, SHMLBA is the same as the page size; 4513 * some architectures have larger values, in which case they should 4514 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function. 4515 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA 4516 * and defining its own value for SHMLBA. 4517 * 4518 * The kernel also permits SHMLBA to be set by the architecture to a 4519 * value larger than the page size without setting __ARCH_FORCE_SHMLBA; 4520 * this means that addresses are rounded to the large size if 4521 * SHM_RND is set but addresses not aligned to that size are not rejected 4522 * as long as they are at least page-aligned. Since the only architecture 4523 * which uses this is ia64 this code doesn't provide for that oddity. 
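 *
 * As a worked example of the SHM_RND handling below: with a 16 KiB
 * shmlba, a guest shmaddr of 0x40003000 is rounded down to 0x40000000
 * (shmaddr &= ~(shmlba - 1)), while without SHM_RND the same unaligned
 * address is rejected with -TARGET_EINVAL.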
4524 */ 4525 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4526 { 4527 return TARGET_PAGE_SIZE; 4528 } 4529 #endif 4530 4531 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4532 int shmid, abi_ulong shmaddr, int shmflg) 4533 { 4534 CPUState *cpu = env_cpu(cpu_env); 4535 abi_long raddr; 4536 void *host_raddr; 4537 struct shmid_ds shm_info; 4538 int i,ret; 4539 abi_ulong shmlba; 4540 4541 /* shmat pointers are always untagged */ 4542 4543 /* find out the length of the shared memory segment */ 4544 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4545 if (is_error(ret)) { 4546 /* can't get length, bail out */ 4547 return ret; 4548 } 4549 4550 shmlba = target_shmlba(cpu_env); 4551 4552 if (shmaddr & (shmlba - 1)) { 4553 if (shmflg & SHM_RND) { 4554 shmaddr &= ~(shmlba - 1); 4555 } else { 4556 return -TARGET_EINVAL; 4557 } 4558 } 4559 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) { 4560 return -TARGET_EINVAL; 4561 } 4562 4563 mmap_lock(); 4564 4565 /* 4566 * We're mapping shared memory, so ensure we generate code for parallel 4567 * execution and flush old translations. This will work up to the level 4568 * supported by the host -- anything that requires EXCP_ATOMIC will not 4569 * be atomic with respect to an external process. 4570 */ 4571 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 4572 cpu->tcg_cflags |= CF_PARALLEL; 4573 tb_flush(cpu); 4574 } 4575 4576 if (shmaddr) 4577 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); 4578 else { 4579 abi_ulong mmap_start; 4580 4581 /* In order to use the host shmat, we need to honor host SHMLBA. */ 4582 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba)); 4583 4584 if (mmap_start == -1) { 4585 errno = ENOMEM; 4586 host_raddr = (void *)-1; 4587 } else 4588 host_raddr = shmat(shmid, g2h_untagged(mmap_start), 4589 shmflg | SHM_REMAP); 4590 } 4591 4592 if (host_raddr == (void *)-1) { 4593 mmap_unlock(); 4594 return get_errno((long)host_raddr); 4595 } 4596 raddr=h2g((unsigned long)host_raddr); 4597 4598 page_set_flags(raddr, raddr + shm_info.shm_segsz - 1, 4599 PAGE_VALID | PAGE_RESET | PAGE_READ | 4600 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); 4601 4602 for (i = 0; i < N_SHM_REGIONS; i++) { 4603 if (!shm_regions[i].in_use) { 4604 shm_regions[i].in_use = true; 4605 shm_regions[i].start = raddr; 4606 shm_regions[i].size = shm_info.shm_segsz; 4607 break; 4608 } 4609 } 4610 4611 mmap_unlock(); 4612 return raddr; 4613 4614 } 4615 4616 static inline abi_long do_shmdt(abi_ulong shmaddr) 4617 { 4618 int i; 4619 abi_long rv; 4620 4621 /* shmdt pointers are always untagged */ 4622 4623 mmap_lock(); 4624 4625 for (i = 0; i < N_SHM_REGIONS; ++i) { 4626 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4627 shm_regions[i].in_use = false; 4628 page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0); 4629 break; 4630 } 4631 } 4632 rv = get_errno(shmdt(g2h_untagged(shmaddr))); 4633 4634 mmap_unlock(); 4635 4636 return rv; 4637 } 4638 4639 #ifdef TARGET_NR_ipc 4640 /* ??? This only works with linear mappings. */ 4641 /* do_ipc() must return target values and target errnos. 
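 *
 * The call argument packs an ABI version into its upper 16 bits and the
 * IPCOP_* operation into the low 16 bits; e.g. a legacy version-0
 * IPCOP_msgrcv passes its msgp/msgtyp pair indirectly via ptr (see the
 * target_ipc_kludge handling below).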
*/ 4642 static abi_long do_ipc(CPUArchState *cpu_env, 4643 unsigned int call, abi_long first, 4644 abi_long second, abi_long third, 4645 abi_long ptr, abi_long fifth) 4646 { 4647 int version; 4648 abi_long ret = 0; 4649 4650 version = call >> 16; 4651 call &= 0xffff; 4652 4653 switch (call) { 4654 case IPCOP_semop: 4655 ret = do_semtimedop(first, ptr, second, 0, false); 4656 break; 4657 case IPCOP_semtimedop: 4658 /* 4659 * The s390 sys_ipc variant has only five parameters instead of six 4660 * (as for default variant) and the only difference is the handling of 4661 * SEMTIMEDOP where on s390 the third parameter is used as a pointer 4662 * to a struct timespec where the generic variant uses fifth parameter. 4663 */ 4664 #if defined(TARGET_S390X) 4665 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64); 4666 #else 4667 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64); 4668 #endif 4669 break; 4670 4671 case IPCOP_semget: 4672 ret = get_errno(semget(first, second, third)); 4673 break; 4674 4675 case IPCOP_semctl: { 4676 /* The semun argument to semctl is passed by value, so dereference the 4677 * ptr argument. */ 4678 abi_ulong atptr; 4679 get_user_ual(atptr, ptr); 4680 ret = do_semctl(first, second, third, atptr); 4681 break; 4682 } 4683 4684 case IPCOP_msgget: 4685 ret = get_errno(msgget(first, second)); 4686 break; 4687 4688 case IPCOP_msgsnd: 4689 ret = do_msgsnd(first, ptr, second, third); 4690 break; 4691 4692 case IPCOP_msgctl: 4693 ret = do_msgctl(first, second, ptr); 4694 break; 4695 4696 case IPCOP_msgrcv: 4697 switch (version) { 4698 case 0: 4699 { 4700 struct target_ipc_kludge { 4701 abi_long msgp; 4702 abi_long msgtyp; 4703 } *tmp; 4704 4705 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 4706 ret = -TARGET_EFAULT; 4707 break; 4708 } 4709 4710 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 4711 4712 unlock_user_struct(tmp, ptr, 0); 4713 break; 4714 } 4715 default: 4716 ret = do_msgrcv(first, ptr, second, fifth, third); 4717 } 4718 break; 4719 4720 case IPCOP_shmat: 4721 switch (version) { 4722 default: 4723 { 4724 abi_ulong raddr; 4725 raddr = do_shmat(cpu_env, first, ptr, second); 4726 if (is_error(raddr)) 4727 return get_errno(raddr); 4728 if (put_user_ual(raddr, third)) 4729 return -TARGET_EFAULT; 4730 break; 4731 } 4732 case 1: 4733 ret = -TARGET_EINVAL; 4734 break; 4735 } 4736 break; 4737 case IPCOP_shmdt: 4738 ret = do_shmdt(ptr); 4739 break; 4740 4741 case IPCOP_shmget: 4742 /* IPC_* flag values are the same on all linux platforms */ 4743 ret = get_errno(shmget(first, second, third)); 4744 break; 4745 4746 /* IPC_* and SHM_* command values are the same on all linux platforms */ 4747 case IPCOP_shmctl: 4748 ret = do_shmctl(first, second, ptr); 4749 break; 4750 default: 4751 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n", 4752 call, version); 4753 ret = -TARGET_ENOSYS; 4754 break; 4755 } 4756 return ret; 4757 } 4758 #endif 4759 4760 /* kernel structure types definitions */ 4761 4762 #define STRUCT(name, ...) STRUCT_ ## name, 4763 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 4764 enum { 4765 #include "syscall_types.h" 4766 STRUCT_MAX 4767 }; 4768 #undef STRUCT 4769 #undef STRUCT_SPECIAL 4770 4771 #define STRUCT(name, ...) 
static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 4772 #define STRUCT_SPECIAL(name) 4773 #include "syscall_types.h" 4774 #undef STRUCT 4775 #undef STRUCT_SPECIAL 4776 4777 #define MAX_STRUCT_SIZE 4096 4778 4779 #ifdef CONFIG_FIEMAP 4780 /* So fiemap access checks don't overflow on 32 bit systems. 4781 * This is very slightly smaller than the limit imposed by 4782 * the underlying kernel. 4783 */ 4784 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 4785 / sizeof(struct fiemap_extent)) 4786 4787 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 4788 int fd, int cmd, abi_long arg) 4789 { 4790 /* The parameter for this ioctl is a struct fiemap followed 4791 * by an array of struct fiemap_extent whose size is set 4792 * in fiemap->fm_extent_count. The array is filled in by the 4793 * ioctl. 4794 */ 4795 int target_size_in, target_size_out; 4796 struct fiemap *fm; 4797 const argtype *arg_type = ie->arg_type; 4798 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 4799 void *argptr, *p; 4800 abi_long ret; 4801 int i, extent_size = thunk_type_size(extent_arg_type, 0); 4802 uint32_t outbufsz; 4803 int free_fm = 0; 4804 4805 assert(arg_type[0] == TYPE_PTR); 4806 assert(ie->access == IOC_RW); 4807 arg_type++; 4808 target_size_in = thunk_type_size(arg_type, 0); 4809 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 4810 if (!argptr) { 4811 return -TARGET_EFAULT; 4812 } 4813 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4814 unlock_user(argptr, arg, 0); 4815 fm = (struct fiemap *)buf_temp; 4816 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 4817 return -TARGET_EINVAL; 4818 } 4819 4820 outbufsz = sizeof (*fm) + 4821 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 4822 4823 if (outbufsz > MAX_STRUCT_SIZE) { 4824 /* We can't fit all the extents into the fixed size buffer. 4825 * Allocate one that is large enough and use it instead. 
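         *
         * (outbufsz here is sizeof(struct fiemap) plus fm_extent_count
         * copies of struct fiemap_extent; the FIEMAP_MAX_EXTENTS check
         * above keeps that size computation from overflowing.)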
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents,
         * so there are no structs to copy.
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif

static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the ifreq records into the fixed-size
             * buffer. Allocate one that is large enough and use it
             * instead.
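             *
             * Note that the guest and host may disagree about
             * sizeof(struct ifreq), which is why ifc_len is rescaled
             * between target_ifreq_size and sizeof(struct ifreq) on
             * either side of the ioctl below.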
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            /* don't leak the big buffer on the error path */
            if (free_buf) {
                g_free(host_ifconf);
            }
            return -TARGET_EFAULT;
        }
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                /* the guest supplied a bogus ifc_buf pointer */
                if (free_buf) {
                    g_free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}

#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
} 5034 urb_hashtable_remove(lurb); 5035 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 5036 lurb->host_urb.buffer_length); 5037 lurb->target_buf_ptr = NULL; 5038 5039 /* restore the guest buffer pointer */ 5040 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr; 5041 5042 /* update the guest urb struct */ 5043 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0); 5044 if (!argptr) { 5045 g_free(lurb); 5046 return -TARGET_EFAULT; 5047 } 5048 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET); 5049 unlock_user(argptr, lurb->target_urb_adr, target_size); 5050 5051 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET); 5052 /* write back the urb handle */ 5053 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5054 if (!argptr) { 5055 g_free(lurb); 5056 return -TARGET_EFAULT; 5057 } 5058 5059 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */ 5060 target_urb_adr = lurb->target_urb_adr; 5061 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET); 5062 unlock_user(argptr, arg, target_size); 5063 5064 g_free(lurb); 5065 return ret; 5066 } 5067 5068 static abi_long 5069 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie, 5070 uint8_t *buf_temp __attribute__((unused)), 5071 int fd, int cmd, abi_long arg) 5072 { 5073 struct live_urb *lurb; 5074 5075 /* map target address back to host URB with metadata. */ 5076 lurb = urb_hashtable_lookup(arg); 5077 if (!lurb) { 5078 return -TARGET_EFAULT; 5079 } 5080 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5081 } 5082 5083 static abi_long 5084 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp, 5085 int fd, int cmd, abi_long arg) 5086 { 5087 const argtype *arg_type = ie->arg_type; 5088 int target_size; 5089 abi_long ret; 5090 void *argptr; 5091 int rw_dir; 5092 struct live_urb *lurb; 5093 5094 /* 5095 * each submitted URB needs to map to a unique ID for the 5096 * kernel, and that unique ID needs to be a pointer to 5097 * host memory. hence, we need to malloc for each URB. 5098 * isochronous transfers have a variable length struct. 5099 */ 5100 arg_type++; 5101 target_size = thunk_type_size(arg_type, THUNK_TARGET); 5102 5103 /* construct host copy of urb and metadata */ 5104 lurb = g_try_new0(struct live_urb, 1); 5105 if (!lurb) { 5106 return -TARGET_ENOMEM; 5107 } 5108 5109 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5110 if (!argptr) { 5111 g_free(lurb); 5112 return -TARGET_EFAULT; 5113 } 5114 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST); 5115 unlock_user(argptr, arg, 0); 5116 5117 lurb->target_urb_adr = arg; 5118 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer; 5119 5120 /* buffer space used depends on endpoint type so lock the entire buffer */ 5121 /* control type urbs should check the buffer contents for true direction */ 5122 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? 
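                     /* IN endpoints are device-to-host: the host kernel
                      * writes into the buffer, so the guest pages must be
                      * writable; for OUT we only read the guest's data. */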
VERIFY_WRITE : VERIFY_READ; 5123 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr, 5124 lurb->host_urb.buffer_length, 1); 5125 if (lurb->target_buf_ptr == NULL) { 5126 g_free(lurb); 5127 return -TARGET_EFAULT; 5128 } 5129 5130 /* update buffer pointer in host copy */ 5131 lurb->host_urb.buffer = lurb->target_buf_ptr; 5132 5133 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb)); 5134 if (is_error(ret)) { 5135 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0); 5136 g_free(lurb); 5137 } else { 5138 urb_hashtable_insert(lurb); 5139 } 5140 5141 return ret; 5142 } 5143 #endif /* CONFIG_USBFS */ 5144 5145 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5146 int cmd, abi_long arg) 5147 { 5148 void *argptr; 5149 struct dm_ioctl *host_dm; 5150 abi_long guest_data; 5151 uint32_t guest_data_size; 5152 int target_size; 5153 const argtype *arg_type = ie->arg_type; 5154 abi_long ret; 5155 void *big_buf = NULL; 5156 char *host_data; 5157 5158 arg_type++; 5159 target_size = thunk_type_size(arg_type, 0); 5160 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5161 if (!argptr) { 5162 ret = -TARGET_EFAULT; 5163 goto out; 5164 } 5165 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5166 unlock_user(argptr, arg, 0); 5167 5168 /* buf_temp is too small, so fetch things into a bigger buffer */ 5169 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 5170 memcpy(big_buf, buf_temp, target_size); 5171 buf_temp = big_buf; 5172 host_dm = big_buf; 5173 5174 guest_data = arg + host_dm->data_start; 5175 if ((guest_data - arg) < 0) { 5176 ret = -TARGET_EINVAL; 5177 goto out; 5178 } 5179 guest_data_size = host_dm->data_size - host_dm->data_start; 5180 host_data = (char*)host_dm + host_dm->data_start; 5181 5182 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 5183 if (!argptr) { 5184 ret = -TARGET_EFAULT; 5185 goto out; 5186 } 5187 5188 switch (ie->host_cmd) { 5189 case DM_REMOVE_ALL: 5190 case DM_LIST_DEVICES: 5191 case DM_DEV_CREATE: 5192 case DM_DEV_REMOVE: 5193 case DM_DEV_SUSPEND: 5194 case DM_DEV_STATUS: 5195 case DM_DEV_WAIT: 5196 case DM_TABLE_STATUS: 5197 case DM_TABLE_CLEAR: 5198 case DM_TABLE_DEPS: 5199 case DM_LIST_VERSIONS: 5200 /* no input data */ 5201 break; 5202 case DM_DEV_RENAME: 5203 case DM_DEV_SET_GEOMETRY: 5204 /* data contains only strings */ 5205 memcpy(host_data, argptr, guest_data_size); 5206 break; 5207 case DM_TARGET_MSG: 5208 memcpy(host_data, argptr, guest_data_size); 5209 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 5210 break; 5211 case DM_TABLE_LOAD: 5212 { 5213 void *gspec = argptr; 5214 void *cur_data = host_data; 5215 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5216 int spec_size = thunk_type_size(arg_type, 0); 5217 int i; 5218 5219 for (i = 0; i < host_dm->target_count; i++) { 5220 struct dm_target_spec *spec = cur_data; 5221 uint32_t next; 5222 int slen; 5223 5224 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 5225 slen = strlen((char*)gspec + spec_size) + 1; 5226 next = spec->next; 5227 spec->next = sizeof(*spec) + slen; 5228 strcpy((char*)&spec[1], gspec + spec_size); 5229 gspec += next; 5230 cur_data += spec->next; 5231 } 5232 break; 5233 } 5234 default: 5235 ret = -TARGET_EINVAL; 5236 unlock_user(argptr, guest_data, 0); 5237 goto out; 5238 } 5239 unlock_user(argptr, guest_data, 0); 5240 5241 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5242 if (!is_error(ret)) { 5243 guest_data = arg + host_dm->data_start; 5244 guest_data_size = 
host_dm->data_size - host_dm->data_start; 5245 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 5246 switch (ie->host_cmd) { 5247 case DM_REMOVE_ALL: 5248 case DM_DEV_CREATE: 5249 case DM_DEV_REMOVE: 5250 case DM_DEV_RENAME: 5251 case DM_DEV_SUSPEND: 5252 case DM_DEV_STATUS: 5253 case DM_TABLE_LOAD: 5254 case DM_TABLE_CLEAR: 5255 case DM_TARGET_MSG: 5256 case DM_DEV_SET_GEOMETRY: 5257 /* no return data */ 5258 break; 5259 case DM_LIST_DEVICES: 5260 { 5261 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 5262 uint32_t remaining_data = guest_data_size; 5263 void *cur_data = argptr; 5264 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 5265 int nl_size = 12; /* can't use thunk_size due to alignment */ 5266 5267 while (1) { 5268 uint32_t next = nl->next; 5269 if (next) { 5270 nl->next = nl_size + (strlen(nl->name) + 1); 5271 } 5272 if (remaining_data < nl->next) { 5273 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5274 break; 5275 } 5276 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 5277 strcpy(cur_data + nl_size, nl->name); 5278 cur_data += nl->next; 5279 remaining_data -= nl->next; 5280 if (!next) { 5281 break; 5282 } 5283 nl = (void*)nl + next; 5284 } 5285 break; 5286 } 5287 case DM_DEV_WAIT: 5288 case DM_TABLE_STATUS: 5289 { 5290 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 5291 void *cur_data = argptr; 5292 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 5293 int spec_size = thunk_type_size(arg_type, 0); 5294 int i; 5295 5296 for (i = 0; i < host_dm->target_count; i++) { 5297 uint32_t next = spec->next; 5298 int slen = strlen((char*)&spec[1]) + 1; 5299 spec->next = (cur_data - argptr) + spec_size + slen; 5300 if (guest_data_size < spec->next) { 5301 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5302 break; 5303 } 5304 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 5305 strcpy(cur_data + spec_size, (char*)&spec[1]); 5306 cur_data = argptr + spec->next; 5307 spec = (void*)host_dm + host_dm->data_start + next; 5308 } 5309 break; 5310 } 5311 case DM_TABLE_DEPS: 5312 { 5313 void *hdata = (void*)host_dm + host_dm->data_start; 5314 int count = *(uint32_t*)hdata; 5315 uint64_t *hdev = hdata + 8; 5316 uint64_t *gdev = argptr + 8; 5317 int i; 5318 5319 *(uint32_t*)argptr = tswap32(count); 5320 for (i = 0; i < count; i++) { 5321 *gdev = tswap64(*hdev); 5322 gdev++; 5323 hdev++; 5324 } 5325 break; 5326 } 5327 case DM_LIST_VERSIONS: 5328 { 5329 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 5330 uint32_t remaining_data = guest_data_size; 5331 void *cur_data = argptr; 5332 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 5333 int vers_size = thunk_type_size(arg_type, 0); 5334 5335 while (1) { 5336 uint32_t next = vers->next; 5337 if (next) { 5338 vers->next = vers_size + (strlen(vers->name) + 1); 5339 } 5340 if (remaining_data < vers->next) { 5341 host_dm->flags |= DM_BUFFER_FULL_FLAG; 5342 break; 5343 } 5344 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 5345 strcpy(cur_data + vers_size, vers->name); 5346 cur_data += vers->next; 5347 remaining_data -= vers->next; 5348 if (!next) { 5349 break; 5350 } 5351 vers = (void*)vers + next; 5352 } 5353 break; 5354 } 5355 default: 5356 unlock_user(argptr, guest_data, 0); 5357 ret = -TARGET_EINVAL; 5358 goto out; 5359 } 5360 unlock_user(argptr, guest_data, guest_data_size); 5361 5362 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5363 if (!argptr) { 5364 ret = -TARGET_EFAULT; 5365 goto out; 5366 } 5367 
thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5368 unlock_user(argptr, arg, target_size); 5369 } 5370 out: 5371 g_free(big_buf); 5372 return ret; 5373 } 5374 5375 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 5376 int cmd, abi_long arg) 5377 { 5378 void *argptr; 5379 int target_size; 5380 const argtype *arg_type = ie->arg_type; 5381 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 5382 abi_long ret; 5383 5384 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 5385 struct blkpg_partition host_part; 5386 5387 /* Read and convert blkpg */ 5388 arg_type++; 5389 target_size = thunk_type_size(arg_type, 0); 5390 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5391 if (!argptr) { 5392 ret = -TARGET_EFAULT; 5393 goto out; 5394 } 5395 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5396 unlock_user(argptr, arg, 0); 5397 5398 switch (host_blkpg->op) { 5399 case BLKPG_ADD_PARTITION: 5400 case BLKPG_DEL_PARTITION: 5401 /* payload is struct blkpg_partition */ 5402 break; 5403 default: 5404 /* Unknown opcode */ 5405 ret = -TARGET_EINVAL; 5406 goto out; 5407 } 5408 5409 /* Read and convert blkpg->data */ 5410 arg = (abi_long)(uintptr_t)host_blkpg->data; 5411 target_size = thunk_type_size(part_arg_type, 0); 5412 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5413 if (!argptr) { 5414 ret = -TARGET_EFAULT; 5415 goto out; 5416 } 5417 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 5418 unlock_user(argptr, arg, 0); 5419 5420 /* Swizzle the data pointer to our local copy and call! */ 5421 host_blkpg->data = &host_part; 5422 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg)); 5423 5424 out: 5425 return ret; 5426 } 5427 5428 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 5429 int fd, int cmd, abi_long arg) 5430 { 5431 const argtype *arg_type = ie->arg_type; 5432 const StructEntry *se; 5433 const argtype *field_types; 5434 const int *dst_offsets, *src_offsets; 5435 int target_size; 5436 void *argptr; 5437 abi_ulong *target_rt_dev_ptr = NULL; 5438 unsigned long *host_rt_dev_ptr = NULL; 5439 abi_long ret; 5440 int i; 5441 5442 assert(ie->access == IOC_W); 5443 assert(*arg_type == TYPE_PTR); 5444 arg_type++; 5445 assert(*arg_type == TYPE_STRUCT); 5446 target_size = thunk_type_size(arg_type, 0); 5447 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5448 if (!argptr) { 5449 return -TARGET_EFAULT; 5450 } 5451 arg_type++; 5452 assert(*arg_type == (int)STRUCT_rtentry); 5453 se = struct_entries + *arg_type++; 5454 assert(se->convert[0] == NULL); 5455 /* convert struct here to be able to catch rt_dev string */ 5456 field_types = se->field_types; 5457 dst_offsets = se->field_offsets[THUNK_HOST]; 5458 src_offsets = se->field_offsets[THUNK_TARGET]; 5459 for (i = 0; i < se->nb_fields; i++) { 5460 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 5461 assert(*field_types == TYPE_PTRVOID); 5462 target_rt_dev_ptr = argptr + src_offsets[i]; 5463 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 5464 if (*target_rt_dev_ptr != 0) { 5465 *host_rt_dev_ptr = (unsigned long)lock_user_string( 5466 tswapal(*target_rt_dev_ptr)); 5467 if (!*host_rt_dev_ptr) { 5468 unlock_user(argptr, arg, 0); 5469 return -TARGET_EFAULT; 5470 } 5471 } else { 5472 *host_rt_dev_ptr = 0; 5473 } 5474 field_types++; 5475 continue; 5476 } 5477 field_types = thunk_convert(buf_temp + dst_offsets[i], 5478 argptr + src_offsets[i], 5479 field_types, THUNK_HOST); 5480 } 5481 unlock_user(argptr, arg, 0); 5482 5483 
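    /* buf_temp now holds the host-format rtentry; if rt_dev was set it
     * points at a locked copy of the guest's device-name string, which
     * is unlocked again once the ioctl below has completed. */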
ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5484 5485 assert(host_rt_dev_ptr != NULL); 5486 assert(target_rt_dev_ptr != NULL); 5487 if (*host_rt_dev_ptr != 0) { 5488 unlock_user((void *)*host_rt_dev_ptr, 5489 *target_rt_dev_ptr, 0); 5490 } 5491 return ret; 5492 } 5493 5494 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 5495 int fd, int cmd, abi_long arg) 5496 { 5497 int sig = target_to_host_signal(arg); 5498 return get_errno(safe_ioctl(fd, ie->host_cmd, sig)); 5499 } 5500 5501 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp, 5502 int fd, int cmd, abi_long arg) 5503 { 5504 struct timeval tv; 5505 abi_long ret; 5506 5507 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv)); 5508 if (is_error(ret)) { 5509 return ret; 5510 } 5511 5512 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) { 5513 if (copy_to_user_timeval(arg, &tv)) { 5514 return -TARGET_EFAULT; 5515 } 5516 } else { 5517 if (copy_to_user_timeval64(arg, &tv)) { 5518 return -TARGET_EFAULT; 5519 } 5520 } 5521 5522 return ret; 5523 } 5524 5525 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp, 5526 int fd, int cmd, abi_long arg) 5527 { 5528 struct timespec ts; 5529 abi_long ret; 5530 5531 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts)); 5532 if (is_error(ret)) { 5533 return ret; 5534 } 5535 5536 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) { 5537 if (host_to_target_timespec(arg, &ts)) { 5538 return -TARGET_EFAULT; 5539 } 5540 } else{ 5541 if (host_to_target_timespec64(arg, &ts)) { 5542 return -TARGET_EFAULT; 5543 } 5544 } 5545 5546 return ret; 5547 } 5548 5549 #ifdef TIOCGPTPEER 5550 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp, 5551 int fd, int cmd, abi_long arg) 5552 { 5553 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl); 5554 return get_errno(safe_ioctl(fd, ie->host_cmd, flags)); 5555 } 5556 #endif 5557 5558 #ifdef HAVE_DRM_H 5559 5560 static void unlock_drm_version(struct drm_version *host_ver, 5561 struct target_drm_version *target_ver, 5562 bool copy) 5563 { 5564 unlock_user(host_ver->name, target_ver->name, 5565 copy ? host_ver->name_len : 0); 5566 unlock_user(host_ver->date, target_ver->date, 5567 copy ? host_ver->date_len : 0); 5568 unlock_user(host_ver->desc, target_ver->desc, 5569 copy ? 
host_ver->desc_len : 0); 5570 } 5571 5572 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver, 5573 struct target_drm_version *target_ver) 5574 { 5575 memset(host_ver, 0, sizeof(*host_ver)); 5576 5577 __get_user(host_ver->name_len, &target_ver->name_len); 5578 if (host_ver->name_len) { 5579 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name, 5580 target_ver->name_len, 0); 5581 if (!host_ver->name) { 5582 return -EFAULT; 5583 } 5584 } 5585 5586 __get_user(host_ver->date_len, &target_ver->date_len); 5587 if (host_ver->date_len) { 5588 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date, 5589 target_ver->date_len, 0); 5590 if (!host_ver->date) { 5591 goto err; 5592 } 5593 } 5594 5595 __get_user(host_ver->desc_len, &target_ver->desc_len); 5596 if (host_ver->desc_len) { 5597 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc, 5598 target_ver->desc_len, 0); 5599 if (!host_ver->desc) { 5600 goto err; 5601 } 5602 } 5603 5604 return 0; 5605 err: 5606 unlock_drm_version(host_ver, target_ver, false); 5607 return -EFAULT; 5608 } 5609 5610 static inline void host_to_target_drmversion( 5611 struct target_drm_version *target_ver, 5612 struct drm_version *host_ver) 5613 { 5614 __put_user(host_ver->version_major, &target_ver->version_major); 5615 __put_user(host_ver->version_minor, &target_ver->version_minor); 5616 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel); 5617 __put_user(host_ver->name_len, &target_ver->name_len); 5618 __put_user(host_ver->date_len, &target_ver->date_len); 5619 __put_user(host_ver->desc_len, &target_ver->desc_len); 5620 unlock_drm_version(host_ver, target_ver, true); 5621 } 5622 5623 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp, 5624 int fd, int cmd, abi_long arg) 5625 { 5626 struct drm_version *ver; 5627 struct target_drm_version *target_ver; 5628 abi_long ret; 5629 5630 switch (ie->host_cmd) { 5631 case DRM_IOCTL_VERSION: 5632 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) { 5633 return -TARGET_EFAULT; 5634 } 5635 ver = (struct drm_version *)buf_temp; 5636 ret = target_to_host_drmversion(ver, target_ver); 5637 if (!is_error(ret)) { 5638 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver)); 5639 if (is_error(ret)) { 5640 unlock_drm_version(ver, target_ver, false); 5641 } else { 5642 host_to_target_drmversion(target_ver, ver); 5643 } 5644 } 5645 unlock_user_struct(target_ver, arg, 0); 5646 return ret; 5647 } 5648 return -TARGET_ENOSYS; 5649 } 5650 5651 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie, 5652 struct drm_i915_getparam *gparam, 5653 int fd, abi_long arg) 5654 { 5655 abi_long ret; 5656 int value; 5657 struct target_drm_i915_getparam *target_gparam; 5658 5659 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) { 5660 return -TARGET_EFAULT; 5661 } 5662 5663 __get_user(gparam->param, &target_gparam->param); 5664 gparam->value = &value; 5665 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam)); 5666 put_user_s32(value, target_gparam->value); 5667 5668 unlock_user_struct(target_gparam, arg, 0); 5669 return ret; 5670 } 5671 5672 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp, 5673 int fd, int cmd, abi_long arg) 5674 { 5675 switch (ie->host_cmd) { 5676 case DRM_IOCTL_I915_GETPARAM: 5677 return do_ioctl_drm_i915_getparam(ie, 5678 (struct drm_i915_getparam *)buf_temp, 5679 fd, arg); 5680 default: 5681 return -TARGET_ENOSYS; 5682 } 5683 } 5684 5685 #endif 5686 5687 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, 
uint8_t *buf_temp, 5688 int fd, int cmd, abi_long arg) 5689 { 5690 struct tun_filter *filter = (struct tun_filter *)buf_temp; 5691 struct tun_filter *target_filter; 5692 char *target_addr; 5693 5694 assert(ie->access == IOC_W); 5695 5696 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1); 5697 if (!target_filter) { 5698 return -TARGET_EFAULT; 5699 } 5700 filter->flags = tswap16(target_filter->flags); 5701 filter->count = tswap16(target_filter->count); 5702 unlock_user(target_filter, arg, 0); 5703 5704 if (filter->count) { 5705 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN > 5706 MAX_STRUCT_SIZE) { 5707 return -TARGET_EFAULT; 5708 } 5709 5710 target_addr = lock_user(VERIFY_READ, 5711 arg + offsetof(struct tun_filter, addr), 5712 filter->count * ETH_ALEN, 1); 5713 if (!target_addr) { 5714 return -TARGET_EFAULT; 5715 } 5716 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN); 5717 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0); 5718 } 5719 5720 return get_errno(safe_ioctl(fd, ie->host_cmd, filter)); 5721 } 5722 5723 IOCTLEntry ioctl_entries[] = { 5724 #define IOCTL(cmd, access, ...) \ 5725 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5726 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5727 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5728 #define IOCTL_IGNORE(cmd) \ 5729 { TARGET_ ## cmd, 0, #cmd }, 5730 #include "ioctls.h" 5731 { 0, 0, }, 5732 }; 5733 5734 /* ??? Implement proper locking for ioctls. */ 5735 /* do_ioctl() Must return target values and target errnos. */ 5736 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5737 { 5738 const IOCTLEntry *ie; 5739 const argtype *arg_type; 5740 abi_long ret; 5741 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5742 int target_size; 5743 void *argptr; 5744 5745 ie = ioctl_entries; 5746 for(;;) { 5747 if (ie->target_cmd == 0) { 5748 qemu_log_mask( 5749 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5750 return -TARGET_ENOTTY; 5751 } 5752 if (ie->target_cmd == cmd) 5753 break; 5754 ie++; 5755 } 5756 arg_type = ie->arg_type; 5757 if (ie->do_ioctl) { 5758 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5759 } else if (!ie->host_cmd) { 5760 /* Some architectures define BSD ioctls in their headers 5761 that are not implemented in Linux. 
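       For those, IOCTL_IGNORE() leaves ie->host_cmd as 0, and we mirror
       the kernel's behaviour for an unknown ioctl by failing the request
       (with -TARGET_ENOTTY, as below).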
*/ 5762 return -TARGET_ENOTTY; 5763 } 5764 5765 switch(arg_type[0]) { 5766 case TYPE_NULL: 5767 /* no argument */ 5768 ret = get_errno(safe_ioctl(fd, ie->host_cmd)); 5769 break; 5770 case TYPE_PTRVOID: 5771 case TYPE_INT: 5772 case TYPE_LONG: 5773 case TYPE_ULONG: 5774 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg)); 5775 break; 5776 case TYPE_PTR: 5777 arg_type++; 5778 target_size = thunk_type_size(arg_type, 0); 5779 switch(ie->access) { 5780 case IOC_R: 5781 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5782 if (!is_error(ret)) { 5783 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5784 if (!argptr) 5785 return -TARGET_EFAULT; 5786 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5787 unlock_user(argptr, arg, target_size); 5788 } 5789 break; 5790 case IOC_W: 5791 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5792 if (!argptr) 5793 return -TARGET_EFAULT; 5794 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5795 unlock_user(argptr, arg, 0); 5796 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5797 break; 5798 default: 5799 case IOC_RW: 5800 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5801 if (!argptr) 5802 return -TARGET_EFAULT; 5803 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5804 unlock_user(argptr, arg, 0); 5805 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5806 if (!is_error(ret)) { 5807 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5808 if (!argptr) 5809 return -TARGET_EFAULT; 5810 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5811 unlock_user(argptr, arg, target_size); 5812 } 5813 break; 5814 } 5815 break; 5816 default: 5817 qemu_log_mask(LOG_UNIMP, 5818 "Unsupported ioctl type: cmd=0x%04lx type=%d\n", 5819 (long)cmd, arg_type[0]); 5820 ret = -TARGET_ENOTTY; 5821 break; 5822 } 5823 return ret; 5824 } 5825 5826 static const bitmask_transtbl iflag_tbl[] = { 5827 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 5828 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 5829 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 5830 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 5831 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 5832 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 5833 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 5834 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 5835 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 5836 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 5837 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 5838 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 5839 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 5840 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 5841 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8}, 5842 { 0, 0, 0, 0 } 5843 }; 5844 5845 static const bitmask_transtbl oflag_tbl[] = { 5846 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 5847 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 5848 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 5849 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 5850 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 5851 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 5852 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 5853 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 5854 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 5855 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 5856 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 5857 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 5858 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 5859 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 5860 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 5861 { TARGET_TABDLY, TARGET_TAB1, 
TABDLY, TAB1 }, 5862 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 5863 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 5864 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 5865 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 5866 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 5867 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 5868 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 5869 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 5870 { 0, 0, 0, 0 } 5871 }; 5872 5873 static const bitmask_transtbl cflag_tbl[] = { 5874 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 5875 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 5876 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 5877 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 5878 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 5879 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 5880 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 5881 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 5882 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 5883 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 5884 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 5885 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 5886 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 5887 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 5888 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 5889 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 5890 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 5891 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 5892 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 5893 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 5894 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 5895 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 5896 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 5897 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 5898 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 5899 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 5900 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 5901 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 5902 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 5903 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 5904 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 5905 { 0, 0, 0, 0 } 5906 }; 5907 5908 static const bitmask_transtbl lflag_tbl[] = { 5909 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 5910 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 5911 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 5912 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 5913 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 5914 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 5915 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 5916 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 5917 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 5918 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 5919 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 5920 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 5921 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 5922 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 5923 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 5924 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC}, 5925 { 0, 0, 0, 0 } 5926 }; 5927 5928 static void target_to_host_termios (void *dst, const void *src) 5929 { 5930 struct host_termios *host = dst; 5931 const struct target_termios *target = src; 5932 5933 host->c_iflag = 5934 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 5935 host->c_oflag = 5936 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 5937 host->c_cflag = 5938 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 5939 host->c_lflag = 5940 target_to_host_bitmask(tswap32(target->c_lflag), 
lflag_tbl); 5941 host->c_line = target->c_line; 5942 5943 memset(host->c_cc, 0, sizeof(host->c_cc)); 5944 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 5945 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 5946 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 5947 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 5948 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 5949 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 5950 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 5951 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 5952 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 5953 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 5954 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 5955 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 5956 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 5957 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 5958 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 5959 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 5960 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 5961 } 5962 5963 static void host_to_target_termios (void *dst, const void *src) 5964 { 5965 struct target_termios *target = dst; 5966 const struct host_termios *host = src; 5967 5968 target->c_iflag = 5969 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 5970 target->c_oflag = 5971 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 5972 target->c_cflag = 5973 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 5974 target->c_lflag = 5975 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 5976 target->c_line = host->c_line; 5977 5978 memset(target->c_cc, 0, sizeof(target->c_cc)); 5979 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 5980 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 5981 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 5982 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 5983 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 5984 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 5985 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 5986 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 5987 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 5988 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 5989 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 5990 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 5991 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 5992 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 5993 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 5994 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 5995 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 5996 } 5997 5998 static const StructEntry struct_termios_def = { 5999 .convert = { host_to_target_termios, target_to_host_termios }, 6000 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 6001 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 6002 .print = print_termios, 6003 }; 6004 6005 static const bitmask_transtbl mmap_flags_tbl[] = { 6006 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 6007 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 6008 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 6009 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, 6010 MAP_ANONYMOUS, MAP_ANONYMOUS }, 6011 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, 6012 MAP_GROWSDOWN, MAP_GROWSDOWN }, 6013 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, 6014 MAP_DENYWRITE, MAP_DENYWRITE }, 6015 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, 6016 MAP_EXECUTABLE, MAP_EXECUTABLE }, 6017 { TARGET_MAP_LOCKED, 
TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 6018 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, 6019 MAP_NORESERVE, MAP_NORESERVE }, 6020 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB }, 6021 /* MAP_STACK has been ignored by the kernel for quite some time. 6022 Recognize it for the target, but do not pass 6023 it through to the host. */ 6024 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 }, 6025 { 0, 0, 0, 0 } 6026 }; 6027 6028 /* 6029 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64) 6030 * TARGET_I386 is defined if TARGET_X86_64 is defined 6031 */ 6032 #if defined(TARGET_I386) 6033 6034 /* NOTE: there is really only one LDT, shared by all threads */ 6035 static uint8_t *ldt_table; 6036 6037 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 6038 { 6039 int size; 6040 void *p; 6041 6042 if (!ldt_table) 6043 return 0; 6044 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 6045 if (size > bytecount) 6046 size = bytecount; 6047 p = lock_user(VERIFY_WRITE, ptr, size, 0); 6048 if (!p) 6049 return -TARGET_EFAULT; 6050 /* ??? Should this be byteswapped? */ 6051 memcpy(p, ldt_table, size); 6052 unlock_user(p, ptr, size); 6053 return size; 6054 } 6055 6056 /* XXX: add locking support */ 6057 static abi_long write_ldt(CPUX86State *env, 6058 abi_ulong ptr, unsigned long bytecount, int oldmode) 6059 { 6060 struct target_modify_ldt_ldt_s ldt_info; 6061 struct target_modify_ldt_ldt_s *target_ldt_info; 6062 int seg_32bit, contents, read_exec_only, limit_in_pages; 6063 int seg_not_present, useable, lm; 6064 uint32_t *lp, entry_1, entry_2; 6065 6066 if (bytecount != sizeof(ldt_info)) 6067 return -TARGET_EINVAL; 6068 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 6069 return -TARGET_EFAULT; 6070 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6071 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6072 ldt_info.limit = tswap32(target_ldt_info->limit); 6073 ldt_info.flags = tswap32(target_ldt_info->flags); 6074 unlock_user_struct(target_ldt_info, ptr, 0); 6075 6076 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 6077 return -TARGET_EINVAL; 6078 seg_32bit = ldt_info.flags & 1; 6079 contents = (ldt_info.flags >> 1) & 3; 6080 read_exec_only = (ldt_info.flags >> 3) & 1; 6081 limit_in_pages = (ldt_info.flags >> 4) & 1; 6082 seg_not_present = (ldt_info.flags >> 5) & 1; 6083 useable = (ldt_info.flags >> 6) & 1; 6084 #ifdef TARGET_ABI32 6085 lm = 0; 6086 #else 6087 lm = (ldt_info.flags >> 7) & 1; 6088 #endif 6089 if (contents == 3) { 6090 if (oldmode) 6091 return -TARGET_EINVAL; 6092 if (seg_not_present == 0) 6093 return -TARGET_EINVAL; 6094 } 6095 /* allocate the LDT */ 6096 if (!ldt_table) { 6097 env->ldt.base = target_mmap(0, 6098 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 6099 PROT_READ|PROT_WRITE, 6100 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 6101 if (env->ldt.base == -1) 6102 return -TARGET_ENOMEM; 6103 memset(g2h_untagged(env->ldt.base), 0, 6104 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 6105 env->ldt.limit = 0xffff; 6106 ldt_table = g2h_untagged(env->ldt.base); 6107 } 6108 6109 /* NOTE: same code as Linux kernel */ 6110 /* Allow LDTs to be cleared by the user.
*/ 6111 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6112 if (oldmode || 6113 (contents == 0 && 6114 read_exec_only == 1 && 6115 seg_32bit == 0 && 6116 limit_in_pages == 0 && 6117 seg_not_present == 1 && 6118 useable == 0 )) { 6119 entry_1 = 0; 6120 entry_2 = 0; 6121 goto install; 6122 } 6123 } 6124 6125 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6126 (ldt_info.limit & 0x0ffff); 6127 entry_2 = (ldt_info.base_addr & 0xff000000) | 6128 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6129 (ldt_info.limit & 0xf0000) | 6130 ((read_exec_only ^ 1) << 9) | 6131 (contents << 10) | 6132 ((seg_not_present ^ 1) << 15) | 6133 (seg_32bit << 22) | 6134 (limit_in_pages << 23) | 6135 (lm << 21) | 6136 0x7000; 6137 if (!oldmode) 6138 entry_2 |= (useable << 20); 6139 6140 /* Install the new entry ... */ 6141 install: 6142 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 6143 lp[0] = tswap32(entry_1); 6144 lp[1] = tswap32(entry_2); 6145 return 0; 6146 } 6147 6148 /* specific and weird i386 syscalls */ 6149 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 6150 unsigned long bytecount) 6151 { 6152 abi_long ret; 6153 6154 switch (func) { 6155 case 0: 6156 ret = read_ldt(ptr, bytecount); 6157 break; 6158 case 1: 6159 ret = write_ldt(env, ptr, bytecount, 1); 6160 break; 6161 case 0x11: 6162 ret = write_ldt(env, ptr, bytecount, 0); 6163 break; 6164 default: 6165 ret = -TARGET_ENOSYS; 6166 break; 6167 } 6168 return ret; 6169 } 6170 6171 #if defined(TARGET_ABI32) 6172 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 6173 { 6174 uint64_t *gdt_table = g2h_untagged(env->gdt.base); 6175 struct target_modify_ldt_ldt_s ldt_info; 6176 struct target_modify_ldt_ldt_s *target_ldt_info; 6177 int seg_32bit, contents, read_exec_only, limit_in_pages; 6178 int seg_not_present, useable, lm; 6179 uint32_t *lp, entry_1, entry_2; 6180 int i; 6181 6182 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6183 if (!target_ldt_info) 6184 return -TARGET_EFAULT; 6185 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6186 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6187 ldt_info.limit = tswap32(target_ldt_info->limit); 6188 ldt_info.flags = tswap32(target_ldt_info->flags); 6189 if (ldt_info.entry_number == -1) { 6190 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 6191 if (gdt_table[i] == 0) { 6192 ldt_info.entry_number = i; 6193 target_ldt_info->entry_number = tswap32(i); 6194 break; 6195 } 6196 } 6197 } 6198 unlock_user_struct(target_ldt_info, ptr, 1); 6199 6200 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 6201 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 6202 return -TARGET_EINVAL; 6203 seg_32bit = ldt_info.flags & 1; 6204 contents = (ldt_info.flags >> 1) & 3; 6205 read_exec_only = (ldt_info.flags >> 3) & 1; 6206 limit_in_pages = (ldt_info.flags >> 4) & 1; 6207 seg_not_present = (ldt_info.flags >> 5) & 1; 6208 useable = (ldt_info.flags >> 6) & 1; 6209 #ifdef TARGET_ABI32 6210 lm = 0; 6211 #else 6212 lm = (ldt_info.flags >> 7) & 1; 6213 #endif 6214 6215 if (contents == 3) { 6216 if (seg_not_present == 0) 6217 return -TARGET_EINVAL; 6218 } 6219 6220 /* NOTE: same code as Linux kernel */ 6221 /* Allow LDTs to be cleared by the user. 
*/ 6222 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6223 if ((contents == 0 && 6224 read_exec_only == 1 && 6225 seg_32bit == 0 && 6226 limit_in_pages == 0 && 6227 seg_not_present == 1 && 6228 useable == 0 )) { 6229 entry_1 = 0; 6230 entry_2 = 0; 6231 goto install; 6232 } 6233 } 6234 6235 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6236 (ldt_info.limit & 0x0ffff); 6237 entry_2 = (ldt_info.base_addr & 0xff000000) | 6238 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6239 (ldt_info.limit & 0xf0000) | 6240 ((read_exec_only ^ 1) << 9) | 6241 (contents << 10) | 6242 ((seg_not_present ^ 1) << 15) | 6243 (seg_32bit << 22) | 6244 (limit_in_pages << 23) | 6245 (useable << 20) | 6246 (lm << 21) | 6247 0x7000; 6248 6249 /* Install the new entry ... */ 6250 install: 6251 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 6252 lp[0] = tswap32(entry_1); 6253 lp[1] = tswap32(entry_2); 6254 return 0; 6255 } 6256 6257 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 6258 { 6259 struct target_modify_ldt_ldt_s *target_ldt_info; 6260 uint64_t *gdt_table = g2h_untagged(env->gdt.base); 6261 uint32_t base_addr, limit, flags; 6262 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 6263 int seg_not_present, useable, lm; 6264 uint32_t *lp, entry_1, entry_2; 6265 6266 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6267 if (!target_ldt_info) 6268 return -TARGET_EFAULT; 6269 idx = tswap32(target_ldt_info->entry_number); 6270 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 6271 idx > TARGET_GDT_ENTRY_TLS_MAX) { 6272 unlock_user_struct(target_ldt_info, ptr, 1); 6273 return -TARGET_EINVAL; 6274 } 6275 lp = (uint32_t *)(gdt_table + idx); 6276 entry_1 = tswap32(lp[0]); 6277 entry_2 = tswap32(lp[1]); 6278 6279 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 6280 contents = (entry_2 >> 10) & 3; 6281 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 6282 seg_32bit = (entry_2 >> 22) & 1; 6283 limit_in_pages = (entry_2 >> 23) & 1; 6284 useable = (entry_2 >> 20) & 1; 6285 #ifdef TARGET_ABI32 6286 lm = 0; 6287 #else 6288 lm = (entry_2 >> 21) & 1; 6289 #endif 6290 flags = (seg_32bit << 0) | (contents << 1) | 6291 (read_exec_only << 3) | (limit_in_pages << 4) | 6292 (seg_not_present << 5) | (useable << 6) | (lm << 7); 6293 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 6294 base_addr = (entry_1 >> 16) | 6295 (entry_2 & 0xff000000) | 6296 ((entry_2 & 0xff) << 16); 6297 target_ldt_info->base_addr = tswapal(base_addr); 6298 target_ldt_info->limit = tswap32(limit); 6299 target_ldt_info->flags = tswap32(flags); 6300 unlock_user_struct(target_ldt_info, ptr, 1); 6301 return 0; 6302 } 6303 6304 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6305 { 6306 return -TARGET_ENOSYS; 6307 } 6308 #else 6309 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6310 { 6311 abi_long ret = 0; 6312 abi_ulong val; 6313 int idx; 6314 6315 switch(code) { 6316 case TARGET_ARCH_SET_GS: 6317 case TARGET_ARCH_SET_FS: 6318 if (code == TARGET_ARCH_SET_GS) 6319 idx = R_GS; 6320 else 6321 idx = R_FS; 6322 cpu_x86_load_seg(env, idx, 0); 6323 env->segs[idx].base = addr; 6324 break; 6325 case TARGET_ARCH_GET_GS: 6326 case TARGET_ARCH_GET_FS: 6327 if (code == TARGET_ARCH_GET_GS) 6328 idx = R_GS; 6329 else 6330 idx = R_FS; 6331 val = env->segs[idx].base; 6332 if (put_user(val, addr, abi_ulong)) 6333 ret = -TARGET_EFAULT; 6334 break; 6335 default: 6336 ret = -TARGET_EINVAL; 6337 break; 6338 } 6339 return ret; 6340 } 6341 #endif /* defined(TARGET_ABI32) */ 6342 #endif /* defined(TARGET_I386) */
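/*
 * For reference only (not used by the code): the two words built by
 * write_ldt() and do_set_thread_area() above form a standard x86 segment
 * descriptor. entry_1 holds base[15:0] in its high half and limit[15:0]
 * in its low half; entry_2 holds base[31:24] and base[23:16],
 * limit[19:16], the type/flag bits, and 0x7000 (S=1, DPL=3). As a worked
 * example: base_addr 0x12345678, limit 0xfffff, seg_32bit=1,
 * limit_in_pages=1, useable=1 and the remaining flag bits 0 packs to
 * entry_1 = 0x5678ffff and entry_2 = 0x12dff234.
 */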
6343 6344 /* 6345 * These constants are generic. Supply any that are missing from the host. 6346 */ 6347 #ifndef PR_SET_NAME 6348 # define PR_SET_NAME 15 6349 # define PR_GET_NAME 16 6350 #endif 6351 #ifndef PR_SET_FP_MODE 6352 # define PR_SET_FP_MODE 45 6353 # define PR_GET_FP_MODE 46 6354 # define PR_FP_MODE_FR (1 << 0) 6355 # define PR_FP_MODE_FRE (1 << 1) 6356 #endif 6357 #ifndef PR_SVE_SET_VL 6358 # define PR_SVE_SET_VL 50 6359 # define PR_SVE_GET_VL 51 6360 # define PR_SVE_VL_LEN_MASK 0xffff 6361 # define PR_SVE_VL_INHERIT (1 << 17) 6362 #endif 6363 #ifndef PR_PAC_RESET_KEYS 6364 # define PR_PAC_RESET_KEYS 54 6365 # define PR_PAC_APIAKEY (1 << 0) 6366 # define PR_PAC_APIBKEY (1 << 1) 6367 # define PR_PAC_APDAKEY (1 << 2) 6368 # define PR_PAC_APDBKEY (1 << 3) 6369 # define PR_PAC_APGAKEY (1 << 4) 6370 #endif 6371 #ifndef PR_SET_TAGGED_ADDR_CTRL 6372 # define PR_SET_TAGGED_ADDR_CTRL 55 6373 # define PR_GET_TAGGED_ADDR_CTRL 56 6374 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) 6375 #endif 6376 #ifndef PR_MTE_TCF_SHIFT 6377 # define PR_MTE_TCF_SHIFT 1 6378 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) 6379 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) 6380 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT) 6381 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) 6382 # define PR_MTE_TAG_SHIFT 3 6383 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) 6384 #endif 6385 #ifndef PR_SET_IO_FLUSHER 6386 # define PR_SET_IO_FLUSHER 57 6387 # define PR_GET_IO_FLUSHER 58 6388 #endif 6389 #ifndef PR_SET_SYSCALL_USER_DISPATCH 6390 # define PR_SET_SYSCALL_USER_DISPATCH 59 6391 #endif 6392 #ifndef PR_SME_SET_VL 6393 # define PR_SME_SET_VL 63 6394 # define PR_SME_GET_VL 64 6395 # define PR_SME_VL_LEN_MASK 0xffff 6396 # define PR_SME_VL_INHERIT (1 << 17) 6397 #endif 6398 6399 #include "target_prctl.h" 6400 6401 static abi_long do_prctl_inval0(CPUArchState *env) 6402 { 6403 return -TARGET_EINVAL; 6404 } 6405 6406 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) 6407 { 6408 return -TARGET_EINVAL; 6409 } 6410 6411 #ifndef do_prctl_get_fp_mode 6412 #define do_prctl_get_fp_mode do_prctl_inval0 6413 #endif 6414 #ifndef do_prctl_set_fp_mode 6415 #define do_prctl_set_fp_mode do_prctl_inval1 6416 #endif 6417 #ifndef do_prctl_sve_get_vl 6418 #define do_prctl_sve_get_vl do_prctl_inval0 6419 #endif 6420 #ifndef do_prctl_sve_set_vl 6421 #define do_prctl_sve_set_vl do_prctl_inval1 6422 #endif 6423 #ifndef do_prctl_reset_keys 6424 #define do_prctl_reset_keys do_prctl_inval1 6425 #endif 6426 #ifndef do_prctl_set_tagged_addr_ctrl 6427 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1 6428 #endif 6429 #ifndef do_prctl_get_tagged_addr_ctrl 6430 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0 6431 #endif 6432 #ifndef do_prctl_get_unalign 6433 #define do_prctl_get_unalign do_prctl_inval1 6434 #endif 6435 #ifndef do_prctl_set_unalign 6436 #define do_prctl_set_unalign do_prctl_inval1 6437 #endif 6438 #ifndef do_prctl_sme_get_vl 6439 #define do_prctl_sme_get_vl do_prctl_inval0 6440 #endif 6441 #ifndef do_prctl_sme_set_vl 6442 #define do_prctl_sme_set_vl do_prctl_inval1 6443 #endif 6444 6445 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, 6446 abi_long arg3, abi_long arg4, abi_long arg5) 6447 { 6448 abi_long ret; 6449 6450 switch (option) { 6451 case PR_GET_PDEATHSIG: 6452 { 6453 int deathsig; 6454 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig, 6455 arg3, arg4, arg5)); 6456 if (!is_error(ret) && 6457 put_user_s32(host_to_target_signal(deathsig), arg2)) { 6458 
return -TARGET_EFAULT; 6459 } 6460 return ret; 6461 } 6462 case PR_SET_PDEATHSIG: 6463 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2), 6464 arg3, arg4, arg5)); 6465 case PR_GET_NAME: 6466 { 6467 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 6468 if (!name) { 6469 return -TARGET_EFAULT; 6470 } 6471 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name, 6472 arg3, arg4, arg5)); 6473 unlock_user(name, arg2, 16); 6474 return ret; 6475 } 6476 case PR_SET_NAME: 6477 { 6478 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 6479 if (!name) { 6480 return -TARGET_EFAULT; 6481 } 6482 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name, 6483 arg3, arg4, arg5)); 6484 unlock_user(name, arg2, 0); 6485 return ret; 6486 } 6487 case PR_GET_FP_MODE: 6488 return do_prctl_get_fp_mode(env); 6489 case PR_SET_FP_MODE: 6490 return do_prctl_set_fp_mode(env, arg2); 6491 case PR_SVE_GET_VL: 6492 return do_prctl_sve_get_vl(env); 6493 case PR_SVE_SET_VL: 6494 return do_prctl_sve_set_vl(env, arg2); 6495 case PR_SME_GET_VL: 6496 return do_prctl_sme_get_vl(env); 6497 case PR_SME_SET_VL: 6498 return do_prctl_sme_set_vl(env, arg2); 6499 case PR_PAC_RESET_KEYS: 6500 if (arg3 || arg4 || arg5) { 6501 return -TARGET_EINVAL; 6502 } 6503 return do_prctl_reset_keys(env, arg2); 6504 case PR_SET_TAGGED_ADDR_CTRL: 6505 if (arg3 || arg4 || arg5) { 6506 return -TARGET_EINVAL; 6507 } 6508 return do_prctl_set_tagged_addr_ctrl(env, arg2); 6509 case PR_GET_TAGGED_ADDR_CTRL: 6510 if (arg2 || arg3 || arg4 || arg5) { 6511 return -TARGET_EINVAL; 6512 } 6513 return do_prctl_get_tagged_addr_ctrl(env); 6514 6515 case PR_GET_UNALIGN: 6516 return do_prctl_get_unalign(env, arg2); 6517 case PR_SET_UNALIGN: 6518 return do_prctl_set_unalign(env, arg2); 6519 6520 case PR_CAP_AMBIENT: 6521 case PR_CAPBSET_READ: 6522 case PR_CAPBSET_DROP: 6523 case PR_GET_DUMPABLE: 6524 case PR_SET_DUMPABLE: 6525 case PR_GET_KEEPCAPS: 6526 case PR_SET_KEEPCAPS: 6527 case PR_GET_SECUREBITS: 6528 case PR_SET_SECUREBITS: 6529 case PR_GET_TIMING: 6530 case PR_SET_TIMING: 6531 case PR_GET_TIMERSLACK: 6532 case PR_SET_TIMERSLACK: 6533 case PR_MCE_KILL: 6534 case PR_MCE_KILL_GET: 6535 case PR_GET_NO_NEW_PRIVS: 6536 case PR_SET_NO_NEW_PRIVS: 6537 case PR_GET_IO_FLUSHER: 6538 case PR_SET_IO_FLUSHER: 6539 /* Some prctl options have no pointer arguments and we can pass on. */ 6540 return get_errno(prctl(option, arg2, arg3, arg4, arg5)); 6541 6542 case PR_GET_CHILD_SUBREAPER: 6543 case PR_SET_CHILD_SUBREAPER: 6544 case PR_GET_SPECULATION_CTRL: 6545 case PR_SET_SPECULATION_CTRL: 6546 case PR_GET_TID_ADDRESS: 6547 /* TODO */ 6548 return -TARGET_EINVAL; 6549 6550 case PR_GET_FPEXC: 6551 case PR_SET_FPEXC: 6552 /* Was used for SPE on PowerPC. */ 6553 return -TARGET_EINVAL; 6554 6555 case PR_GET_ENDIAN: 6556 case PR_SET_ENDIAN: 6557 case PR_GET_FPEMU: 6558 case PR_SET_FPEMU: 6559 case PR_SET_MM: 6560 case PR_GET_SECCOMP: 6561 case PR_SET_SECCOMP: 6562 case PR_SET_SYSCALL_USER_DISPATCH: 6563 case PR_GET_THP_DISABLE: 6564 case PR_SET_THP_DISABLE: 6565 case PR_GET_TSC: 6566 case PR_SET_TSC: 6567 /* Disable to prevent the target disabling stuff we need. 
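        (e.g. a guest seccomp filter installed via PR_SET_SECCOMP would
        constrain QEMU's own host syscalls as well)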
*/ 6568 return -TARGET_EINVAL; 6569 6570 default: 6571 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n", 6572 option); 6573 return -TARGET_EINVAL; 6574 } 6575 } 6576 6577 #define NEW_STACK_SIZE 0x40000 6578 6579 6580 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER; 6581 typedef struct { 6582 CPUArchState *env; 6583 pthread_mutex_t mutex; 6584 pthread_cond_t cond; 6585 pthread_t thread; 6586 uint32_t tid; 6587 abi_ulong child_tidptr; 6588 abi_ulong parent_tidptr; 6589 sigset_t sigmask; 6590 } new_thread_info; 6591 6592 static void *clone_func(void *arg) 6593 { 6594 new_thread_info *info = arg; 6595 CPUArchState *env; 6596 CPUState *cpu; 6597 TaskState *ts; 6598 6599 rcu_register_thread(); 6600 tcg_register_thread(); 6601 env = info->env; 6602 cpu = env_cpu(env); 6603 thread_cpu = cpu; 6604 ts = (TaskState *)cpu->opaque; 6605 info->tid = sys_gettid(); 6606 task_settid(ts); 6607 if (info->child_tidptr) 6608 put_user_u32(info->tid, info->child_tidptr); 6609 if (info->parent_tidptr) 6610 put_user_u32(info->tid, info->parent_tidptr); 6611 qemu_guest_random_seed_thread_part2(cpu->random_seed); 6612 /* Enable signals. */ 6613 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 6614 /* Signal to the parent that we're ready. */ 6615 pthread_mutex_lock(&info->mutex); 6616 pthread_cond_broadcast(&info->cond); 6617 pthread_mutex_unlock(&info->mutex); 6618 /* Wait until the parent has finished initializing the tls state. */ 6619 pthread_mutex_lock(&clone_lock); 6620 pthread_mutex_unlock(&clone_lock); 6621 cpu_loop(env); 6622 /* never exits */ 6623 return NULL; 6624 } 6625 6626 /* do_fork() Must return host values and target errnos (unlike most 6627 do_*() functions). */ 6628 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 6629 abi_ulong parent_tidptr, target_ulong newtls, 6630 abi_ulong child_tidptr) 6631 { 6632 CPUState *cpu = env_cpu(env); 6633 int ret; 6634 TaskState *ts; 6635 CPUState *new_cpu; 6636 CPUArchState *new_env; 6637 sigset_t sigmask; 6638 6639 flags &= ~CLONE_IGNORED_FLAGS; 6640 6641 /* Emulate vfork() with fork() */ 6642 if (flags & CLONE_VFORK) 6643 flags &= ~(CLONE_VFORK | CLONE_VM); 6644 6645 if (flags & CLONE_VM) { 6646 TaskState *parent_ts = (TaskState *)cpu->opaque; 6647 new_thread_info info; 6648 pthread_attr_t attr; 6649 6650 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) || 6651 (flags & CLONE_INVALID_THREAD_FLAGS)) { 6652 return -TARGET_EINVAL; 6653 } 6654 6655 ts = g_new0(TaskState, 1); 6656 init_task_state(ts); 6657 6658 /* Grab a mutex so that thread setup appears atomic. */ 6659 pthread_mutex_lock(&clone_lock); 6660 6661 /* 6662 * If this is our first additional thread, we need to ensure we 6663 * generate code for parallel execution and flush old translations. 6664 * Do this now so that the copy gets CF_PARALLEL too. 6665 */ 6666 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 6667 cpu->tcg_cflags |= CF_PARALLEL; 6668 tb_flush(cpu); 6669 } 6670 6671 /* we create a new CPU instance. */ 6672 new_env = cpu_copy(env); 6673 /* Init regs that differ from the parent. 
*/ 6674 cpu_clone_regs_child(new_env, newsp, flags); 6675 cpu_clone_regs_parent(env, flags); 6676 new_cpu = env_cpu(new_env); 6677 new_cpu->opaque = ts; 6678 ts->bprm = parent_ts->bprm; 6679 ts->info = parent_ts->info; 6680 ts->signal_mask = parent_ts->signal_mask; 6681 6682 if (flags & CLONE_CHILD_CLEARTID) { 6683 ts->child_tidptr = child_tidptr; 6684 } 6685 6686 if (flags & CLONE_SETTLS) { 6687 cpu_set_tls (new_env, newtls); 6688 } 6689 6690 memset(&info, 0, sizeof(info)); 6691 pthread_mutex_init(&info.mutex, NULL); 6692 pthread_mutex_lock(&info.mutex); 6693 pthread_cond_init(&info.cond, NULL); 6694 info.env = new_env; 6695 if (flags & CLONE_CHILD_SETTID) { 6696 info.child_tidptr = child_tidptr; 6697 } 6698 if (flags & CLONE_PARENT_SETTID) { 6699 info.parent_tidptr = parent_tidptr; 6700 } 6701 6702 ret = pthread_attr_init(&attr); 6703 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 6704 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 6705 /* It is not safe to deliver signals until the child has finished 6706 initializing, so temporarily block all signals. */ 6707 sigfillset(&sigmask); 6708 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 6709 cpu->random_seed = qemu_guest_random_seed_thread_part1(); 6710 6711 ret = pthread_create(&info.thread, &attr, clone_func, &info); 6712 /* TODO: Free new CPU state if thread creation failed. */ 6713 6714 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 6715 pthread_attr_destroy(&attr); 6716 if (ret == 0) { 6717 /* Wait for the child to initialize. */ 6718 pthread_cond_wait(&info.cond, &info.mutex); 6719 ret = info.tid; 6720 } else { 6721 ret = -1; 6722 } 6723 pthread_mutex_unlock(&info.mutex); 6724 pthread_cond_destroy(&info.cond); 6725 pthread_mutex_destroy(&info.mutex); 6726 pthread_mutex_unlock(&clone_lock); 6727 } else { 6728 /* if no CLONE_VM, we consider it is a fork */ 6729 if (flags & CLONE_INVALID_FORK_FLAGS) { 6730 return -TARGET_EINVAL; 6731 } 6732 6733 /* We can't support custom termination signals */ 6734 if ((flags & CSIGNAL) != TARGET_SIGCHLD) { 6735 return -TARGET_EINVAL; 6736 } 6737 6738 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open) 6739 if (flags & CLONE_PIDFD) { 6740 return -TARGET_EINVAL; 6741 } 6742 #endif 6743 6744 /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */ 6745 if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) { 6746 return -TARGET_EINVAL; 6747 } 6748 6749 if (block_signals()) { 6750 return -QEMU_ERESTARTSYS; 6751 } 6752 6753 fork_start(); 6754 ret = fork(); 6755 if (ret == 0) { 6756 /* Child Process. */ 6757 cpu_clone_regs_child(env, newsp, flags); 6758 fork_end(1); 6759 /* There is a race condition here. The parent process could 6760 theoretically read the TID in the child process before the child 6761 tid is set. This would require using either ptrace 6762 (not implemented) or having *_tidptr to point at a shared memory 6763 mapping. We can't repeat the spinlock hack used above because 6764 the child process gets its own copy of the lock. 
*/ 6765 if (flags & CLONE_CHILD_SETTID) 6766 put_user_u32(sys_gettid(), child_tidptr); 6767 if (flags & CLONE_PARENT_SETTID) 6768 put_user_u32(sys_gettid(), parent_tidptr); 6769 ts = (TaskState *)cpu->opaque; 6770 if (flags & CLONE_SETTLS) 6771 cpu_set_tls (env, newtls); 6772 if (flags & CLONE_CHILD_CLEARTID) 6773 ts->child_tidptr = child_tidptr; 6774 } else { 6775 cpu_clone_regs_parent(env, flags); 6776 if (flags & CLONE_PIDFD) { 6777 int pid_fd = 0; 6778 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) 6779 int pid_child = ret; 6780 pid_fd = pidfd_open(pid_child, 0); 6781 if (pid_fd >= 0) { 6782 fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD) 6783 | FD_CLOEXEC); 6784 } else { 6785 pid_fd = 0; 6786 } 6787 #endif 6788 put_user_u32(pid_fd, parent_tidptr); 6789 } 6790 fork_end(0); 6791 } 6792 g_assert(!cpu_in_exclusive_context(cpu)); 6793 } 6794 return ret; 6795 } 6796 6797 /* Warning: does not handle Linux-specific flags... */ 6798 static int target_to_host_fcntl_cmd(int cmd) 6799 { 6800 int ret; 6801 6802 switch(cmd) { 6803 case TARGET_F_DUPFD: 6804 case TARGET_F_GETFD: 6805 case TARGET_F_SETFD: 6806 case TARGET_F_GETFL: 6807 case TARGET_F_SETFL: 6808 case TARGET_F_OFD_GETLK: 6809 case TARGET_F_OFD_SETLK: 6810 case TARGET_F_OFD_SETLKW: 6811 ret = cmd; 6812 break; 6813 case TARGET_F_GETLK: 6814 ret = F_GETLK64; 6815 break; 6816 case TARGET_F_SETLK: 6817 ret = F_SETLK64; 6818 break; 6819 case TARGET_F_SETLKW: 6820 ret = F_SETLKW64; 6821 break; 6822 case TARGET_F_GETOWN: 6823 ret = F_GETOWN; 6824 break; 6825 case TARGET_F_SETOWN: 6826 ret = F_SETOWN; 6827 break; 6828 case TARGET_F_GETSIG: 6829 ret = F_GETSIG; 6830 break; 6831 case TARGET_F_SETSIG: 6832 ret = F_SETSIG; 6833 break; 6834 #if TARGET_ABI_BITS == 32 6835 case TARGET_F_GETLK64: 6836 ret = F_GETLK64; 6837 break; 6838 case TARGET_F_SETLK64: 6839 ret = F_SETLK64; 6840 break; 6841 case TARGET_F_SETLKW64: 6842 ret = F_SETLKW64; 6843 break; 6844 #endif 6845 case TARGET_F_SETLEASE: 6846 ret = F_SETLEASE; 6847 break; 6848 case TARGET_F_GETLEASE: 6849 ret = F_GETLEASE; 6850 break; 6851 #ifdef F_DUPFD_CLOEXEC 6852 case TARGET_F_DUPFD_CLOEXEC: 6853 ret = F_DUPFD_CLOEXEC; 6854 break; 6855 #endif 6856 case TARGET_F_NOTIFY: 6857 ret = F_NOTIFY; 6858 break; 6859 #ifdef F_GETOWN_EX 6860 case TARGET_F_GETOWN_EX: 6861 ret = F_GETOWN_EX; 6862 break; 6863 #endif 6864 #ifdef F_SETOWN_EX 6865 case TARGET_F_SETOWN_EX: 6866 ret = F_SETOWN_EX; 6867 break; 6868 #endif 6869 #ifdef F_SETPIPE_SZ 6870 case TARGET_F_SETPIPE_SZ: 6871 ret = F_SETPIPE_SZ; 6872 break; 6873 case TARGET_F_GETPIPE_SZ: 6874 ret = F_GETPIPE_SZ; 6875 break; 6876 #endif 6877 #ifdef F_ADD_SEALS 6878 case TARGET_F_ADD_SEALS: 6879 ret = F_ADD_SEALS; 6880 break; 6881 case TARGET_F_GET_SEALS: 6882 ret = F_GET_SEALS; 6883 break; 6884 #endif 6885 default: 6886 ret = -TARGET_EINVAL; 6887 break; 6888 } 6889 6890 #if defined(__powerpc64__) 6891 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14, 6892 * values the kernel does not support. The glibc fcntl wrapper adjusts 6893 * them to 5, 6 and 7 before making the syscall(). Since we make the 6894 * syscall directly, adjust to what the kernel supports.
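 * (i.e. map glibc's 12, 13 and 14 back down to the kernel's 5, 6 and 7)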
6895 */ 6896 if (ret >= F_GETLK64 && ret <= F_SETLKW64) { 6897 ret -= F_GETLK64 - 5; 6898 } 6899 #endif 6900 6901 return ret; 6902 } 6903 6904 #define FLOCK_TRANSTBL \ 6905 switch (type) { \ 6906 TRANSTBL_CONVERT(F_RDLCK); \ 6907 TRANSTBL_CONVERT(F_WRLCK); \ 6908 TRANSTBL_CONVERT(F_UNLCK); \ 6909 } 6910 6911 static int target_to_host_flock(int type) 6912 { 6913 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a 6914 FLOCK_TRANSTBL 6915 #undef TRANSTBL_CONVERT 6916 return -TARGET_EINVAL; 6917 } 6918 6919 static int host_to_target_flock(int type) 6920 { 6921 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a 6922 FLOCK_TRANSTBL 6923 #undef TRANSTBL_CONVERT 6924 /* if we don't know how to convert the value coming 6925 * from the host we copy to the target field as-is 6926 */ 6927 return type; 6928 } 6929 6930 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6931 abi_ulong target_flock_addr) 6932 { 6933 struct target_flock *target_fl; 6934 int l_type; 6935 6936 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6937 return -TARGET_EFAULT; 6938 } 6939 6940 __get_user(l_type, &target_fl->l_type); 6941 l_type = target_to_host_flock(l_type); 6942 if (l_type < 0) { 6943 return l_type; 6944 } 6945 fl->l_type = l_type; 6946 __get_user(fl->l_whence, &target_fl->l_whence); 6947 __get_user(fl->l_start, &target_fl->l_start); 6948 __get_user(fl->l_len, &target_fl->l_len); 6949 __get_user(fl->l_pid, &target_fl->l_pid); 6950 unlock_user_struct(target_fl, target_flock_addr, 0); 6951 return 0; 6952 } 6953 6954 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6955 const struct flock64 *fl) 6956 { 6957 struct target_flock *target_fl; 6958 short l_type; 6959 6960 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6961 return -TARGET_EFAULT; 6962 } 6963 6964 l_type = host_to_target_flock(fl->l_type); 6965 __put_user(l_type, &target_fl->l_type); 6966 __put_user(fl->l_whence, &target_fl->l_whence); 6967 __put_user(fl->l_start, &target_fl->l_start); 6968 __put_user(fl->l_len, &target_fl->l_len); 6969 __put_user(fl->l_pid, &target_fl->l_pid); 6970 unlock_user_struct(target_fl, target_flock_addr, 1); 6971 return 0; 6972 } 6973 6974 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6975 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6976 6977 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6978 struct target_oabi_flock64 { 6979 abi_short l_type; 6980 abi_short l_whence; 6981 abi_llong l_start; 6982 abi_llong l_len; 6983 abi_int l_pid; 6984 } QEMU_PACKED; 6985 6986 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6987 abi_ulong target_flock_addr) 6988 { 6989 struct target_oabi_flock64 *target_fl; 6990 int l_type; 6991 6992 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6993 return -TARGET_EFAULT; 6994 } 6995 6996 __get_user(l_type, &target_fl->l_type); 6997 l_type = target_to_host_flock(l_type); 6998 if (l_type < 0) { 6999 return l_type; 7000 } 7001 fl->l_type = l_type; 7002 __get_user(fl->l_whence, &target_fl->l_whence); 7003 __get_user(fl->l_start, &target_fl->l_start); 7004 __get_user(fl->l_len, &target_fl->l_len); 7005 __get_user(fl->l_pid, &target_fl->l_pid); 7006 unlock_user_struct(target_fl, target_flock_addr, 0); 7007 return 0; 7008 } 7009 7010 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 7011 const struct flock64 *fl) 7012 { 7013 struct target_oabi_flock64 *target_fl; 7014 short l_type; 
7015 7016 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 7017 return -TARGET_EFAULT; 7018 } 7019 7020 l_type = host_to_target_flock(fl->l_type); 7021 __put_user(l_type, &target_fl->l_type); 7022 __put_user(fl->l_whence, &target_fl->l_whence); 7023 __put_user(fl->l_start, &target_fl->l_start); 7024 __put_user(fl->l_len, &target_fl->l_len); 7025 __put_user(fl->l_pid, &target_fl->l_pid); 7026 unlock_user_struct(target_fl, target_flock_addr, 1); 7027 return 0; 7028 } 7029 #endif 7030 7031 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 7032 abi_ulong target_flock_addr) 7033 { 7034 struct target_flock64 *target_fl; 7035 int l_type; 7036 7037 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 7038 return -TARGET_EFAULT; 7039 } 7040 7041 __get_user(l_type, &target_fl->l_type); 7042 l_type = target_to_host_flock(l_type); 7043 if (l_type < 0) { 7044 return l_type; 7045 } 7046 fl->l_type = l_type; 7047 __get_user(fl->l_whence, &target_fl->l_whence); 7048 __get_user(fl->l_start, &target_fl->l_start); 7049 __get_user(fl->l_len, &target_fl->l_len); 7050 __get_user(fl->l_pid, &target_fl->l_pid); 7051 unlock_user_struct(target_fl, target_flock_addr, 0); 7052 return 0; 7053 } 7054 7055 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 7056 const struct flock64 *fl) 7057 { 7058 struct target_flock64 *target_fl; 7059 short l_type; 7060 7061 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 7062 return -TARGET_EFAULT; 7063 } 7064 7065 l_type = host_to_target_flock(fl->l_type); 7066 __put_user(l_type, &target_fl->l_type); 7067 __put_user(fl->l_whence, &target_fl->l_whence); 7068 __put_user(fl->l_start, &target_fl->l_start); 7069 __put_user(fl->l_len, &target_fl->l_len); 7070 __put_user(fl->l_pid, &target_fl->l_pid); 7071 unlock_user_struct(target_fl, target_flock_addr, 1); 7072 return 0; 7073 } 7074 7075 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 7076 { 7077 struct flock64 fl64; 7078 #ifdef F_GETOWN_EX 7079 struct f_owner_ex fox; 7080 struct target_f_owner_ex *target_fox; 7081 #endif 7082 abi_long ret; 7083 int host_cmd = target_to_host_fcntl_cmd(cmd); 7084 7085 if (host_cmd == -TARGET_EINVAL) 7086 return host_cmd; 7087 7088 switch(cmd) { 7089 case TARGET_F_GETLK: 7090 ret = copy_from_user_flock(&fl64, arg); 7091 if (ret) { 7092 return ret; 7093 } 7094 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7095 if (ret == 0) { 7096 ret = copy_to_user_flock(arg, &fl64); 7097 } 7098 break; 7099 7100 case TARGET_F_SETLK: 7101 case TARGET_F_SETLKW: 7102 ret = copy_from_user_flock(&fl64, arg); 7103 if (ret) { 7104 return ret; 7105 } 7106 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7107 break; 7108 7109 case TARGET_F_GETLK64: 7110 case TARGET_F_OFD_GETLK: 7111 ret = copy_from_user_flock64(&fl64, arg); 7112 if (ret) { 7113 return ret; 7114 } 7115 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7116 if (ret == 0) { 7117 ret = copy_to_user_flock64(arg, &fl64); 7118 } 7119 break; 7120 case TARGET_F_SETLK64: 7121 case TARGET_F_SETLKW64: 7122 case TARGET_F_OFD_SETLK: 7123 case TARGET_F_OFD_SETLKW: 7124 ret = copy_from_user_flock64(&fl64, arg); 7125 if (ret) { 7126 return ret; 7127 } 7128 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7129 break; 7130 7131 case TARGET_F_GETFL: 7132 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7133 if (ret >= 0) { 7134 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 7135 } 7136 break; 7137 7138 case TARGET_F_SETFL: 7139 ret = get_errno(safe_fcntl(fd, 
host_cmd, 7140 target_to_host_bitmask(arg, 7141 fcntl_flags_tbl))); 7142 break; 7143 7144 #ifdef F_GETOWN_EX 7145 case TARGET_F_GETOWN_EX: 7146 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7147 if (ret >= 0) { 7148 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 7149 return -TARGET_EFAULT; 7150 target_fox->type = tswap32(fox.type); 7151 target_fox->pid = tswap32(fox.pid); 7152 unlock_user_struct(target_fox, arg, 1); 7153 } 7154 break; 7155 #endif 7156 7157 #ifdef F_SETOWN_EX 7158 case TARGET_F_SETOWN_EX: 7159 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 7160 return -TARGET_EFAULT; 7161 fox.type = tswap32(target_fox->type); 7162 fox.pid = tswap32(target_fox->pid); 7163 unlock_user_struct(target_fox, arg, 0); 7164 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7165 break; 7166 #endif 7167 7168 case TARGET_F_SETSIG: 7169 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg))); 7170 break; 7171 7172 case TARGET_F_GETSIG: 7173 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg))); 7174 break; 7175 7176 case TARGET_F_SETOWN: 7177 case TARGET_F_GETOWN: 7178 case TARGET_F_SETLEASE: 7179 case TARGET_F_GETLEASE: 7180 case TARGET_F_SETPIPE_SZ: 7181 case TARGET_F_GETPIPE_SZ: 7182 case TARGET_F_ADD_SEALS: 7183 case TARGET_F_GET_SEALS: 7184 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7185 break; 7186 7187 default: 7188 ret = get_errno(safe_fcntl(fd, cmd, arg)); 7189 break; 7190 } 7191 return ret; 7192 } 7193 7194 #ifdef USE_UID16 7195 7196 static inline int high2lowuid(int uid) 7197 { 7198 if (uid > 65535) 7199 return 65534; 7200 else 7201 return uid; 7202 } 7203 7204 static inline int high2lowgid(int gid) 7205 { 7206 if (gid > 65535) 7207 return 65534; 7208 else 7209 return gid; 7210 } 7211 7212 static inline int low2highuid(int uid) 7213 { 7214 if ((int16_t)uid == -1) 7215 return -1; 7216 else 7217 return uid; 7218 } 7219 7220 static inline int low2highgid(int gid) 7221 { 7222 if ((int16_t)gid == -1) 7223 return -1; 7224 else 7225 return gid; 7226 } 7227 static inline int tswapid(int id) 7228 { 7229 return tswap16(id); 7230 } 7231 7232 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 7233 7234 #else /* !USE_UID16 */ 7235 static inline int high2lowuid(int uid) 7236 { 7237 return uid; 7238 } 7239 static inline int high2lowgid(int gid) 7240 { 7241 return gid; 7242 } 7243 static inline int low2highuid(int uid) 7244 { 7245 return uid; 7246 } 7247 static inline int low2highgid(int gid) 7248 { 7249 return gid; 7250 } 7251 static inline int tswapid(int id) 7252 { 7253 return tswap32(id); 7254 } 7255 7256 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 7257 7258 #endif /* USE_UID16 */ 7259 7260 /* We must do direct syscalls for setting UID/GID, because we want to 7261 * implement the Linux system call semantics of "change only for this thread", 7262 * not the libc/POSIX semantics of "change for all threads in process". 7263 * (See http://ewontfix.com/17/ for more details.) 7264 * We use the 32-bit version of the syscalls if present; if it is not 7265 * then either the host architecture supports 32-bit UIDs natively with 7266 * the standard syscall, or the 16-bit UID is the best we can do. 
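 * For example, glibc's setuid() broadcasts the ID change to every thread
 * in the process (via an internal signal) to satisfy POSIX, whereas the
 * raw syscalls below affect only the calling thread.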
7267 */ 7268 #ifdef __NR_setuid32 7269 #define __NR_sys_setuid __NR_setuid32 7270 #else 7271 #define __NR_sys_setuid __NR_setuid 7272 #endif 7273 #ifdef __NR_setgid32 7274 #define __NR_sys_setgid __NR_setgid32 7275 #else 7276 #define __NR_sys_setgid __NR_setgid 7277 #endif 7278 #ifdef __NR_setresuid32 7279 #define __NR_sys_setresuid __NR_setresuid32 7280 #else 7281 #define __NR_sys_setresuid __NR_setresuid 7282 #endif 7283 #ifdef __NR_setresgid32 7284 #define __NR_sys_setresgid __NR_setresgid32 7285 #else 7286 #define __NR_sys_setresgid __NR_setresgid 7287 #endif 7288 7289 _syscall1(int, sys_setuid, uid_t, uid) 7290 _syscall1(int, sys_setgid, gid_t, gid) 7291 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 7292 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 7293 7294 void syscall_init(void) 7295 { 7296 IOCTLEntry *ie; 7297 const argtype *arg_type; 7298 int size; 7299 7300 thunk_init(STRUCT_MAX); 7301 7302 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 7303 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 7304 #include "syscall_types.h" 7305 #undef STRUCT 7306 #undef STRUCT_SPECIAL 7307 7308 /* we patch the ioctl size if necessary. We rely on the fact that 7309 no ioctl has all the bits at '1' in the size field */ 7310 ie = ioctl_entries; 7311 while (ie->target_cmd != 0) { 7312 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 7313 TARGET_IOC_SIZEMASK) { 7314 arg_type = ie->arg_type; 7315 if (arg_type[0] != TYPE_PTR) { 7316 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 7317 ie->target_cmd); 7318 exit(1); 7319 } 7320 arg_type++; 7321 size = thunk_type_size(arg_type, 0); 7322 ie->target_cmd = (ie->target_cmd & 7323 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 7324 (size << TARGET_IOC_SIZESHIFT); 7325 } 7326 7327 /* automatic consistency check if same arch */ 7328 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 7329 (defined(__x86_64__) && defined(TARGET_X86_64)) 7330 if (unlikely(ie->target_cmd != ie->host_cmd)) { 7331 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 7332 ie->name, ie->target_cmd, ie->host_cmd); 7333 } 7334 #endif 7335 ie++; 7336 } 7337 } 7338 7339 #ifdef TARGET_NR_truncate64 7340 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1, 7341 abi_long arg2, 7342 abi_long arg3, 7343 abi_long arg4) 7344 { 7345 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) { 7346 arg2 = arg3; 7347 arg3 = arg4; 7348 } 7349 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 7350 } 7351 #endif 7352 7353 #ifdef TARGET_NR_ftruncate64 7354 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1, 7355 abi_long arg2, 7356 abi_long arg3, 7357 abi_long arg4) 7358 { 7359 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) { 7360 arg2 = arg3; 7361 arg3 = arg4; 7362 } 7363 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 7364 } 7365 #endif 7366 7367 #if defined(TARGET_NR_timer_settime) || \ 7368 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)) 7369 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its, 7370 abi_ulong target_addr) 7371 { 7372 if (target_to_host_timespec(&host_its->it_interval, target_addr + 7373 offsetof(struct target_itimerspec, 7374 it_interval)) || 7375 target_to_host_timespec(&host_its->it_value, target_addr + 7376 offsetof(struct 
target_itimerspec, 7377 it_value))) { 7378 return -TARGET_EFAULT; 7379 } 7380 7381 return 0; 7382 } 7383 #endif 7384 7385 #if defined(TARGET_NR_timer_settime64) || \ 7386 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) 7387 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its, 7388 abi_ulong target_addr) 7389 { 7390 if (target_to_host_timespec64(&host_its->it_interval, target_addr + 7391 offsetof(struct target__kernel_itimerspec, 7392 it_interval)) || 7393 target_to_host_timespec64(&host_its->it_value, target_addr + 7394 offsetof(struct target__kernel_itimerspec, 7395 it_value))) { 7396 return -TARGET_EFAULT; 7397 } 7398 7399 return 0; 7400 } 7401 #endif 7402 7403 #if ((defined(TARGET_NR_timerfd_gettime) || \ 7404 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \ 7405 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime) 7406 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 7407 struct itimerspec *host_its) 7408 { 7409 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7410 it_interval), 7411 &host_its->it_interval) || 7412 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec, 7413 it_value), 7414 &host_its->it_value)) { 7415 return -TARGET_EFAULT; 7416 } 7417 return 0; 7418 } 7419 #endif 7420 7421 #if ((defined(TARGET_NR_timerfd_gettime64) || \ 7422 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \ 7423 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64) 7424 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr, 7425 struct itimerspec *host_its) 7426 { 7427 if (host_to_target_timespec64(target_addr + 7428 offsetof(struct target__kernel_itimerspec, 7429 it_interval), 7430 &host_its->it_interval) || 7431 host_to_target_timespec64(target_addr + 7432 offsetof(struct target__kernel_itimerspec, 7433 it_value), 7434 &host_its->it_value)) { 7435 return -TARGET_EFAULT; 7436 } 7437 return 0; 7438 } 7439 #endif 7440 7441 #if defined(TARGET_NR_adjtimex) || \ 7442 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)) 7443 static inline abi_long target_to_host_timex(struct timex *host_tx, 7444 abi_long target_addr) 7445 { 7446 struct target_timex *target_tx; 7447 7448 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7449 return -TARGET_EFAULT; 7450 } 7451 7452 __get_user(host_tx->modes, &target_tx->modes); 7453 __get_user(host_tx->offset, &target_tx->offset); 7454 __get_user(host_tx->freq, &target_tx->freq); 7455 __get_user(host_tx->maxerror, &target_tx->maxerror); 7456 __get_user(host_tx->esterror, &target_tx->esterror); 7457 __get_user(host_tx->status, &target_tx->status); 7458 __get_user(host_tx->constant, &target_tx->constant); 7459 __get_user(host_tx->precision, &target_tx->precision); 7460 __get_user(host_tx->tolerance, &target_tx->tolerance); 7461 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7462 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7463 __get_user(host_tx->tick, &target_tx->tick); 7464 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7465 __get_user(host_tx->jitter, &target_tx->jitter); 7466 __get_user(host_tx->shift, &target_tx->shift); 7467 __get_user(host_tx->stabil, &target_tx->stabil); 7468 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7469 __get_user(host_tx->calcnt, &target_tx->calcnt); 7470 __get_user(host_tx->errcnt, &target_tx->errcnt); 7471 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7472 
__get_user(host_tx->tai, &target_tx->tai); 7473 7474 unlock_user_struct(target_tx, target_addr, 0); 7475 return 0; 7476 } 7477 7478 static inline abi_long host_to_target_timex(abi_long target_addr, 7479 struct timex *host_tx) 7480 { 7481 struct target_timex *target_tx; 7482 7483 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7484 return -TARGET_EFAULT; 7485 } 7486 7487 __put_user(host_tx->modes, &target_tx->modes); 7488 __put_user(host_tx->offset, &target_tx->offset); 7489 __put_user(host_tx->freq, &target_tx->freq); 7490 __put_user(host_tx->maxerror, &target_tx->maxerror); 7491 __put_user(host_tx->esterror, &target_tx->esterror); 7492 __put_user(host_tx->status, &target_tx->status); 7493 __put_user(host_tx->constant, &target_tx->constant); 7494 __put_user(host_tx->precision, &target_tx->precision); 7495 __put_user(host_tx->tolerance, &target_tx->tolerance); 7496 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec); 7497 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec); 7498 __put_user(host_tx->tick, &target_tx->tick); 7499 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7500 __put_user(host_tx->jitter, &target_tx->jitter); 7501 __put_user(host_tx->shift, &target_tx->shift); 7502 __put_user(host_tx->stabil, &target_tx->stabil); 7503 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7504 __put_user(host_tx->calcnt, &target_tx->calcnt); 7505 __put_user(host_tx->errcnt, &target_tx->errcnt); 7506 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7507 __put_user(host_tx->tai, &target_tx->tai); 7508 7509 unlock_user_struct(target_tx, target_addr, 1); 7510 return 0; 7511 } 7512 #endif 7513 7514 7515 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 7516 static inline abi_long target_to_host_timex64(struct timex *host_tx, 7517 abi_long target_addr) 7518 { 7519 struct target__kernel_timex *target_tx; 7520 7521 if (copy_from_user_timeval64(&host_tx->time, target_addr + 7522 offsetof(struct target__kernel_timex, 7523 time))) { 7524 return -TARGET_EFAULT; 7525 } 7526 7527 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) { 7528 return -TARGET_EFAULT; 7529 } 7530 7531 __get_user(host_tx->modes, &target_tx->modes); 7532 __get_user(host_tx->offset, &target_tx->offset); 7533 __get_user(host_tx->freq, &target_tx->freq); 7534 __get_user(host_tx->maxerror, &target_tx->maxerror); 7535 __get_user(host_tx->esterror, &target_tx->esterror); 7536 __get_user(host_tx->status, &target_tx->status); 7537 __get_user(host_tx->constant, &target_tx->constant); 7538 __get_user(host_tx->precision, &target_tx->precision); 7539 __get_user(host_tx->tolerance, &target_tx->tolerance); 7540 __get_user(host_tx->tick, &target_tx->tick); 7541 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7542 __get_user(host_tx->jitter, &target_tx->jitter); 7543 __get_user(host_tx->shift, &target_tx->shift); 7544 __get_user(host_tx->stabil, &target_tx->stabil); 7545 __get_user(host_tx->jitcnt, &target_tx->jitcnt); 7546 __get_user(host_tx->calcnt, &target_tx->calcnt); 7547 __get_user(host_tx->errcnt, &target_tx->errcnt); 7548 __get_user(host_tx->stbcnt, &target_tx->stbcnt); 7549 __get_user(host_tx->tai, &target_tx->tai); 7550 7551 unlock_user_struct(target_tx, target_addr, 0); 7552 return 0; 7553 } 7554 7555 static inline abi_long host_to_target_timex64(abi_long target_addr, 7556 struct timex *host_tx) 7557 { 7558 struct target__kernel_timex *target_tx; 7559 7560 if (copy_to_user_timeval64(target_addr + 7561 offsetof(struct target__kernel_timex, time), 7562 &host_tx->time)) { 
7563 return -TARGET_EFAULT; 7564 } 7565 7566 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) { 7567 return -TARGET_EFAULT; 7568 } 7569 7570 __put_user(host_tx->modes, &target_tx->modes); 7571 __put_user(host_tx->offset, &target_tx->offset); 7572 __put_user(host_tx->freq, &target_tx->freq); 7573 __put_user(host_tx->maxerror, &target_tx->maxerror); 7574 __put_user(host_tx->esterror, &target_tx->esterror); 7575 __put_user(host_tx->status, &target_tx->status); 7576 __put_user(host_tx->constant, &target_tx->constant); 7577 __put_user(host_tx->precision, &target_tx->precision); 7578 __put_user(host_tx->tolerance, &target_tx->tolerance); 7579 __put_user(host_tx->tick, &target_tx->tick); 7580 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq); 7581 __put_user(host_tx->jitter, &target_tx->jitter); 7582 __put_user(host_tx->shift, &target_tx->shift); 7583 __put_user(host_tx->stabil, &target_tx->stabil); 7584 __put_user(host_tx->jitcnt, &target_tx->jitcnt); 7585 __put_user(host_tx->calcnt, &target_tx->calcnt); 7586 __put_user(host_tx->errcnt, &target_tx->errcnt); 7587 __put_user(host_tx->stbcnt, &target_tx->stbcnt); 7588 __put_user(host_tx->tai, &target_tx->tai); 7589 7590 unlock_user_struct(target_tx, target_addr, 1); 7591 return 0; 7592 } 7593 #endif 7594 7595 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID 7596 #define sigev_notify_thread_id _sigev_un._tid 7597 #endif 7598 7599 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 7600 abi_ulong target_addr) 7601 { 7602 struct target_sigevent *target_sevp; 7603 7604 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 7605 return -TARGET_EFAULT; 7606 } 7607 7608 /* This union is awkward on 64 bit systems because it has a 32 bit 7609 * integer and a pointer in it; we follow the conversion approach 7610 * used for handling sigval types in signal.c so the guest should get 7611 * the correct value back even if we did a 64 bit byteswap and it's 7612 * using the 32 bit integer. 7613 */ 7614 host_sevp->sigev_value.sival_ptr = 7615 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 7616 host_sevp->sigev_signo = 7617 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 7618 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 7619 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid); 7620 7621 unlock_user_struct(target_sevp, target_addr, 1); 7622 return 0; 7623 } 7624 7625 #if defined(TARGET_NR_mlockall) 7626 static inline int target_to_host_mlockall_arg(int arg) 7627 { 7628 int result = 0; 7629 7630 if (arg & TARGET_MCL_CURRENT) { 7631 result |= MCL_CURRENT; 7632 } 7633 if (arg & TARGET_MCL_FUTURE) { 7634 result |= MCL_FUTURE; 7635 } 7636 #ifdef MCL_ONFAULT 7637 if (arg & TARGET_MCL_ONFAULT) { 7638 result |= MCL_ONFAULT; 7639 } 7640 #endif 7641 7642 return result; 7643 } 7644 #endif 7645 7646 static inline int target_to_host_msync_arg(abi_long arg) 7647 { 7648 return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) | 7649 ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) | 7650 ((arg & TARGET_MS_SYNC) ? 
MS_SYNC : 0) | 7651 (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC)); 7652 } 7653 7654 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \ 7655 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \ 7656 defined(TARGET_NR_newfstatat)) 7657 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env, 7658 abi_ulong target_addr, 7659 struct stat *host_st) 7660 { 7661 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 7662 if (cpu_env->eabi) { 7663 struct target_eabi_stat64 *target_st; 7664 7665 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7666 return -TARGET_EFAULT; 7667 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 7668 __put_user(host_st->st_dev, &target_st->st_dev); 7669 __put_user(host_st->st_ino, &target_st->st_ino); 7670 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7671 __put_user(host_st->st_ino, &target_st->__st_ino); 7672 #endif 7673 __put_user(host_st->st_mode, &target_st->st_mode); 7674 __put_user(host_st->st_nlink, &target_st->st_nlink); 7675 __put_user(host_st->st_uid, &target_st->st_uid); 7676 __put_user(host_st->st_gid, &target_st->st_gid); 7677 __put_user(host_st->st_rdev, &target_st->st_rdev); 7678 __put_user(host_st->st_size, &target_st->st_size); 7679 __put_user(host_st->st_blksize, &target_st->st_blksize); 7680 __put_user(host_st->st_blocks, &target_st->st_blocks); 7681 __put_user(host_st->st_atime, &target_st->target_st_atime); 7682 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7683 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7684 #ifdef HAVE_STRUCT_STAT_ST_ATIM 7685 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); 7686 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); 7687 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); 7688 #endif 7689 unlock_user_struct(target_st, target_addr, 1); 7690 } else 7691 #endif 7692 { 7693 #if defined(TARGET_HAS_STRUCT_STAT64) 7694 struct target_stat64 *target_st; 7695 #else 7696 struct target_stat *target_st; 7697 #endif 7698 7699 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 7700 return -TARGET_EFAULT; 7701 memset(target_st, 0, sizeof(*target_st)); 7702 __put_user(host_st->st_dev, &target_st->st_dev); 7703 __put_user(host_st->st_ino, &target_st->st_ino); 7704 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 7705 __put_user(host_st->st_ino, &target_st->__st_ino); 7706 #endif 7707 __put_user(host_st->st_mode, &target_st->st_mode); 7708 __put_user(host_st->st_nlink, &target_st->st_nlink); 7709 __put_user(host_st->st_uid, &target_st->st_uid); 7710 __put_user(host_st->st_gid, &target_st->st_gid); 7711 __put_user(host_st->st_rdev, &target_st->st_rdev); 7712 /* XXX: better use of kernel struct */ 7713 __put_user(host_st->st_size, &target_st->st_size); 7714 __put_user(host_st->st_blksize, &target_st->st_blksize); 7715 __put_user(host_st->st_blocks, &target_st->st_blocks); 7716 __put_user(host_st->st_atime, &target_st->target_st_atime); 7717 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 7718 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 7719 #ifdef HAVE_STRUCT_STAT_ST_ATIM 7720 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec); 7721 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec); 7722 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec); 7723 #endif 7724 unlock_user_struct(target_st, target_addr, 1); 7725 } 7726 7727 return 0; 7728 } 7729 #endif 7730 7731 #if defined(TARGET_NR_statx) 
&& defined(__NR_statx) 7732 static inline abi_long host_to_target_statx(struct target_statx *host_stx, 7733 abi_ulong target_addr) 7734 { 7735 struct target_statx *target_stx; 7736 7737 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) { 7738 return -TARGET_EFAULT; 7739 } 7740 memset(target_stx, 0, sizeof(*target_stx)); 7741 7742 __put_user(host_stx->stx_mask, &target_stx->stx_mask); 7743 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize); 7744 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes); 7745 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink); 7746 __put_user(host_stx->stx_uid, &target_stx->stx_uid); 7747 __put_user(host_stx->stx_gid, &target_stx->stx_gid); 7748 __put_user(host_stx->stx_mode, &target_stx->stx_mode); 7749 __put_user(host_stx->stx_ino, &target_stx->stx_ino); 7750 __put_user(host_stx->stx_size, &target_stx->stx_size); 7751 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks); 7752 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask); 7753 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec); 7754 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec); 7755 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec); 7756 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec); 7757 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec); 7758 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec); 7759 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec); 7760 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec); 7761 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major); 7762 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor); 7763 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major); 7764 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor); 7765 7766 unlock_user_struct(target_stx, target_addr, 1); 7767 7768 return 0; 7769 } 7770 #endif 7771 7772 static int do_sys_futex(int *uaddr, int op, int val, 7773 const struct timespec *timeout, int *uaddr2, 7774 int val3) 7775 { 7776 #if HOST_LONG_BITS == 64 7777 #if defined(__NR_futex) 7778 /* always a 64-bit time_t, it doesn't define _time64 version */ 7779 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7780 7781 #endif 7782 #else /* HOST_LONG_BITS == 64 */ 7783 #if defined(__NR_futex_time64) 7784 if (sizeof(timeout->tv_sec) == 8) { 7785 /* _time64 function on 32bit arch */ 7786 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3); 7787 } 7788 #endif 7789 #if defined(__NR_futex) 7790 /* old function on 32bit arch */ 7791 return sys_futex(uaddr, op, val, timeout, uaddr2, val3); 7792 #endif 7793 #endif /* HOST_LONG_BITS == 64 */ 7794 g_assert_not_reached(); 7795 } 7796 7797 static int do_safe_futex(int *uaddr, int op, int val, 7798 const struct timespec *timeout, int *uaddr2, 7799 int val3) 7800 { 7801 #if HOST_LONG_BITS == 64 7802 #if defined(__NR_futex) 7803 /* always a 64-bit time_t, it doesn't define _time64 version */ 7804 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3)); 7805 #endif 7806 #else /* HOST_LONG_BITS == 64 */ 7807 #if defined(__NR_futex_time64) 7808 if (sizeof(timeout->tv_sec) == 8) { 7809 /* _time64 function on 32bit arch */ 7810 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2, 7811 val3)); 7812 } 7813 #endif 7814 #if defined(__NR_futex) 7815 /* old function on 32bit arch */ 7816 return 
get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky; they would probably be useless anyway, since guest atomic
   operations won't work either. */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler. We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ?
target_to_host_timespec64(pts, timeout) 7887 : target_to_host_timespec(pts, timeout)) { 7888 return -TARGET_EFAULT; 7889 } 7890 } 7891 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3); 7892 } 7893 #endif 7894 7895 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7896 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 7897 abi_long handle, abi_long mount_id, 7898 abi_long flags) 7899 { 7900 struct file_handle *target_fh; 7901 struct file_handle *fh; 7902 int mid = 0; 7903 abi_long ret; 7904 char *name; 7905 unsigned int size, total_size; 7906 7907 if (get_user_s32(size, handle)) { 7908 return -TARGET_EFAULT; 7909 } 7910 7911 name = lock_user_string(pathname); 7912 if (!name) { 7913 return -TARGET_EFAULT; 7914 } 7915 7916 total_size = sizeof(struct file_handle) + size; 7917 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 7918 if (!target_fh) { 7919 unlock_user(name, pathname, 0); 7920 return -TARGET_EFAULT; 7921 } 7922 7923 fh = g_malloc0(total_size); 7924 fh->handle_bytes = size; 7925 7926 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 7927 unlock_user(name, pathname, 0); 7928 7929 /* man name_to_handle_at(2): 7930 * Other than the use of the handle_bytes field, the caller should treat 7931 * the file_handle structure as an opaque data type 7932 */ 7933 7934 memcpy(target_fh, fh, total_size); 7935 target_fh->handle_bytes = tswap32(fh->handle_bytes); 7936 target_fh->handle_type = tswap32(fh->handle_type); 7937 g_free(fh); 7938 unlock_user(target_fh, handle, total_size); 7939 7940 if (put_user_s32(mid, mount_id)) { 7941 return -TARGET_EFAULT; 7942 } 7943 7944 return ret; 7945 7946 } 7947 #endif 7948 7949 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 7950 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 7951 abi_long flags) 7952 { 7953 struct file_handle *target_fh; 7954 struct file_handle *fh; 7955 unsigned int size, total_size; 7956 abi_long ret; 7957 7958 if (get_user_s32(size, handle)) { 7959 return -TARGET_EFAULT; 7960 } 7961 7962 total_size = sizeof(struct file_handle) + size; 7963 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 7964 if (!target_fh) { 7965 return -TARGET_EFAULT; 7966 } 7967 7968 fh = g_memdup(target_fh, total_size); 7969 fh->handle_bytes = size; 7970 fh->handle_type = tswap32(target_fh->handle_type); 7971 7972 ret = get_errno(open_by_handle_at(mount_fd, fh, 7973 target_to_host_bitmask(flags, fcntl_flags_tbl))); 7974 7975 g_free(fh); 7976 7977 unlock_user(target_fh, handle, total_size); 7978 7979 return ret; 7980 } 7981 #endif 7982 7983 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 7984 7985 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 7986 { 7987 int host_flags; 7988 target_sigset_t *target_mask; 7989 sigset_t host_mask; 7990 abi_long ret; 7991 7992 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) { 7993 return -TARGET_EINVAL; 7994 } 7995 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 7996 return -TARGET_EFAULT; 7997 } 7998 7999 target_to_host_sigset(&host_mask, target_mask); 8000 8001 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 8002 8003 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 8004 if (ret >= 0) { 8005 fd_trans_register(ret, &target_signalfd_trans); 8006 } 8007 8008 unlock_user_struct(target_mask, mask, 0); 8009 8010 return ret; 8011 } 8012 #endif 8013 8014 /* Map host to target signal numbers for the wait family of 
syscalls. 8015 Assume all other status bits are the same. */ 8016 int host_to_target_waitstatus(int status) 8017 { 8018 if (WIFSIGNALED(status)) { 8019 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 8020 } 8021 if (WIFSTOPPED(status)) { 8022 return (host_to_target_signal(WSTOPSIG(status)) << 8) 8023 | (status & 0xff); 8024 } 8025 return status; 8026 } 8027 8028 static int open_self_cmdline(CPUArchState *cpu_env, int fd) 8029 { 8030 CPUState *cpu = env_cpu(cpu_env); 8031 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 8032 int i; 8033 8034 for (i = 0; i < bprm->argc; i++) { 8035 size_t len = strlen(bprm->argv[i]) + 1; 8036 8037 if (write(fd, bprm->argv[i], len) != len) { 8038 return -1; 8039 } 8040 } 8041 8042 return 0; 8043 } 8044 8045 static int open_self_maps(CPUArchState *cpu_env, int fd) 8046 { 8047 CPUState *cpu = env_cpu(cpu_env); 8048 TaskState *ts = cpu->opaque; 8049 GSList *map_info = read_self_maps(); 8050 GSList *s; 8051 int count; 8052 8053 for (s = map_info; s; s = g_slist_next(s)) { 8054 MapInfo *e = (MapInfo *) s->data; 8055 8056 if (h2g_valid(e->start)) { 8057 unsigned long min = e->start; 8058 unsigned long max = e->end; 8059 int flags = page_get_flags(h2g(min)); 8060 const char *path; 8061 8062 max = h2g_valid(max - 1) ? 8063 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1; 8064 8065 if (page_check_range(h2g(min), max - min, flags) == -1) { 8066 continue; 8067 } 8068 8069 #ifdef TARGET_HPPA 8070 if (h2g(max) == ts->info->stack_limit) { 8071 #else 8072 if (h2g(min) == ts->info->stack_limit) { 8073 #endif 8074 path = "[stack]"; 8075 } else { 8076 path = e->path; 8077 } 8078 8079 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr 8080 " %c%c%c%c %08" PRIx64 " %s %"PRId64, 8081 h2g(min), h2g(max - 1) + 1, 8082 (flags & PAGE_READ) ? 'r' : '-', 8083 (flags & PAGE_WRITE_ORG) ? 'w' : '-', 8084 (flags & PAGE_EXEC) ? 'x' : '-', 8085 e->is_priv ? 'p' : 's', 8086 (uint64_t) e->offset, e->dev, e->inode); 8087 if (path) { 8088 dprintf(fd, "%*s%s\n", 73 - count, "", path); 8089 } else { 8090 dprintf(fd, "\n"); 8091 } 8092 } 8093 } 8094 8095 free_self_maps(map_info); 8096 8097 #ifdef TARGET_VSYSCALL_PAGE 8098 /* 8099 * We only support execution from the vsyscall page. 8100 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3. 8101 */ 8102 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx 8103 " --xp 00000000 00:00 0", 8104 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE); 8105 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]"); 8106 #endif 8107 8108 return 0; 8109 } 8110 8111 static int open_self_stat(CPUArchState *cpu_env, int fd) 8112 { 8113 CPUState *cpu = env_cpu(cpu_env); 8114 TaskState *ts = cpu->opaque; 8115 g_autoptr(GString) buf = g_string_new(NULL); 8116 int i; 8117 8118 for (i = 0; i < 44; i++) { 8119 if (i == 0) { 8120 /* pid */ 8121 g_string_printf(buf, FMT_pid " ", getpid()); 8122 } else if (i == 1) { 8123 /* app name */ 8124 gchar *bin = g_strrstr(ts->bprm->argv[0], "/"); 8125 bin = bin ? 
bin + 1 : ts->bprm->argv[0]; 8126 g_string_printf(buf, "(%.15s) ", bin); 8127 } else if (i == 2) { 8128 /* task state */ 8129 g_string_assign(buf, "R "); /* we are running right now */ 8130 } else if (i == 3) { 8131 /* ppid */ 8132 g_string_printf(buf, FMT_pid " ", getppid()); 8133 } else if (i == 21) { 8134 /* starttime */ 8135 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime); 8136 } else if (i == 27) { 8137 /* stack bottom */ 8138 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack); 8139 } else { 8140 /* for the rest, there is MasterCard */ 8141 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' '); 8142 } 8143 8144 if (write(fd, buf->str, buf->len) != buf->len) { 8145 return -1; 8146 } 8147 } 8148 8149 return 0; 8150 } 8151 8152 static int open_self_auxv(CPUArchState *cpu_env, int fd) 8153 { 8154 CPUState *cpu = env_cpu(cpu_env); 8155 TaskState *ts = cpu->opaque; 8156 abi_ulong auxv = ts->info->saved_auxv; 8157 abi_ulong len = ts->info->auxv_len; 8158 char *ptr; 8159 8160 /* 8161 * Auxiliary vector is stored in target process stack. 8162 * read in whole auxv vector and copy it to file 8163 */ 8164 ptr = lock_user(VERIFY_READ, auxv, len, 0); 8165 if (ptr != NULL) { 8166 while (len > 0) { 8167 ssize_t r; 8168 r = write(fd, ptr, len); 8169 if (r <= 0) { 8170 break; 8171 } 8172 len -= r; 8173 ptr += r; 8174 } 8175 lseek(fd, 0, SEEK_SET); 8176 unlock_user(ptr, auxv, len); 8177 } 8178 8179 return 0; 8180 } 8181 8182 static int is_proc_myself(const char *filename, const char *entry) 8183 { 8184 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 8185 filename += strlen("/proc/"); 8186 if (!strncmp(filename, "self/", strlen("self/"))) { 8187 filename += strlen("self/"); 8188 } else if (*filename >= '1' && *filename <= '9') { 8189 char myself[80]; 8190 snprintf(myself, sizeof(myself), "%d/", getpid()); 8191 if (!strncmp(filename, myself, strlen(myself))) { 8192 filename += strlen(myself); 8193 } else { 8194 return 0; 8195 } 8196 } else { 8197 return 0; 8198 } 8199 if (!strcmp(filename, entry)) { 8200 return 1; 8201 } 8202 } 8203 return 0; 8204 } 8205 8206 static void excp_dump_file(FILE *logfile, CPUArchState *env, 8207 const char *fmt, int code) 8208 { 8209 if (logfile) { 8210 CPUState *cs = env_cpu(env); 8211 8212 fprintf(logfile, fmt, code); 8213 fprintf(logfile, "Failing executable: %s\n", exec_path); 8214 cpu_dump_state(cs, logfile, 0); 8215 open_self_maps(env, fileno(logfile)); 8216 } 8217 } 8218 8219 void target_exception_dump(CPUArchState *env, const char *fmt, int code) 8220 { 8221 /* dump to console */ 8222 excp_dump_file(stderr, env, fmt, code); 8223 8224 /* dump to log file */ 8225 if (qemu_log_separate()) { 8226 FILE *logfile = qemu_log_trylock(); 8227 8228 excp_dump_file(logfile, env, fmt, code); 8229 qemu_log_unlock(logfile); 8230 } 8231 } 8232 8233 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \ 8234 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \ 8235 defined(TARGET_RISCV) || defined(TARGET_S390X) 8236 static int is_proc(const char *filename, const char *entry) 8237 { 8238 return strcmp(filename, entry) == 0; 8239 } 8240 #endif 8241 8242 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN 8243 static int open_net_route(CPUArchState *cpu_env, int fd) 8244 { 8245 FILE *fp; 8246 char *line = NULL; 8247 size_t len = 0; 8248 ssize_t read; 8249 8250 fp = fopen("/proc/net/route", "r"); 8251 if (fp == NULL) { 8252 return -1; 8253 } 8254 8255 /* read header */ 8256 8257 read = getline(&line, &len, fp); 8258 dprintf(fd, "%s", line); 8259 8260 /* read 
routes */ 8261 8262 while ((read = getline(&line, &len, fp)) != -1) { 8263 char iface[16]; 8264 uint32_t dest, gw, mask; 8265 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 8266 int fields; 8267 8268 fields = sscanf(line, 8269 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 8270 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 8271 &mask, &mtu, &window, &irtt); 8272 if (fields != 11) { 8273 continue; 8274 } 8275 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 8276 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 8277 metric, tswap32(mask), mtu, window, irtt); 8278 } 8279 8280 free(line); 8281 fclose(fp); 8282 8283 return 0; 8284 } 8285 #endif 8286 8287 #if defined(TARGET_SPARC) 8288 static int open_cpuinfo(CPUArchState *cpu_env, int fd) 8289 { 8290 dprintf(fd, "type\t\t: sun4u\n"); 8291 return 0; 8292 } 8293 #endif 8294 8295 #if defined(TARGET_HPPA) 8296 static int open_cpuinfo(CPUArchState *cpu_env, int fd) 8297 { 8298 int i, num_cpus; 8299 8300 num_cpus = sysconf(_SC_NPROCESSORS_ONLN); 8301 for (i = 0; i < num_cpus; i++) { 8302 dprintf(fd, "processor\t: %d\n", i); 8303 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n"); 8304 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n"); 8305 dprintf(fd, "capabilities\t: os32\n"); 8306 dprintf(fd, "model\t\t: 9000/778/B160L - " 8307 "Merlin L2 160 QEMU (9000/778/B160L)\n\n"); 8308 } 8309 return 0; 8310 } 8311 #endif 8312 8313 #if defined(TARGET_RISCV) 8314 static int open_cpuinfo(CPUArchState *cpu_env, int fd) 8315 { 8316 int i; 8317 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN); 8318 RISCVCPU *cpu = env_archcpu(cpu_env); 8319 const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env); 8320 char *isa_string = riscv_isa_string(cpu); 8321 const char *mmu; 8322 8323 if (cfg->mmu) { 8324 mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48"; 8325 } else { 8326 mmu = "none"; 8327 } 8328 8329 for (i = 0; i < num_cpus; i++) { 8330 dprintf(fd, "processor\t: %d\n", i); 8331 dprintf(fd, "hart\t\t: %d\n", i); 8332 dprintf(fd, "isa\t\t: %s\n", isa_string); 8333 dprintf(fd, "mmu\t\t: %s\n", mmu); 8334 dprintf(fd, "uarch\t\t: qemu\n\n"); 8335 } 8336 8337 g_free(isa_string); 8338 return 0; 8339 } 8340 #endif 8341 8342 #if defined(TARGET_S390X) 8343 /* 8344 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would 8345 * show in /proc/cpuinfo. 8346 * 8347 * Skip the following in order to match the missing support in op_ecag(): 8348 * - show_cacheinfo(). 8349 * - show_cpu_topology(). 8350 * - show_cpu_mhz(). 8351 * 8352 * Use fixed values for certain fields: 8353 * - bogomips per cpu - from a qemu-system-s390x run. 8354 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported. 8355 * 8356 * Keep the code structure close to arch/s390/kernel/processor.c. 
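 *
 * As a rough illustration only (the exact values depend on the configured
 * CPU model, the advertised hwcaps and the host CPU count), the functions
 * below produce output of this general shape:
 *
 *   vendor_id       : IBM/S390
 *   # processors    : 1
 *   bogomips per cpu: 13370.00
 *   max thread id   : 0
 *   features        : ...
 *   facilities      : 0 1 2 ...
 *   processor 0: version = VV, identification = IIIIII, machine = MMMM
 *
 *   cpu number      : 0
 *   version         : VV
 *   identification  : IIIIII
 *   machine         : MMMM
 *
 * where VV, IIIIII and MMMM stand in for the model-dependent hex values.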
8357 */ 8358 8359 static void show_facilities(int fd) 8360 { 8361 size_t sizeof_stfl_bytes = 2048; 8362 g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes); 8363 unsigned int bit; 8364 8365 dprintf(fd, "facilities :"); 8366 s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes); 8367 for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) { 8368 if (test_be_bit(bit, stfl_bytes)) { 8369 dprintf(fd, " %d", bit); 8370 } 8371 } 8372 dprintf(fd, "\n"); 8373 } 8374 8375 static int cpu_ident(unsigned long n) 8376 { 8377 return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS, 8378 n); 8379 } 8380 8381 static void show_cpu_summary(CPUArchState *cpu_env, int fd) 8382 { 8383 S390CPUModel *model = env_archcpu(cpu_env)->model; 8384 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN); 8385 uint32_t elf_hwcap = get_elf_hwcap(); 8386 const char *hwcap_str; 8387 int i; 8388 8389 dprintf(fd, "vendor_id : IBM/S390\n" 8390 "# processors : %i\n" 8391 "bogomips per cpu: 13370.00\n", 8392 num_cpus); 8393 dprintf(fd, "max thread id : 0\n"); 8394 dprintf(fd, "features\t: "); 8395 for (i = 0; i < sizeof(elf_hwcap) * 8; i++) { 8396 if (!(elf_hwcap & (1 << i))) { 8397 continue; 8398 } 8399 hwcap_str = elf_hwcap_str(i); 8400 if (hwcap_str) { 8401 dprintf(fd, "%s ", hwcap_str); 8402 } 8403 } 8404 dprintf(fd, "\n"); 8405 show_facilities(fd); 8406 for (i = 0; i < num_cpus; i++) { 8407 dprintf(fd, "processor %d: " 8408 "version = %02X, " 8409 "identification = %06X, " 8410 "machine = %04X\n", 8411 i, model->cpu_ver, cpu_ident(i), model->def->type); 8412 } 8413 } 8414 8415 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n) 8416 { 8417 S390CPUModel *model = env_archcpu(cpu_env)->model; 8418 8419 dprintf(fd, "version : %02X\n", model->cpu_ver); 8420 dprintf(fd, "identification : %06X\n", cpu_ident(n)); 8421 dprintf(fd, "machine : %04X\n", model->def->type); 8422 } 8423 8424 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n) 8425 { 8426 dprintf(fd, "\ncpu number : %ld\n", n); 8427 show_cpu_ids(cpu_env, fd, n); 8428 } 8429 8430 static int open_cpuinfo(CPUArchState *cpu_env, int fd) 8431 { 8432 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN); 8433 int i; 8434 8435 show_cpu_summary(cpu_env, fd); 8436 for (i = 0; i < num_cpus; i++) { 8437 show_cpuinfo(cpu_env, fd, i); 8438 } 8439 return 0; 8440 } 8441 #endif 8442 8443 #if defined(TARGET_M68K) 8444 static int open_hardware(CPUArchState *cpu_env, int fd) 8445 { 8446 dprintf(fd, "Model:\t\tqemu-m68k\n"); 8447 return 0; 8448 } 8449 #endif 8450 8451 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 8452 { 8453 struct fake_open { 8454 const char *filename; 8455 int (*fill)(CPUArchState *cpu_env, int fd); 8456 int (*cmp)(const char *s1, const char *s2); 8457 }; 8458 const struct fake_open *fake_open; 8459 static const struct fake_open fakes[] = { 8460 { "maps", open_self_maps, is_proc_myself }, 8461 { "stat", open_self_stat, is_proc_myself }, 8462 { "auxv", open_self_auxv, is_proc_myself }, 8463 { "cmdline", open_self_cmdline, is_proc_myself }, 8464 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN 8465 { "/proc/net/route", open_net_route, is_proc }, 8466 #endif 8467 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \ 8468 defined(TARGET_RISCV) || defined(TARGET_S390X) 8469 { "/proc/cpuinfo", open_cpuinfo, is_proc }, 8470 #endif 8471 #if defined(TARGET_M68K) 8472 { "/proc/hardware", open_hardware, is_proc }, 8473 #endif 8474 { NULL, NULL, NULL } 8475 }; 8476 8477 if 
(is_proc_myself(pathname, "exe")) { 8478 return safe_openat(dirfd, exec_path, flags, mode); 8479 } 8480 8481 for (fake_open = fakes; fake_open->filename; fake_open++) { 8482 if (fake_open->cmp(pathname, fake_open->filename)) { 8483 break; 8484 } 8485 } 8486 8487 if (fake_open->filename) { 8488 const char *tmpdir; 8489 char filename[PATH_MAX]; 8490 int fd, r; 8491 8492 fd = memfd_create("qemu-open", 0); 8493 if (fd < 0) { 8494 if (errno != ENOSYS) { 8495 return fd; 8496 } 8497 /* create temporary file to map stat to */ 8498 tmpdir = getenv("TMPDIR"); 8499 if (!tmpdir) 8500 tmpdir = "/tmp"; 8501 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 8502 fd = mkstemp(filename); 8503 if (fd < 0) { 8504 return fd; 8505 } 8506 unlink(filename); 8507 } 8508 8509 if ((r = fake_open->fill(cpu_env, fd))) { 8510 int e = errno; 8511 close(fd); 8512 errno = e; 8513 return r; 8514 } 8515 lseek(fd, 0, SEEK_SET); 8516 8517 return fd; 8518 } 8519 8520 return safe_openat(dirfd, path(pathname), flags, mode); 8521 } 8522 8523 static int do_execveat(CPUArchState *cpu_env, int dirfd, 8524 abi_long pathname, abi_long guest_argp, 8525 abi_long guest_envp, int flags) 8526 { 8527 int ret; 8528 char **argp, **envp; 8529 int argc, envc; 8530 abi_ulong gp; 8531 abi_ulong addr; 8532 char **q; 8533 void *p; 8534 8535 argc = 0; 8536 8537 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 8538 if (get_user_ual(addr, gp)) { 8539 return -TARGET_EFAULT; 8540 } 8541 if (!addr) { 8542 break; 8543 } 8544 argc++; 8545 } 8546 envc = 0; 8547 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8548 if (get_user_ual(addr, gp)) { 8549 return -TARGET_EFAULT; 8550 } 8551 if (!addr) { 8552 break; 8553 } 8554 envc++; 8555 } 8556 8557 argp = g_new0(char *, argc + 1); 8558 envp = g_new0(char *, envc + 1); 8559 8560 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) { 8561 if (get_user_ual(addr, gp)) { 8562 goto execve_efault; 8563 } 8564 if (!addr) { 8565 break; 8566 } 8567 *q = lock_user_string(addr); 8568 if (!*q) { 8569 goto execve_efault; 8570 } 8571 } 8572 *q = NULL; 8573 8574 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) { 8575 if (get_user_ual(addr, gp)) { 8576 goto execve_efault; 8577 } 8578 if (!addr) { 8579 break; 8580 } 8581 *q = lock_user_string(addr); 8582 if (!*q) { 8583 goto execve_efault; 8584 } 8585 } 8586 *q = NULL; 8587 8588 /* 8589 * Although execve() is not an interruptible syscall it is 8590 * a special case where we must use the safe_syscall wrapper: 8591 * if we allow a signal to happen before we make the host 8592 * syscall then we will 'lose' it, because at the point of 8593 * execve the process leaves QEMU's control. So we use the 8594 * safe syscall wrapper to ensure that we either take the 8595 * signal as a guest signal, or else it does not happen 8596 * before the execve completes and makes it the other 8597 * program's problem. 
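 *
 * Informally, the race being closed looks like this:
 *
 *   signal arrives -> QEMU's host handler records it as pending
 *   -> pending guest signals are normally delivered before the
 *      guest next runs
 *   -> but once the host execve() succeeds, QEMU (and the pending
 *      signal with it) is gone
 *
 * (This is just a restatement of the paragraph above, not extra
 * behaviour.)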
8598 */ 8599 p = lock_user_string(pathname); 8600 if (!p) { 8601 goto execve_efault; 8602 } 8603 8604 if (is_proc_myself(p, "exe")) { 8605 ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags)); 8606 } else { 8607 ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags)); 8608 } 8609 8610 unlock_user(p, pathname, 0); 8611 8612 goto execve_end; 8613 8614 execve_efault: 8615 ret = -TARGET_EFAULT; 8616 8617 execve_end: 8618 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) { 8619 if (get_user_ual(addr, gp) || !addr) { 8620 break; 8621 } 8622 unlock_user(*q, addr, 0); 8623 } 8624 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) { 8625 if (get_user_ual(addr, gp) || !addr) { 8626 break; 8627 } 8628 unlock_user(*q, addr, 0); 8629 } 8630 8631 g_free(argp); 8632 g_free(envp); 8633 return ret; 8634 } 8635 8636 #define TIMER_MAGIC 0x0caf0000 8637 #define TIMER_MAGIC_MASK 0xffff0000 8638 8639 /* Convert QEMU provided timer ID back to internal 16bit index format */ 8640 static target_timer_t get_timer_id(abi_long arg) 8641 { 8642 target_timer_t timerid = arg; 8643 8644 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 8645 return -TARGET_EINVAL; 8646 } 8647 8648 timerid &= 0xffff; 8649 8650 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 8651 return -TARGET_EINVAL; 8652 } 8653 8654 return timerid; 8655 } 8656 8657 static int target_to_host_cpu_mask(unsigned long *host_mask, 8658 size_t host_size, 8659 abi_ulong target_addr, 8660 size_t target_size) 8661 { 8662 unsigned target_bits = sizeof(abi_ulong) * 8; 8663 unsigned host_bits = sizeof(*host_mask) * 8; 8664 abi_ulong *target_mask; 8665 unsigned i, j; 8666 8667 assert(host_size >= target_size); 8668 8669 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1); 8670 if (!target_mask) { 8671 return -TARGET_EFAULT; 8672 } 8673 memset(host_mask, 0, host_size); 8674 8675 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8676 unsigned bit = i * target_bits; 8677 abi_ulong val; 8678 8679 __get_user(val, &target_mask[i]); 8680 for (j = 0; j < target_bits; j++, bit++) { 8681 if (val & (1UL << j)) { 8682 host_mask[bit / host_bits] |= 1UL << (bit % host_bits); 8683 } 8684 } 8685 } 8686 8687 unlock_user(target_mask, target_addr, 0); 8688 return 0; 8689 } 8690 8691 static int host_to_target_cpu_mask(const unsigned long *host_mask, 8692 size_t host_size, 8693 abi_ulong target_addr, 8694 size_t target_size) 8695 { 8696 unsigned target_bits = sizeof(abi_ulong) * 8; 8697 unsigned host_bits = sizeof(*host_mask) * 8; 8698 abi_ulong *target_mask; 8699 unsigned i, j; 8700 8701 assert(host_size >= target_size); 8702 8703 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0); 8704 if (!target_mask) { 8705 return -TARGET_EFAULT; 8706 } 8707 8708 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) { 8709 unsigned bit = i * target_bits; 8710 abi_ulong val = 0; 8711 8712 for (j = 0; j < target_bits; j++, bit++) { 8713 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) { 8714 val |= 1UL << j; 8715 } 8716 } 8717 __put_user(val, &target_mask[i]); 8718 } 8719 8720 unlock_user(target_mask, target_addr, target_size); 8721 return 0; 8722 } 8723 8724 #ifdef TARGET_NR_getdents 8725 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count) 8726 { 8727 g_autofree void *hdirp = NULL; 8728 void *tdirp; 8729 int hlen, hoff, toff; 8730 int hreclen, treclen; 8731 off64_t prev_diroff = 0; 8732 8733 hdirp = g_try_malloc(count); 8734 if (!hdirp) { 8735 return -TARGET_ENOMEM; 8736 } 8737 8738 
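    /*
     * Read the host directory entries into the bounce buffer allocated
     * above.  Depending on how the host was configured this goes through
     * either getdents() or getdents64(); the loop below then repacks
     * each host record into the guest's struct target_dirent layout.
     */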
#ifdef EMULATE_GETDENTS_WITH_GETDENTS 8739 hlen = sys_getdents(dirfd, hdirp, count); 8740 #else 8741 hlen = sys_getdents64(dirfd, hdirp, count); 8742 #endif 8743 8744 hlen = get_errno(hlen); 8745 if (is_error(hlen)) { 8746 return hlen; 8747 } 8748 8749 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0); 8750 if (!tdirp) { 8751 return -TARGET_EFAULT; 8752 } 8753 8754 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) { 8755 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 8756 struct linux_dirent *hde = hdirp + hoff; 8757 #else 8758 struct linux_dirent64 *hde = hdirp + hoff; 8759 #endif 8760 struct target_dirent *tde = tdirp + toff; 8761 int namelen; 8762 uint8_t type; 8763 8764 namelen = strlen(hde->d_name); 8765 hreclen = hde->d_reclen; 8766 treclen = offsetof(struct target_dirent, d_name) + namelen + 2; 8767 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent)); 8768 8769 if (toff + treclen > count) { 8770 /* 8771 * If the host struct is smaller than the target struct, or 8772 * requires less alignment and thus packs into less space, 8773 * then the host can return more entries than we can pass 8774 * on to the guest. 8775 */ 8776 if (toff == 0) { 8777 toff = -TARGET_EINVAL; /* result buffer is too small */ 8778 break; 8779 } 8780 /* 8781 * Return what we have, resetting the file pointer to the 8782 * location of the first record not returned. 8783 */ 8784 lseek64(dirfd, prev_diroff, SEEK_SET); 8785 break; 8786 } 8787 8788 prev_diroff = hde->d_off; 8789 tde->d_ino = tswapal(hde->d_ino); 8790 tde->d_off = tswapal(hde->d_off); 8791 tde->d_reclen = tswap16(treclen); 8792 memcpy(tde->d_name, hde->d_name, namelen + 1); 8793 8794 /* 8795 * The getdents type is in what was formerly a padding byte at the 8796 * end of the structure. 8797 */ 8798 #ifdef EMULATE_GETDENTS_WITH_GETDENTS 8799 type = *((uint8_t *)hde + hreclen - 1); 8800 #else 8801 type = hde->d_type; 8802 #endif 8803 *((uint8_t *)tde + treclen - 1) = type; 8804 } 8805 8806 unlock_user(tdirp, arg2, toff); 8807 return toff; 8808 } 8809 #endif /* TARGET_NR_getdents */ 8810 8811 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 8812 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) 8813 { 8814 g_autofree void *hdirp = NULL; 8815 void *tdirp; 8816 int hlen, hoff, toff; 8817 int hreclen, treclen; 8818 off64_t prev_diroff = 0; 8819 8820 hdirp = g_try_malloc(count); 8821 if (!hdirp) { 8822 return -TARGET_ENOMEM; 8823 } 8824 8825 hlen = get_errno(sys_getdents64(dirfd, hdirp, count)); 8826 if (is_error(hlen)) { 8827 return hlen; 8828 } 8829 8830 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0); 8831 if (!tdirp) { 8832 return -TARGET_EFAULT; 8833 } 8834 8835 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) { 8836 struct linux_dirent64 *hde = hdirp + hoff; 8837 struct target_dirent64 *tde = tdirp + toff; 8838 int namelen; 8839 8840 namelen = strlen(hde->d_name) + 1; 8841 hreclen = hde->d_reclen; 8842 treclen = offsetof(struct target_dirent64, d_name) + namelen; 8843 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64)); 8844 8845 if (toff + treclen > count) { 8846 /* 8847 * If the host struct is smaller than the target struct, or 8848 * requires less alignment and thus packs into less space, 8849 * then the host can return more entries than we can pass 8850 * on to the guest. 
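             *
             * For instance (illustrative numbers only): a host
             * linux_dirent64 carrying a 5-byte name occupies
             * QEMU_ALIGN_UP(19 + 5 + 1, 8) == 32 bytes, so a buffer
             * full of such host records may expand past 'count' bytes
             * once each record is padded to the target's larger size
             * or alignment.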
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */

#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
          int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif

/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, allowing actions such as logging
 * of syscall results to be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch (num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However, in threaded applications it is used for thread
           termination, and _exit_group is used for application
           termination.  Do thread termination if we have more than
           one thread.  */

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }

            object_unparent(OBJECT(cpu));
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean up the rest of the thread
             * data without the lock held.
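             *
             * (Informally: the unrealize and list removal must happen
             *  with clone_lock held so that sibling threads never
             *  observe a half-removed CPU; everything after the unlock
             *  below touches only this thread's private state.)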
8945 */ 8946 8947 pthread_mutex_unlock(&clone_lock); 8948 8949 thread_cpu = NULL; 8950 g_free(ts); 8951 rcu_unregister_thread(); 8952 pthread_exit(NULL); 8953 } 8954 8955 pthread_mutex_unlock(&clone_lock); 8956 preexit_cleanup(cpu_env, arg1); 8957 _exit(arg1); 8958 return 0; /* avoid warning */ 8959 case TARGET_NR_read: 8960 if (arg2 == 0 && arg3 == 0) { 8961 return get_errno(safe_read(arg1, 0, 0)); 8962 } else { 8963 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8964 return -TARGET_EFAULT; 8965 ret = get_errno(safe_read(arg1, p, arg3)); 8966 if (ret >= 0 && 8967 fd_trans_host_to_target_data(arg1)) { 8968 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8969 } 8970 unlock_user(p, arg2, ret); 8971 } 8972 return ret; 8973 case TARGET_NR_write: 8974 if (arg2 == 0 && arg3 == 0) { 8975 return get_errno(safe_write(arg1, 0, 0)); 8976 } 8977 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8978 return -TARGET_EFAULT; 8979 if (fd_trans_target_to_host_data(arg1)) { 8980 void *copy = g_malloc(arg3); 8981 memcpy(copy, p, arg3); 8982 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8983 if (ret >= 0) { 8984 ret = get_errno(safe_write(arg1, copy, ret)); 8985 } 8986 g_free(copy); 8987 } else { 8988 ret = get_errno(safe_write(arg1, p, arg3)); 8989 } 8990 unlock_user(p, arg2, 0); 8991 return ret; 8992 8993 #ifdef TARGET_NR_open 8994 case TARGET_NR_open: 8995 if (!(p = lock_user_string(arg1))) 8996 return -TARGET_EFAULT; 8997 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8998 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8999 arg3)); 9000 fd_trans_unregister(ret); 9001 unlock_user(p, arg1, 0); 9002 return ret; 9003 #endif 9004 case TARGET_NR_openat: 9005 if (!(p = lock_user_string(arg2))) 9006 return -TARGET_EFAULT; 9007 ret = get_errno(do_openat(cpu_env, arg1, p, 9008 target_to_host_bitmask(arg3, fcntl_flags_tbl), 9009 arg4)); 9010 fd_trans_unregister(ret); 9011 unlock_user(p, arg2, 0); 9012 return ret; 9013 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 9014 case TARGET_NR_name_to_handle_at: 9015 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 9016 return ret; 9017 #endif 9018 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 9019 case TARGET_NR_open_by_handle_at: 9020 ret = do_open_by_handle_at(arg1, arg2, arg3); 9021 fd_trans_unregister(ret); 9022 return ret; 9023 #endif 9024 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) 9025 case TARGET_NR_pidfd_open: 9026 return get_errno(pidfd_open(arg1, arg2)); 9027 #endif 9028 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) 9029 case TARGET_NR_pidfd_send_signal: 9030 { 9031 siginfo_t uinfo, *puinfo; 9032 9033 if (arg3) { 9034 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9035 if (!p) { 9036 return -TARGET_EFAULT; 9037 } 9038 target_to_host_siginfo(&uinfo, p); 9039 unlock_user(p, arg3, 0); 9040 puinfo = &uinfo; 9041 } else { 9042 puinfo = NULL; 9043 } 9044 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2), 9045 puinfo, arg4)); 9046 } 9047 return ret; 9048 #endif 9049 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) 9050 case TARGET_NR_pidfd_getfd: 9051 return get_errno(pidfd_getfd(arg1, arg2, arg3)); 9052 #endif 9053 case TARGET_NR_close: 9054 fd_trans_unregister(arg1); 9055 return get_errno(close(arg1)); 9056 #if defined(__NR_close_range) && defined(TARGET_NR_close_range) 9057 case TARGET_NR_close_range: 9058 ret = get_errno(sys_close_range(arg1, arg2, arg3)); 9059 if (ret == 0 && !(arg3 & 
CLOSE_RANGE_CLOEXEC)) { 9060 abi_long fd, maxfd; 9061 maxfd = MIN(arg2, target_fd_max); 9062 for (fd = arg1; fd < maxfd; fd++) { 9063 fd_trans_unregister(fd); 9064 } 9065 } 9066 return ret; 9067 #endif 9068 9069 case TARGET_NR_brk: 9070 return do_brk(arg1); 9071 #ifdef TARGET_NR_fork 9072 case TARGET_NR_fork: 9073 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 9074 #endif 9075 #ifdef TARGET_NR_waitpid 9076 case TARGET_NR_waitpid: 9077 { 9078 int status; 9079 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 9080 if (!is_error(ret) && arg2 && ret 9081 && put_user_s32(host_to_target_waitstatus(status), arg2)) 9082 return -TARGET_EFAULT; 9083 } 9084 return ret; 9085 #endif 9086 #ifdef TARGET_NR_waitid 9087 case TARGET_NR_waitid: 9088 { 9089 siginfo_t info; 9090 info.si_pid = 0; 9091 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 9092 if (!is_error(ret) && arg3 && info.si_pid != 0) { 9093 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 9094 return -TARGET_EFAULT; 9095 host_to_target_siginfo(p, &info); 9096 unlock_user(p, arg3, sizeof(target_siginfo_t)); 9097 } 9098 } 9099 return ret; 9100 #endif 9101 #ifdef TARGET_NR_creat /* not on alpha */ 9102 case TARGET_NR_creat: 9103 if (!(p = lock_user_string(arg1))) 9104 return -TARGET_EFAULT; 9105 ret = get_errno(creat(p, arg2)); 9106 fd_trans_unregister(ret); 9107 unlock_user(p, arg1, 0); 9108 return ret; 9109 #endif 9110 #ifdef TARGET_NR_link 9111 case TARGET_NR_link: 9112 { 9113 void * p2; 9114 p = lock_user_string(arg1); 9115 p2 = lock_user_string(arg2); 9116 if (!p || !p2) 9117 ret = -TARGET_EFAULT; 9118 else 9119 ret = get_errno(link(p, p2)); 9120 unlock_user(p2, arg2, 0); 9121 unlock_user(p, arg1, 0); 9122 } 9123 return ret; 9124 #endif 9125 #if defined(TARGET_NR_linkat) 9126 case TARGET_NR_linkat: 9127 { 9128 void * p2 = NULL; 9129 if (!arg2 || !arg4) 9130 return -TARGET_EFAULT; 9131 p = lock_user_string(arg2); 9132 p2 = lock_user_string(arg4); 9133 if (!p || !p2) 9134 ret = -TARGET_EFAULT; 9135 else 9136 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 9137 unlock_user(p, arg2, 0); 9138 unlock_user(p2, arg4, 0); 9139 } 9140 return ret; 9141 #endif 9142 #ifdef TARGET_NR_unlink 9143 case TARGET_NR_unlink: 9144 if (!(p = lock_user_string(arg1))) 9145 return -TARGET_EFAULT; 9146 ret = get_errno(unlink(p)); 9147 unlock_user(p, arg1, 0); 9148 return ret; 9149 #endif 9150 #if defined(TARGET_NR_unlinkat) 9151 case TARGET_NR_unlinkat: 9152 if (!(p = lock_user_string(arg2))) 9153 return -TARGET_EFAULT; 9154 ret = get_errno(unlinkat(arg1, p, arg3)); 9155 unlock_user(p, arg2, 0); 9156 return ret; 9157 #endif 9158 case TARGET_NR_execveat: 9159 return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5); 9160 case TARGET_NR_execve: 9161 return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0); 9162 case TARGET_NR_chdir: 9163 if (!(p = lock_user_string(arg1))) 9164 return -TARGET_EFAULT; 9165 ret = get_errno(chdir(p)); 9166 unlock_user(p, arg1, 0); 9167 return ret; 9168 #ifdef TARGET_NR_time 9169 case TARGET_NR_time: 9170 { 9171 time_t host_time; 9172 ret = get_errno(time(&host_time)); 9173 if (!is_error(ret) 9174 && arg1 9175 && put_user_sal(host_time, arg1)) 9176 return -TARGET_EFAULT; 9177 } 9178 return ret; 9179 #endif 9180 #ifdef TARGET_NR_mknod 9181 case TARGET_NR_mknod: 9182 if (!(p = lock_user_string(arg1))) 9183 return -TARGET_EFAULT; 9184 ret = get_errno(mknod(p, arg2, arg3)); 9185 unlock_user(p, arg1, 0); 9186 return ret; 9187 #endif 9188 #if defined(TARGET_NR_mknodat) 9189 case 
TARGET_NR_mknodat: 9190 if (!(p = lock_user_string(arg2))) 9191 return -TARGET_EFAULT; 9192 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 9193 unlock_user(p, arg2, 0); 9194 return ret; 9195 #endif 9196 #ifdef TARGET_NR_chmod 9197 case TARGET_NR_chmod: 9198 if (!(p = lock_user_string(arg1))) 9199 return -TARGET_EFAULT; 9200 ret = get_errno(chmod(p, arg2)); 9201 unlock_user(p, arg1, 0); 9202 return ret; 9203 #endif 9204 #ifdef TARGET_NR_lseek 9205 case TARGET_NR_lseek: 9206 return get_errno(lseek(arg1, arg2, arg3)); 9207 #endif 9208 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 9209 /* Alpha specific */ 9210 case TARGET_NR_getxpid: 9211 cpu_env->ir[IR_A4] = getppid(); 9212 return get_errno(getpid()); 9213 #endif 9214 #ifdef TARGET_NR_getpid 9215 case TARGET_NR_getpid: 9216 return get_errno(getpid()); 9217 #endif 9218 case TARGET_NR_mount: 9219 { 9220 /* need to look at the data field */ 9221 void *p2, *p3; 9222 9223 if (arg1) { 9224 p = lock_user_string(arg1); 9225 if (!p) { 9226 return -TARGET_EFAULT; 9227 } 9228 } else { 9229 p = NULL; 9230 } 9231 9232 p2 = lock_user_string(arg2); 9233 if (!p2) { 9234 if (arg1) { 9235 unlock_user(p, arg1, 0); 9236 } 9237 return -TARGET_EFAULT; 9238 } 9239 9240 if (arg3) { 9241 p3 = lock_user_string(arg3); 9242 if (!p3) { 9243 if (arg1) { 9244 unlock_user(p, arg1, 0); 9245 } 9246 unlock_user(p2, arg2, 0); 9247 return -TARGET_EFAULT; 9248 } 9249 } else { 9250 p3 = NULL; 9251 } 9252 9253 /* FIXME - arg5 should be locked, but it isn't clear how to 9254 * do that since it's not guaranteed to be a NULL-terminated 9255 * string. 9256 */ 9257 if (!arg5) { 9258 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 9259 } else { 9260 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5)); 9261 } 9262 ret = get_errno(ret); 9263 9264 if (arg1) { 9265 unlock_user(p, arg1, 0); 9266 } 9267 unlock_user(p2, arg2, 0); 9268 if (arg3) { 9269 unlock_user(p3, arg3, 0); 9270 } 9271 } 9272 return ret; 9273 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 9274 #if defined(TARGET_NR_umount) 9275 case TARGET_NR_umount: 9276 #endif 9277 #if defined(TARGET_NR_oldumount) 9278 case TARGET_NR_oldumount: 9279 #endif 9280 if (!(p = lock_user_string(arg1))) 9281 return -TARGET_EFAULT; 9282 ret = get_errno(umount(p)); 9283 unlock_user(p, arg1, 0); 9284 return ret; 9285 #endif 9286 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount) 9287 case TARGET_NR_move_mount: 9288 { 9289 void *p2, *p4; 9290 9291 if (!arg2 || !arg4) { 9292 return -TARGET_EFAULT; 9293 } 9294 9295 p2 = lock_user_string(arg2); 9296 if (!p2) { 9297 return -TARGET_EFAULT; 9298 } 9299 9300 p4 = lock_user_string(arg4); 9301 if (!p4) { 9302 unlock_user(p2, arg2, 0); 9303 return -TARGET_EFAULT; 9304 } 9305 ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5)); 9306 9307 unlock_user(p2, arg2, 0); 9308 unlock_user(p4, arg4, 0); 9309 9310 return ret; 9311 } 9312 #endif 9313 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree) 9314 case TARGET_NR_open_tree: 9315 { 9316 void *p2; 9317 int host_flags; 9318 9319 if (!arg2) { 9320 return -TARGET_EFAULT; 9321 } 9322 9323 p2 = lock_user_string(arg2); 9324 if (!p2) { 9325 return -TARGET_EFAULT; 9326 } 9327 9328 host_flags = arg3 & ~TARGET_O_CLOEXEC; 9329 if (arg3 & TARGET_O_CLOEXEC) { 9330 host_flags |= O_CLOEXEC; 9331 } 9332 9333 ret = get_errno(sys_open_tree(arg1, p2, host_flags)); 9334 9335 unlock_user(p2, arg2, 0); 9336 9337 return ret; 9338 } 9339 #endif 9340 #ifdef TARGET_NR_stime /* not on alpha */ 9341 case TARGET_NR_stime: 9342 { 9343 
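        /*
         * stime() passes a time_t in seconds; emulate it through
         * clock_settime(CLOCK_REALTIME) with tv_nsec forced to zero,
         * as done below.
         */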
struct timespec ts; 9344 ts.tv_nsec = 0; 9345 if (get_user_sal(ts.tv_sec, arg1)) { 9346 return -TARGET_EFAULT; 9347 } 9348 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 9349 } 9350 #endif 9351 #ifdef TARGET_NR_alarm /* not on alpha */ 9352 case TARGET_NR_alarm: 9353 return alarm(arg1); 9354 #endif 9355 #ifdef TARGET_NR_pause /* not on alpha */ 9356 case TARGET_NR_pause: 9357 if (!block_signals()) { 9358 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 9359 } 9360 return -TARGET_EINTR; 9361 #endif 9362 #ifdef TARGET_NR_utime 9363 case TARGET_NR_utime: 9364 { 9365 struct utimbuf tbuf, *host_tbuf; 9366 struct target_utimbuf *target_tbuf; 9367 if (arg2) { 9368 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 9369 return -TARGET_EFAULT; 9370 tbuf.actime = tswapal(target_tbuf->actime); 9371 tbuf.modtime = tswapal(target_tbuf->modtime); 9372 unlock_user_struct(target_tbuf, arg2, 0); 9373 host_tbuf = &tbuf; 9374 } else { 9375 host_tbuf = NULL; 9376 } 9377 if (!(p = lock_user_string(arg1))) 9378 return -TARGET_EFAULT; 9379 ret = get_errno(utime(p, host_tbuf)); 9380 unlock_user(p, arg1, 0); 9381 } 9382 return ret; 9383 #endif 9384 #ifdef TARGET_NR_utimes 9385 case TARGET_NR_utimes: 9386 { 9387 struct timeval *tvp, tv[2]; 9388 if (arg2) { 9389 if (copy_from_user_timeval(&tv[0], arg2) 9390 || copy_from_user_timeval(&tv[1], 9391 arg2 + sizeof(struct target_timeval))) 9392 return -TARGET_EFAULT; 9393 tvp = tv; 9394 } else { 9395 tvp = NULL; 9396 } 9397 if (!(p = lock_user_string(arg1))) 9398 return -TARGET_EFAULT; 9399 ret = get_errno(utimes(p, tvp)); 9400 unlock_user(p, arg1, 0); 9401 } 9402 return ret; 9403 #endif 9404 #if defined(TARGET_NR_futimesat) 9405 case TARGET_NR_futimesat: 9406 { 9407 struct timeval *tvp, tv[2]; 9408 if (arg3) { 9409 if (copy_from_user_timeval(&tv[0], arg3) 9410 || copy_from_user_timeval(&tv[1], 9411 arg3 + sizeof(struct target_timeval))) 9412 return -TARGET_EFAULT; 9413 tvp = tv; 9414 } else { 9415 tvp = NULL; 9416 } 9417 if (!(p = lock_user_string(arg2))) { 9418 return -TARGET_EFAULT; 9419 } 9420 ret = get_errno(futimesat(arg1, path(p), tvp)); 9421 unlock_user(p, arg2, 0); 9422 } 9423 return ret; 9424 #endif 9425 #ifdef TARGET_NR_access 9426 case TARGET_NR_access: 9427 if (!(p = lock_user_string(arg1))) { 9428 return -TARGET_EFAULT; 9429 } 9430 ret = get_errno(access(path(p), arg2)); 9431 unlock_user(p, arg1, 0); 9432 return ret; 9433 #endif 9434 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 9435 case TARGET_NR_faccessat: 9436 if (!(p = lock_user_string(arg2))) { 9437 return -TARGET_EFAULT; 9438 } 9439 ret = get_errno(faccessat(arg1, p, arg3, 0)); 9440 unlock_user(p, arg2, 0); 9441 return ret; 9442 #endif 9443 #if defined(TARGET_NR_faccessat2) 9444 case TARGET_NR_faccessat2: 9445 if (!(p = lock_user_string(arg2))) { 9446 return -TARGET_EFAULT; 9447 } 9448 ret = get_errno(faccessat(arg1, p, arg3, arg4)); 9449 unlock_user(p, arg2, 0); 9450 return ret; 9451 #endif 9452 #ifdef TARGET_NR_nice /* not on alpha */ 9453 case TARGET_NR_nice: 9454 return get_errno(nice(arg1)); 9455 #endif 9456 case TARGET_NR_sync: 9457 sync(); 9458 return 0; 9459 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 9460 case TARGET_NR_syncfs: 9461 return get_errno(syncfs(arg1)); 9462 #endif 9463 case TARGET_NR_kill: 9464 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 9465 #ifdef TARGET_NR_rename 9466 case TARGET_NR_rename: 9467 { 9468 void *p2; 9469 p = lock_user_string(arg1); 9470 p2 = lock_user_string(arg2); 9471 if (!p || !p2) 9472 ret = 
-TARGET_EFAULT; 9473 else 9474 ret = get_errno(rename(p, p2)); 9475 unlock_user(p2, arg2, 0); 9476 unlock_user(p, arg1, 0); 9477 } 9478 return ret; 9479 #endif 9480 #if defined(TARGET_NR_renameat) 9481 case TARGET_NR_renameat: 9482 { 9483 void *p2; 9484 p = lock_user_string(arg2); 9485 p2 = lock_user_string(arg4); 9486 if (!p || !p2) 9487 ret = -TARGET_EFAULT; 9488 else 9489 ret = get_errno(renameat(arg1, p, arg3, p2)); 9490 unlock_user(p2, arg4, 0); 9491 unlock_user(p, arg2, 0); 9492 } 9493 return ret; 9494 #endif 9495 #if defined(TARGET_NR_renameat2) 9496 case TARGET_NR_renameat2: 9497 { 9498 void *p2; 9499 p = lock_user_string(arg2); 9500 p2 = lock_user_string(arg4); 9501 if (!p || !p2) { 9502 ret = -TARGET_EFAULT; 9503 } else { 9504 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 9505 } 9506 unlock_user(p2, arg4, 0); 9507 unlock_user(p, arg2, 0); 9508 } 9509 return ret; 9510 #endif 9511 #ifdef TARGET_NR_mkdir 9512 case TARGET_NR_mkdir: 9513 if (!(p = lock_user_string(arg1))) 9514 return -TARGET_EFAULT; 9515 ret = get_errno(mkdir(p, arg2)); 9516 unlock_user(p, arg1, 0); 9517 return ret; 9518 #endif 9519 #if defined(TARGET_NR_mkdirat) 9520 case TARGET_NR_mkdirat: 9521 if (!(p = lock_user_string(arg2))) 9522 return -TARGET_EFAULT; 9523 ret = get_errno(mkdirat(arg1, p, arg3)); 9524 unlock_user(p, arg2, 0); 9525 return ret; 9526 #endif 9527 #ifdef TARGET_NR_rmdir 9528 case TARGET_NR_rmdir: 9529 if (!(p = lock_user_string(arg1))) 9530 return -TARGET_EFAULT; 9531 ret = get_errno(rmdir(p)); 9532 unlock_user(p, arg1, 0); 9533 return ret; 9534 #endif 9535 case TARGET_NR_dup: 9536 ret = get_errno(dup(arg1)); 9537 if (ret >= 0) { 9538 fd_trans_dup(arg1, ret); 9539 } 9540 return ret; 9541 #ifdef TARGET_NR_pipe 9542 case TARGET_NR_pipe: 9543 return do_pipe(cpu_env, arg1, 0, 0); 9544 #endif 9545 #ifdef TARGET_NR_pipe2 9546 case TARGET_NR_pipe2: 9547 return do_pipe(cpu_env, arg1, 9548 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 9549 #endif 9550 case TARGET_NR_times: 9551 { 9552 struct target_tms *tmsp; 9553 struct tms tms; 9554 ret = get_errno(times(&tms)); 9555 if (arg1) { 9556 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 9557 if (!tmsp) 9558 return -TARGET_EFAULT; 9559 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 9560 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 9561 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 9562 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 9563 } 9564 if (!is_error(ret)) 9565 ret = host_to_target_clock_t(ret); 9566 } 9567 return ret; 9568 case TARGET_NR_acct: 9569 if (arg1 == 0) { 9570 ret = get_errno(acct(NULL)); 9571 } else { 9572 if (!(p = lock_user_string(arg1))) { 9573 return -TARGET_EFAULT; 9574 } 9575 ret = get_errno(acct(path(p))); 9576 unlock_user(p, arg1, 0); 9577 } 9578 return ret; 9579 #ifdef TARGET_NR_umount2 9580 case TARGET_NR_umount2: 9581 if (!(p = lock_user_string(arg1))) 9582 return -TARGET_EFAULT; 9583 ret = get_errno(umount2(p, arg2)); 9584 unlock_user(p, arg1, 0); 9585 return ret; 9586 #endif 9587 case TARGET_NR_ioctl: 9588 return do_ioctl(arg1, arg2, arg3); 9589 #ifdef TARGET_NR_fcntl 9590 case TARGET_NR_fcntl: 9591 return do_fcntl(arg1, arg2, arg3); 9592 #endif 9593 case TARGET_NR_setpgid: 9594 return get_errno(setpgid(arg1, arg2)); 9595 case TARGET_NR_umask: 9596 return get_errno(umask(arg1)); 9597 case TARGET_NR_chroot: 9598 if (!(p = lock_user_string(arg1))) 9599 return -TARGET_EFAULT; 9600 ret = get_errno(chroot(p)); 9601 
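        /* The locked host path is no longer needed once chroot() has run. */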
unlock_user(p, arg1, 0); 9602 return ret; 9603 #ifdef TARGET_NR_dup2 9604 case TARGET_NR_dup2: 9605 ret = get_errno(dup2(arg1, arg2)); 9606 if (ret >= 0) { 9607 fd_trans_dup(arg1, arg2); 9608 } 9609 return ret; 9610 #endif 9611 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 9612 case TARGET_NR_dup3: 9613 { 9614 int host_flags; 9615 9616 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 9617 return -EINVAL; 9618 } 9619 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 9620 ret = get_errno(dup3(arg1, arg2, host_flags)); 9621 if (ret >= 0) { 9622 fd_trans_dup(arg1, arg2); 9623 } 9624 return ret; 9625 } 9626 #endif 9627 #ifdef TARGET_NR_getppid /* not on alpha */ 9628 case TARGET_NR_getppid: 9629 return get_errno(getppid()); 9630 #endif 9631 #ifdef TARGET_NR_getpgrp 9632 case TARGET_NR_getpgrp: 9633 return get_errno(getpgrp()); 9634 #endif 9635 case TARGET_NR_setsid: 9636 return get_errno(setsid()); 9637 #ifdef TARGET_NR_sigaction 9638 case TARGET_NR_sigaction: 9639 { 9640 #if defined(TARGET_MIPS) 9641 struct target_sigaction act, oact, *pact, *old_act; 9642 9643 if (arg2) { 9644 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9645 return -TARGET_EFAULT; 9646 act._sa_handler = old_act->_sa_handler; 9647 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 9648 act.sa_flags = old_act->sa_flags; 9649 unlock_user_struct(old_act, arg2, 0); 9650 pact = &act; 9651 } else { 9652 pact = NULL; 9653 } 9654 9655 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9656 9657 if (!is_error(ret) && arg3) { 9658 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9659 return -TARGET_EFAULT; 9660 old_act->_sa_handler = oact._sa_handler; 9661 old_act->sa_flags = oact.sa_flags; 9662 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 9663 old_act->sa_mask.sig[1] = 0; 9664 old_act->sa_mask.sig[2] = 0; 9665 old_act->sa_mask.sig[3] = 0; 9666 unlock_user_struct(old_act, arg3, 1); 9667 } 9668 #else 9669 struct target_old_sigaction *old_act; 9670 struct target_sigaction act, oact, *pact; 9671 if (arg2) { 9672 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9673 return -TARGET_EFAULT; 9674 act._sa_handler = old_act->_sa_handler; 9675 target_siginitset(&act.sa_mask, old_act->sa_mask); 9676 act.sa_flags = old_act->sa_flags; 9677 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9678 act.sa_restorer = old_act->sa_restorer; 9679 #endif 9680 unlock_user_struct(old_act, arg2, 0); 9681 pact = &act; 9682 } else { 9683 pact = NULL; 9684 } 9685 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9686 if (!is_error(ret) && arg3) { 9687 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9688 return -TARGET_EFAULT; 9689 old_act->_sa_handler = oact._sa_handler; 9690 old_act->sa_mask = oact.sa_mask.sig[0]; 9691 old_act->sa_flags = oact.sa_flags; 9692 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9693 old_act->sa_restorer = oact.sa_restorer; 9694 #endif 9695 unlock_user_struct(old_act, arg3, 1); 9696 } 9697 #endif 9698 } 9699 return ret; 9700 #endif 9701 case TARGET_NR_rt_sigaction: 9702 { 9703 /* 9704 * For Alpha and SPARC this is a 5 argument syscall, with 9705 * a 'restorer' parameter which must be copied into the 9706 * sa_restorer field of the sigaction struct. 9707 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 9708 * and arg5 is the sigsetsize. 
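         *
         * Summarising the #if ladder that follows:
         *   Alpha:      sigsetsize = arg4, restorer   = arg5
         *   SPARC:      restorer   = arg4, sigsetsize = arg5
         *   all others: sigsetsize = arg4, restorer forced to 0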
9709 */ 9710 #if defined(TARGET_ALPHA) 9711 target_ulong sigsetsize = arg4; 9712 target_ulong restorer = arg5; 9713 #elif defined(TARGET_SPARC) 9714 target_ulong restorer = arg4; 9715 target_ulong sigsetsize = arg5; 9716 #else 9717 target_ulong sigsetsize = arg4; 9718 target_ulong restorer = 0; 9719 #endif 9720 struct target_sigaction *act = NULL; 9721 struct target_sigaction *oact = NULL; 9722 9723 if (sigsetsize != sizeof(target_sigset_t)) { 9724 return -TARGET_EINVAL; 9725 } 9726 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9727 return -TARGET_EFAULT; 9728 } 9729 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9730 ret = -TARGET_EFAULT; 9731 } else { 9732 ret = get_errno(do_sigaction(arg1, act, oact, restorer)); 9733 if (oact) { 9734 unlock_user_struct(oact, arg3, 1); 9735 } 9736 } 9737 if (act) { 9738 unlock_user_struct(act, arg2, 0); 9739 } 9740 } 9741 return ret; 9742 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9743 case TARGET_NR_sgetmask: 9744 { 9745 sigset_t cur_set; 9746 abi_ulong target_set; 9747 ret = do_sigprocmask(0, NULL, &cur_set); 9748 if (!ret) { 9749 host_to_target_old_sigset(&target_set, &cur_set); 9750 ret = target_set; 9751 } 9752 } 9753 return ret; 9754 #endif 9755 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9756 case TARGET_NR_ssetmask: 9757 { 9758 sigset_t set, oset; 9759 abi_ulong target_set = arg1; 9760 target_to_host_old_sigset(&set, &target_set); 9761 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9762 if (!ret) { 9763 host_to_target_old_sigset(&target_set, &oset); 9764 ret = target_set; 9765 } 9766 } 9767 return ret; 9768 #endif 9769 #ifdef TARGET_NR_sigprocmask 9770 case TARGET_NR_sigprocmask: 9771 { 9772 #if defined(TARGET_ALPHA) 9773 sigset_t set, oldset; 9774 abi_ulong mask; 9775 int how; 9776 9777 switch (arg1) { 9778 case TARGET_SIG_BLOCK: 9779 how = SIG_BLOCK; 9780 break; 9781 case TARGET_SIG_UNBLOCK: 9782 how = SIG_UNBLOCK; 9783 break; 9784 case TARGET_SIG_SETMASK: 9785 how = SIG_SETMASK; 9786 break; 9787 default: 9788 return -TARGET_EINVAL; 9789 } 9790 mask = arg2; 9791 target_to_host_old_sigset(&set, &mask); 9792 9793 ret = do_sigprocmask(how, &set, &oldset); 9794 if (!is_error(ret)) { 9795 host_to_target_old_sigset(&mask, &oldset); 9796 ret = mask; 9797 cpu_env->ir[IR_V0] = 0; /* force no error */ 9798 } 9799 #else 9800 sigset_t set, oldset, *set_ptr; 9801 int how; 9802 9803 if (arg2) { 9804 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9805 if (!p) { 9806 return -TARGET_EFAULT; 9807 } 9808 target_to_host_old_sigset(&set, p); 9809 unlock_user(p, arg2, 0); 9810 set_ptr = &set; 9811 switch (arg1) { 9812 case TARGET_SIG_BLOCK: 9813 how = SIG_BLOCK; 9814 break; 9815 case TARGET_SIG_UNBLOCK: 9816 how = SIG_UNBLOCK; 9817 break; 9818 case TARGET_SIG_SETMASK: 9819 how = SIG_SETMASK; 9820 break; 9821 default: 9822 return -TARGET_EINVAL; 9823 } 9824 } else { 9825 how = 0; 9826 set_ptr = NULL; 9827 } 9828 ret = do_sigprocmask(how, set_ptr, &oldset); 9829 if (!is_error(ret) && arg3) { 9830 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9831 return -TARGET_EFAULT; 9832 host_to_target_old_sigset(p, &oldset); 9833 unlock_user(p, arg3, sizeof(target_sigset_t)); 9834 } 9835 #endif 9836 } 9837 return ret; 9838 #endif 9839 case TARGET_NR_rt_sigprocmask: 9840 { 9841 int how = arg1; 9842 sigset_t set, oldset, *set_ptr; 9843 9844 if (arg4 != sizeof(target_sigset_t)) { 9845 return -TARGET_EINVAL; 9846 } 9847 9848 if (arg2) { 9849 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9850 if 
(!p) { 9851 return -TARGET_EFAULT; 9852 } 9853 target_to_host_sigset(&set, p); 9854 unlock_user(p, arg2, 0); 9855 set_ptr = &set; 9856 switch(how) { 9857 case TARGET_SIG_BLOCK: 9858 how = SIG_BLOCK; 9859 break; 9860 case TARGET_SIG_UNBLOCK: 9861 how = SIG_UNBLOCK; 9862 break; 9863 case TARGET_SIG_SETMASK: 9864 how = SIG_SETMASK; 9865 break; 9866 default: 9867 return -TARGET_EINVAL; 9868 } 9869 } else { 9870 how = 0; 9871 set_ptr = NULL; 9872 } 9873 ret = do_sigprocmask(how, set_ptr, &oldset); 9874 if (!is_error(ret) && arg3) { 9875 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9876 return -TARGET_EFAULT; 9877 host_to_target_sigset(p, &oldset); 9878 unlock_user(p, arg3, sizeof(target_sigset_t)); 9879 } 9880 } 9881 return ret; 9882 #ifdef TARGET_NR_sigpending 9883 case TARGET_NR_sigpending: 9884 { 9885 sigset_t set; 9886 ret = get_errno(sigpending(&set)); 9887 if (!is_error(ret)) { 9888 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9889 return -TARGET_EFAULT; 9890 host_to_target_old_sigset(p, &set); 9891 unlock_user(p, arg1, sizeof(target_sigset_t)); 9892 } 9893 } 9894 return ret; 9895 #endif 9896 case TARGET_NR_rt_sigpending: 9897 { 9898 sigset_t set; 9899 9900 /* Yes, this check is >, not != like most. We follow the kernel's 9901 * logic and it does it like this because it implements 9902 * NR_sigpending through the same code path, and in that case 9903 * the old_sigset_t is smaller in size. 9904 */ 9905 if (arg2 > sizeof(target_sigset_t)) { 9906 return -TARGET_EINVAL; 9907 } 9908 9909 ret = get_errno(sigpending(&set)); 9910 if (!is_error(ret)) { 9911 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9912 return -TARGET_EFAULT; 9913 host_to_target_sigset(p, &set); 9914 unlock_user(p, arg1, sizeof(target_sigset_t)); 9915 } 9916 } 9917 return ret; 9918 #ifdef TARGET_NR_sigsuspend 9919 case TARGET_NR_sigsuspend: 9920 { 9921 sigset_t *set; 9922 9923 #if defined(TARGET_ALPHA) 9924 TaskState *ts = cpu->opaque; 9925 /* target_to_host_old_sigset will bswap back */ 9926 abi_ulong mask = tswapal(arg1); 9927 set = &ts->sigsuspend_mask; 9928 target_to_host_old_sigset(set, &mask); 9929 #else 9930 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t)); 9931 if (ret != 0) { 9932 return ret; 9933 } 9934 #endif 9935 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9936 finish_sigsuspend_mask(ret); 9937 } 9938 return ret; 9939 #endif 9940 case TARGET_NR_rt_sigsuspend: 9941 { 9942 sigset_t *set; 9943 9944 ret = process_sigsuspend_mask(&set, arg1, arg2); 9945 if (ret != 0) { 9946 return ret; 9947 } 9948 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9949 finish_sigsuspend_mask(ret); 9950 } 9951 return ret; 9952 #ifdef TARGET_NR_rt_sigtimedwait 9953 case TARGET_NR_rt_sigtimedwait: 9954 { 9955 sigset_t set; 9956 struct timespec uts, *puts; 9957 siginfo_t uinfo; 9958 9959 if (arg4 != sizeof(target_sigset_t)) { 9960 return -TARGET_EINVAL; 9961 } 9962 9963 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9964 return -TARGET_EFAULT; 9965 target_to_host_sigset(&set, p); 9966 unlock_user(p, arg1, 0); 9967 if (arg3) { 9968 puts = &uts; 9969 if (target_to_host_timespec(puts, arg3)) { 9970 return -TARGET_EFAULT; 9971 } 9972 } else { 9973 puts = NULL; 9974 } 9975 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9976 SIGSET_T_SIZE)); 9977 if (!is_error(ret)) { 9978 if (arg2) { 9979 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9980 0); 9981 if (!p) { 9982 return -TARGET_EFAULT; 9983 } 
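/*
 * On success both halves of the result need translating back to the
 * guest's view: host_to_target_siginfo() converts the layout and byte
 * order of the siginfo_t, and host_to_target_signal() remaps the host
 * signal number to the target's numbering, which can differ between
 * architectures (e.g. SIGUSR1 is 16 on MIPS but 10 on most others).
 */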
9984 host_to_target_siginfo(p, &uinfo); 9985 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9986 } 9987 ret = host_to_target_signal(ret); 9988 } 9989 } 9990 return ret; 9991 #endif 9992 #ifdef TARGET_NR_rt_sigtimedwait_time64 9993 case TARGET_NR_rt_sigtimedwait_time64: 9994 { 9995 sigset_t set; 9996 struct timespec uts, *puts; 9997 siginfo_t uinfo; 9998 9999 if (arg4 != sizeof(target_sigset_t)) { 10000 return -TARGET_EINVAL; 10001 } 10002 10003 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 10004 if (!p) { 10005 return -TARGET_EFAULT; 10006 } 10007 target_to_host_sigset(&set, p); 10008 unlock_user(p, arg1, 0); 10009 if (arg3) { 10010 puts = &uts; 10011 if (target_to_host_timespec64(puts, arg3)) { 10012 return -TARGET_EFAULT; 10013 } 10014 } else { 10015 puts = NULL; 10016 } 10017 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 10018 SIGSET_T_SIZE)); 10019 if (!is_error(ret)) { 10020 if (arg2) { 10021 p = lock_user(VERIFY_WRITE, arg2, 10022 sizeof(target_siginfo_t), 0); 10023 if (!p) { 10024 return -TARGET_EFAULT; 10025 } 10026 host_to_target_siginfo(p, &uinfo); 10027 unlock_user(p, arg2, sizeof(target_siginfo_t)); 10028 } 10029 ret = host_to_target_signal(ret); 10030 } 10031 } 10032 return ret; 10033 #endif 10034 case TARGET_NR_rt_sigqueueinfo: 10035 { 10036 siginfo_t uinfo; 10037 10038 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 10039 if (!p) { 10040 return -TARGET_EFAULT; 10041 } 10042 target_to_host_siginfo(&uinfo, p); 10043 unlock_user(p, arg3, 0); 10044 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo)); 10045 } 10046 return ret; 10047 case TARGET_NR_rt_tgsigqueueinfo: 10048 { 10049 siginfo_t uinfo; 10050 10051 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 10052 if (!p) { 10053 return -TARGET_EFAULT; 10054 } 10055 target_to_host_siginfo(&uinfo, p); 10056 unlock_user(p, arg4, 0); 10057 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo)); 10058 } 10059 return ret; 10060 #ifdef TARGET_NR_sigreturn 10061 case TARGET_NR_sigreturn: 10062 if (block_signals()) { 10063 return -QEMU_ERESTARTSYS; 10064 } 10065 return do_sigreturn(cpu_env); 10066 #endif 10067 case TARGET_NR_rt_sigreturn: 10068 if (block_signals()) { 10069 return -QEMU_ERESTARTSYS; 10070 } 10071 return do_rt_sigreturn(cpu_env); 10072 case TARGET_NR_sethostname: 10073 if (!(p = lock_user_string(arg1))) 10074 return -TARGET_EFAULT; 10075 ret = get_errno(sethostname(p, arg2)); 10076 unlock_user(p, arg1, 0); 10077 return ret; 10078 #ifdef TARGET_NR_setrlimit 10079 case TARGET_NR_setrlimit: 10080 { 10081 int resource = target_to_host_resource(arg1); 10082 struct target_rlimit *target_rlim; 10083 struct rlimit rlim; 10084 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 10085 return -TARGET_EFAULT; 10086 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 10087 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 10088 unlock_user_struct(target_rlim, arg2, 0); 10089 /* 10090 * If we just passed through resource limit settings for memory then 10091 * they would also apply to QEMU's own allocations, and QEMU will 10092 * crash or hang or die if its allocations fail. Ideally we would 10093 * track the guest allocations in QEMU and apply the limits ourselves. 10094 * For now, just tell the guest the call succeeded but don't actually 10095 * limit anything. 
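 * Concretely, the check below swallows RLIMIT_AS, RLIMIT_DATA and
 * RLIMIT_STACK and reports success without touching the host, while
 * every other resource is passed straight through to setrlimit().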
10096 */ 10097 if (resource != RLIMIT_AS && 10098 resource != RLIMIT_DATA && 10099 resource != RLIMIT_STACK) { 10100 return get_errno(setrlimit(resource, &rlim)); 10101 } else { 10102 return 0; 10103 } 10104 } 10105 #endif 10106 #ifdef TARGET_NR_getrlimit 10107 case TARGET_NR_getrlimit: 10108 { 10109 int resource = target_to_host_resource(arg1); 10110 struct target_rlimit *target_rlim; 10111 struct rlimit rlim; 10112 10113 ret = get_errno(getrlimit(resource, &rlim)); 10114 if (!is_error(ret)) { 10115 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 10116 return -TARGET_EFAULT; 10117 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 10118 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 10119 unlock_user_struct(target_rlim, arg2, 1); 10120 } 10121 } 10122 return ret; 10123 #endif 10124 case TARGET_NR_getrusage: 10125 { 10126 struct rusage rusage; 10127 ret = get_errno(getrusage(arg1, &rusage)); 10128 if (!is_error(ret)) { 10129 ret = host_to_target_rusage(arg2, &rusage); 10130 } 10131 } 10132 return ret; 10133 #if defined(TARGET_NR_gettimeofday) 10134 case TARGET_NR_gettimeofday: 10135 { 10136 struct timeval tv; 10137 struct timezone tz; 10138 10139 ret = get_errno(gettimeofday(&tv, &tz)); 10140 if (!is_error(ret)) { 10141 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 10142 return -TARGET_EFAULT; 10143 } 10144 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 10145 return -TARGET_EFAULT; 10146 } 10147 } 10148 } 10149 return ret; 10150 #endif 10151 #if defined(TARGET_NR_settimeofday) 10152 case TARGET_NR_settimeofday: 10153 { 10154 struct timeval tv, *ptv = NULL; 10155 struct timezone tz, *ptz = NULL; 10156 10157 if (arg1) { 10158 if (copy_from_user_timeval(&tv, arg1)) { 10159 return -TARGET_EFAULT; 10160 } 10161 ptv = &tv; 10162 } 10163 10164 if (arg2) { 10165 if (copy_from_user_timezone(&tz, arg2)) { 10166 return -TARGET_EFAULT; 10167 } 10168 ptz = &tz; 10169 } 10170 10171 return get_errno(settimeofday(ptv, ptz)); 10172 } 10173 #endif 10174 #if defined(TARGET_NR_select) 10175 case TARGET_NR_select: 10176 #if defined(TARGET_WANT_NI_OLD_SELECT) 10177 /* some architectures used to have old_select here 10178 * but now ENOSYS it. 
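 * (TARGET_WANT_NI_OLD_SELECT marks those. TARGET_WANT_OLD_SYS_SELECT
 * instead marks targets whose select takes a single pointer to an
 * argument block, which do_old_select() unpacks, rather than five
 * separate register arguments.)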
10179 */ 10180 ret = -TARGET_ENOSYS; 10181 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 10182 ret = do_old_select(arg1); 10183 #else 10184 ret = do_select(arg1, arg2, arg3, arg4, arg5); 10185 #endif 10186 return ret; 10187 #endif 10188 #ifdef TARGET_NR_pselect6 10189 case TARGET_NR_pselect6: 10190 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 10191 #endif 10192 #ifdef TARGET_NR_pselect6_time64 10193 case TARGET_NR_pselect6_time64: 10194 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 10195 #endif 10196 #ifdef TARGET_NR_symlink 10197 case TARGET_NR_symlink: 10198 { 10199 void *p2; 10200 p = lock_user_string(arg1); 10201 p2 = lock_user_string(arg2); 10202 if (!p || !p2) 10203 ret = -TARGET_EFAULT; 10204 else 10205 ret = get_errno(symlink(p, p2)); 10206 unlock_user(p2, arg2, 0); 10207 unlock_user(p, arg1, 0); 10208 } 10209 return ret; 10210 #endif 10211 #if defined(TARGET_NR_symlinkat) 10212 case TARGET_NR_symlinkat: 10213 { 10214 void *p2; 10215 p = lock_user_string(arg1); 10216 p2 = lock_user_string(arg3); 10217 if (!p || !p2) 10218 ret = -TARGET_EFAULT; 10219 else 10220 ret = get_errno(symlinkat(p, arg2, p2)); 10221 unlock_user(p2, arg3, 0); 10222 unlock_user(p, arg1, 0); 10223 } 10224 return ret; 10225 #endif 10226 #ifdef TARGET_NR_readlink 10227 case TARGET_NR_readlink: 10228 { 10229 void *p2; 10230 p = lock_user_string(arg1); 10231 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10232 if (!p || !p2) { 10233 ret = -TARGET_EFAULT; 10234 } else if (!arg3) { 10235 /* Short circuit this for the magic exe check. */ 10236 ret = -TARGET_EINVAL; 10237 } else if (is_proc_myself((const char *)p, "exe")) { 10238 /* 10239 * Don't worry about sign mismatch as earlier mapping 10240 * logic would have thrown a bad address error. 10241 */ 10242 ret = MIN(strlen(exec_path), arg3); 10243 /* We cannot NUL terminate the string. */ 10244 memcpy(p2, exec_path, ret); 10245 } else { 10246 ret = get_errno(readlink(path(p), p2, arg3)); 10247 } 10248 unlock_user(p2, arg2, ret); 10249 unlock_user(p, arg1, 0); 10250 } 10251 return ret; 10252 #endif 10253 #if defined(TARGET_NR_readlinkat) 10254 case TARGET_NR_readlinkat: 10255 { 10256 void *p2; 10257 p = lock_user_string(arg2); 10258 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 10259 if (!p || !p2) { 10260 ret = -TARGET_EFAULT; 10261 } else if (!arg4) { 10262 /* Short circuit this for the magic exe check. */ 10263 ret = -TARGET_EINVAL; 10264 } else if (is_proc_myself((const char *)p, "exe")) { 10265 /* 10266 * Don't worry about sign mismatch as earlier mapping 10267 * logic would have thrown a bad address error. 10268 */ 10269 ret = MIN(strlen(exec_path), arg4); 10270 /* We cannot NUL terminate the string. 
*/ 10271 memcpy(p2, exec_path, ret); 10272 } else { 10273 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 10274 } 10275 unlock_user(p2, arg3, ret); 10276 unlock_user(p, arg2, 0); 10277 } 10278 return ret; 10279 #endif 10280 #ifdef TARGET_NR_swapon 10281 case TARGET_NR_swapon: 10282 if (!(p = lock_user_string(arg1))) 10283 return -TARGET_EFAULT; 10284 ret = get_errno(swapon(p, arg2)); 10285 unlock_user(p, arg1, 0); 10286 return ret; 10287 #endif 10288 case TARGET_NR_reboot: 10289 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 10290 /* arg4 must be ignored in all other cases */ 10291 p = lock_user_string(arg4); 10292 if (!p) { 10293 return -TARGET_EFAULT; 10294 } 10295 ret = get_errno(reboot(arg1, arg2, arg3, p)); 10296 unlock_user(p, arg4, 0); 10297 } else { 10298 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 10299 } 10300 return ret; 10301 #ifdef TARGET_NR_mmap 10302 case TARGET_NR_mmap: 10303 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 10304 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 10305 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 10306 || defined(TARGET_S390X) 10307 { 10308 abi_ulong *v; 10309 abi_ulong v1, v2, v3, v4, v5, v6; 10310 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 10311 return -TARGET_EFAULT; 10312 v1 = tswapal(v[0]); 10313 v2 = tswapal(v[1]); 10314 v3 = tswapal(v[2]); 10315 v4 = tswapal(v[3]); 10316 v5 = tswapal(v[4]); 10317 v6 = tswapal(v[5]); 10318 unlock_user(v, arg1, 0); 10319 ret = get_errno(target_mmap(v1, v2, v3, 10320 target_to_host_bitmask(v4, mmap_flags_tbl), 10321 v5, v6)); 10322 } 10323 #else 10324 /* mmap pointers are always untagged */ 10325 ret = get_errno(target_mmap(arg1, arg2, arg3, 10326 target_to_host_bitmask(arg4, mmap_flags_tbl), 10327 arg5, 10328 arg6)); 10329 #endif 10330 return ret; 10331 #endif 10332 #ifdef TARGET_NR_mmap2 10333 case TARGET_NR_mmap2: 10334 #ifndef MMAP_SHIFT 10335 #define MMAP_SHIFT 12 10336 #endif 10337 ret = target_mmap(arg1, arg2, arg3, 10338 target_to_host_bitmask(arg4, mmap_flags_tbl), 10339 arg5, arg6 << MMAP_SHIFT); 10340 return get_errno(ret); 10341 #endif 10342 case TARGET_NR_munmap: 10343 arg1 = cpu_untagged_addr(cpu, arg1); 10344 return get_errno(target_munmap(arg1, arg2)); 10345 case TARGET_NR_mprotect: 10346 arg1 = cpu_untagged_addr(cpu, arg1); 10347 { 10348 TaskState *ts = cpu->opaque; 10349 /* Special hack to detect libc making the stack executable. */ 10350 if ((arg3 & PROT_GROWSDOWN) 10351 && arg1 >= ts->info->stack_limit 10352 && arg1 <= ts->info->start_stack) { 10353 arg3 &= ~PROT_GROWSDOWN; 10354 arg2 = arg2 + arg1 - ts->info->stack_limit; 10355 arg1 = ts->info->stack_limit; 10356 } 10357 } 10358 return get_errno(target_mprotect(arg1, arg2, arg3)); 10359 #ifdef TARGET_NR_mremap 10360 case TARGET_NR_mremap: 10361 arg1 = cpu_untagged_addr(cpu, arg1); 10362 /* mremap new_addr (arg5) is always untagged */ 10363 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 10364 #endif 10365 /* ??? msync/mlock/munlock are broken for softmmu. 
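   They act on g2h()-translated host addresses, which quietly assumes
   the guest range is backed by one contiguous host mapping.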
*/ 10366 #ifdef TARGET_NR_msync 10367 case TARGET_NR_msync: 10368 return get_errno(msync(g2h(cpu, arg1), arg2, 10369 target_to_host_msync_arg(arg3))); 10370 #endif 10371 #ifdef TARGET_NR_mlock 10372 case TARGET_NR_mlock: 10373 return get_errno(mlock(g2h(cpu, arg1), arg2)); 10374 #endif 10375 #ifdef TARGET_NR_munlock 10376 case TARGET_NR_munlock: 10377 return get_errno(munlock(g2h(cpu, arg1), arg2)); 10378 #endif 10379 #ifdef TARGET_NR_mlockall 10380 case TARGET_NR_mlockall: 10381 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 10382 #endif 10383 #ifdef TARGET_NR_munlockall 10384 case TARGET_NR_munlockall: 10385 return get_errno(munlockall()); 10386 #endif 10387 #ifdef TARGET_NR_truncate 10388 case TARGET_NR_truncate: 10389 if (!(p = lock_user_string(arg1))) 10390 return -TARGET_EFAULT; 10391 ret = get_errno(truncate(p, arg2)); 10392 unlock_user(p, arg1, 0); 10393 return ret; 10394 #endif 10395 #ifdef TARGET_NR_ftruncate 10396 case TARGET_NR_ftruncate: 10397 return get_errno(ftruncate(arg1, arg2)); 10398 #endif 10399 case TARGET_NR_fchmod: 10400 return get_errno(fchmod(arg1, arg2)); 10401 #if defined(TARGET_NR_fchmodat) 10402 case TARGET_NR_fchmodat: 10403 if (!(p = lock_user_string(arg2))) 10404 return -TARGET_EFAULT; 10405 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 10406 unlock_user(p, arg2, 0); 10407 return ret; 10408 #endif 10409 case TARGET_NR_getpriority: 10410 /* Note that negative values are valid for getpriority, so we must 10411 differentiate based on errno settings. */ 10412 errno = 0; 10413 ret = getpriority(arg1, arg2); 10414 if (ret == -1 && errno != 0) { 10415 return -host_to_target_errno(errno); 10416 } 10417 #ifdef TARGET_ALPHA 10418 /* Return value is the unbiased priority. Signal no error. */ 10419 cpu_env->ir[IR_V0] = 0; 10420 #else 10421 /* Return value is a biased priority to avoid negative numbers. 
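   The kernel ABI returns 20 - nice, i.e. a 1..40 range (nice 19 maps
   to 1, nice -20 maps to 40); the subtraction below rebuilds that
   bias from the unbiased value the host getpriority() gave us.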
*/ 10422 ret = 20 - ret; 10423 #endif 10424 return ret; 10425 case TARGET_NR_setpriority: 10426 return get_errno(setpriority(arg1, arg2, arg3)); 10427 #ifdef TARGET_NR_statfs 10428 case TARGET_NR_statfs: 10429 if (!(p = lock_user_string(arg1))) { 10430 return -TARGET_EFAULT; 10431 } 10432 ret = get_errno(statfs(path(p), &stfs)); 10433 unlock_user(p, arg1, 0); 10434 convert_statfs: 10435 if (!is_error(ret)) { 10436 struct target_statfs *target_stfs; 10437 10438 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 10439 return -TARGET_EFAULT; 10440 __put_user(stfs.f_type, &target_stfs->f_type); 10441 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10442 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10443 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10444 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10445 __put_user(stfs.f_files, &target_stfs->f_files); 10446 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10447 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10448 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10449 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10450 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10451 #ifdef _STATFS_F_FLAGS 10452 __put_user(stfs.f_flags, &target_stfs->f_flags); 10453 #else 10454 __put_user(0, &target_stfs->f_flags); 10455 #endif 10456 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10457 unlock_user_struct(target_stfs, arg2, 1); 10458 } 10459 return ret; 10460 #endif 10461 #ifdef TARGET_NR_fstatfs 10462 case TARGET_NR_fstatfs: 10463 ret = get_errno(fstatfs(arg1, &stfs)); 10464 goto convert_statfs; 10465 #endif 10466 #ifdef TARGET_NR_statfs64 10467 case TARGET_NR_statfs64: 10468 if (!(p = lock_user_string(arg1))) { 10469 return -TARGET_EFAULT; 10470 } 10471 ret = get_errno(statfs(path(p), &stfs)); 10472 unlock_user(p, arg1, 0); 10473 convert_statfs64: 10474 if (!is_error(ret)) { 10475 struct target_statfs64 *target_stfs; 10476 10477 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 10478 return -TARGET_EFAULT; 10479 __put_user(stfs.f_type, &target_stfs->f_type); 10480 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10481 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10482 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10483 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10484 __put_user(stfs.f_files, &target_stfs->f_files); 10485 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10486 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10487 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10488 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10489 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10490 #ifdef _STATFS_F_FLAGS 10491 __put_user(stfs.f_flags, &target_stfs->f_flags); 10492 #else 10493 __put_user(0, &target_stfs->f_flags); 10494 #endif 10495 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10496 unlock_user_struct(target_stfs, arg3, 1); 10497 } 10498 return ret; 10499 case TARGET_NR_fstatfs64: 10500 ret = get_errno(fstatfs(arg1, &stfs)); 10501 goto convert_statfs64; 10502 #endif 10503 #ifdef TARGET_NR_socketcall 10504 case TARGET_NR_socketcall: 10505 return do_socketcall(arg1, arg2); 10506 #endif 10507 #ifdef TARGET_NR_accept 10508 case TARGET_NR_accept: 10509 return do_accept4(arg1, arg2, arg3, 0); 10510 #endif 10511 #ifdef TARGET_NR_accept4 10512 case TARGET_NR_accept4: 10513 return do_accept4(arg1, arg2, arg3, arg4); 10514 #endif 10515 #ifdef TARGET_NR_bind 10516 case TARGET_NR_bind: 10517 return 
do_bind(arg1, arg2, arg3); 10518 #endif 10519 #ifdef TARGET_NR_connect 10520 case TARGET_NR_connect: 10521 return do_connect(arg1, arg2, arg3); 10522 #endif 10523 #ifdef TARGET_NR_getpeername 10524 case TARGET_NR_getpeername: 10525 return do_getpeername(arg1, arg2, arg3); 10526 #endif 10527 #ifdef TARGET_NR_getsockname 10528 case TARGET_NR_getsockname: 10529 return do_getsockname(arg1, arg2, arg3); 10530 #endif 10531 #ifdef TARGET_NR_getsockopt 10532 case TARGET_NR_getsockopt: 10533 return do_getsockopt(arg1, arg2, arg3, arg4, arg5); 10534 #endif 10535 #ifdef TARGET_NR_listen 10536 case TARGET_NR_listen: 10537 return get_errno(listen(arg1, arg2)); 10538 #endif 10539 #ifdef TARGET_NR_recv 10540 case TARGET_NR_recv: 10541 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 10542 #endif 10543 #ifdef TARGET_NR_recvfrom 10544 case TARGET_NR_recvfrom: 10545 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 10546 #endif 10547 #ifdef TARGET_NR_recvmsg 10548 case TARGET_NR_recvmsg: 10549 return do_sendrecvmsg(arg1, arg2, arg3, 0); 10550 #endif 10551 #ifdef TARGET_NR_send 10552 case TARGET_NR_send: 10553 return do_sendto(arg1, arg2, arg3, arg4, 0, 0); 10554 #endif 10555 #ifdef TARGET_NR_sendmsg 10556 case TARGET_NR_sendmsg: 10557 return do_sendrecvmsg(arg1, arg2, arg3, 1); 10558 #endif 10559 #ifdef TARGET_NR_sendmmsg 10560 case TARGET_NR_sendmmsg: 10561 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 10562 #endif 10563 #ifdef TARGET_NR_recvmmsg 10564 case TARGET_NR_recvmmsg: 10565 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 10566 #endif 10567 #ifdef TARGET_NR_sendto 10568 case TARGET_NR_sendto: 10569 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 10570 #endif 10571 #ifdef TARGET_NR_shutdown 10572 case TARGET_NR_shutdown: 10573 return get_errno(shutdown(arg1, arg2)); 10574 #endif 10575 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 10576 case TARGET_NR_getrandom: 10577 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 10578 if (!p) { 10579 return -TARGET_EFAULT; 10580 } 10581 ret = get_errno(getrandom(p, arg2, arg3)); 10582 unlock_user(p, arg1, ret); 10583 return ret; 10584 #endif 10585 #ifdef TARGET_NR_socket 10586 case TARGET_NR_socket: 10587 return do_socket(arg1, arg2, arg3); 10588 #endif 10589 #ifdef TARGET_NR_socketpair 10590 case TARGET_NR_socketpair: 10591 return do_socketpair(arg1, arg2, arg3, arg4); 10592 #endif 10593 #ifdef TARGET_NR_setsockopt 10594 case TARGET_NR_setsockopt: 10595 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 10596 #endif 10597 #if defined(TARGET_NR_syslog) 10598 case TARGET_NR_syslog: 10599 { 10600 int len = arg2; 10601 10602 switch (arg1) { 10603 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 10604 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 10605 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 10606 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 10607 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 10608 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 10609 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 10610 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 10611 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 10612 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 10613 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 10614 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 10615 { 10616 if (len < 0) { 10617 return -TARGET_EINVAL; 10618 } 10619 if (len == 0) { 10620 return 0; 10621 } 10622 p = 
lock_user(VERIFY_WRITE, arg2, arg3, 0); 10623 if (!p) { 10624 return -TARGET_EFAULT; 10625 } 10626 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 10627 unlock_user(p, arg2, arg3); 10628 } 10629 return ret; 10630 default: 10631 return -TARGET_EINVAL; 10632 } 10633 } 10634 break; 10635 #endif 10636 case TARGET_NR_setitimer: 10637 { 10638 struct itimerval value, ovalue, *pvalue; 10639 10640 if (arg2) { 10641 pvalue = &value; 10642 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 10643 || copy_from_user_timeval(&pvalue->it_value, 10644 arg2 + sizeof(struct target_timeval))) 10645 return -TARGET_EFAULT; 10646 } else { 10647 pvalue = NULL; 10648 } 10649 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 10650 if (!is_error(ret) && arg3) { 10651 if (copy_to_user_timeval(arg3, 10652 &ovalue.it_interval) 10653 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 10654 &ovalue.it_value)) 10655 return -TARGET_EFAULT; 10656 } 10657 } 10658 return ret; 10659 case TARGET_NR_getitimer: 10660 { 10661 struct itimerval value; 10662 10663 ret = get_errno(getitimer(arg1, &value)); 10664 if (!is_error(ret) && arg2) { 10665 if (copy_to_user_timeval(arg2, 10666 &value.it_interval) 10667 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 10668 &value.it_value)) 10669 return -TARGET_EFAULT; 10670 } 10671 } 10672 return ret; 10673 #ifdef TARGET_NR_stat 10674 case TARGET_NR_stat: 10675 if (!(p = lock_user_string(arg1))) { 10676 return -TARGET_EFAULT; 10677 } 10678 ret = get_errno(stat(path(p), &st)); 10679 unlock_user(p, arg1, 0); 10680 goto do_stat; 10681 #endif 10682 #ifdef TARGET_NR_lstat 10683 case TARGET_NR_lstat: 10684 if (!(p = lock_user_string(arg1))) { 10685 return -TARGET_EFAULT; 10686 } 10687 ret = get_errno(lstat(path(p), &st)); 10688 unlock_user(p, arg1, 0); 10689 goto do_stat; 10690 #endif 10691 #ifdef TARGET_NR_fstat 10692 case TARGET_NR_fstat: 10693 { 10694 ret = get_errno(fstat(arg1, &st)); 10695 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 10696 do_stat: 10697 #endif 10698 if (!is_error(ret)) { 10699 struct target_stat *target_st; 10700 10701 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 10702 return -TARGET_EFAULT; 10703 memset(target_st, 0, sizeof(*target_st)); 10704 __put_user(st.st_dev, &target_st->st_dev); 10705 __put_user(st.st_ino, &target_st->st_ino); 10706 __put_user(st.st_mode, &target_st->st_mode); 10707 __put_user(st.st_uid, &target_st->st_uid); 10708 __put_user(st.st_gid, &target_st->st_gid); 10709 __put_user(st.st_nlink, &target_st->st_nlink); 10710 __put_user(st.st_rdev, &target_st->st_rdev); 10711 __put_user(st.st_size, &target_st->st_size); 10712 __put_user(st.st_blksize, &target_st->st_blksize); 10713 __put_user(st.st_blocks, &target_st->st_blocks); 10714 __put_user(st.st_atime, &target_st->target_st_atime); 10715 __put_user(st.st_mtime, &target_st->target_st_mtime); 10716 __put_user(st.st_ctime, &target_st->target_st_ctime); 10717 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) 10718 __put_user(st.st_atim.tv_nsec, 10719 &target_st->target_st_atime_nsec); 10720 __put_user(st.st_mtim.tv_nsec, 10721 &target_st->target_st_mtime_nsec); 10722 __put_user(st.st_ctim.tv_nsec, 10723 &target_st->target_st_ctime_nsec); 10724 #endif 10725 unlock_user_struct(target_st, arg2, 1); 10726 } 10727 } 10728 return ret; 10729 #endif 10730 case TARGET_NR_vhangup: 10731 return get_errno(vhangup()); 10732 #ifdef TARGET_NR_syscall 10733 case TARGET_NR_syscall: 10734 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, 
arg5, 10735 arg6, arg7, arg8, 0); 10736 #endif 10737 #if defined(TARGET_NR_wait4) 10738 case TARGET_NR_wait4: 10739 { 10740 int status; 10741 abi_long status_ptr = arg2; 10742 struct rusage rusage, *rusage_ptr; 10743 abi_ulong target_rusage = arg4; 10744 abi_long rusage_err; 10745 if (target_rusage) 10746 rusage_ptr = &rusage; 10747 else 10748 rusage_ptr = NULL; 10749 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10750 if (!is_error(ret)) { 10751 if (status_ptr && ret) { 10752 status = host_to_target_waitstatus(status); 10753 if (put_user_s32(status, status_ptr)) 10754 return -TARGET_EFAULT; 10755 } 10756 if (target_rusage) { 10757 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10758 if (rusage_err) { 10759 ret = rusage_err; 10760 } 10761 } 10762 } 10763 } 10764 return ret; 10765 #endif 10766 #ifdef TARGET_NR_swapoff 10767 case TARGET_NR_swapoff: 10768 if (!(p = lock_user_string(arg1))) 10769 return -TARGET_EFAULT; 10770 ret = get_errno(swapoff(p)); 10771 unlock_user(p, arg1, 0); 10772 return ret; 10773 #endif 10774 case TARGET_NR_sysinfo: 10775 { 10776 struct target_sysinfo *target_value; 10777 struct sysinfo value; 10778 ret = get_errno(sysinfo(&value)); 10779 if (!is_error(ret) && arg1) 10780 { 10781 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 10782 return -TARGET_EFAULT; 10783 __put_user(value.uptime, &target_value->uptime); 10784 __put_user(value.loads[0], &target_value->loads[0]); 10785 __put_user(value.loads[1], &target_value->loads[1]); 10786 __put_user(value.loads[2], &target_value->loads[2]); 10787 __put_user(value.totalram, &target_value->totalram); 10788 __put_user(value.freeram, &target_value->freeram); 10789 __put_user(value.sharedram, &target_value->sharedram); 10790 __put_user(value.bufferram, &target_value->bufferram); 10791 __put_user(value.totalswap, &target_value->totalswap); 10792 __put_user(value.freeswap, &target_value->freeswap); 10793 __put_user(value.procs, &target_value->procs); 10794 __put_user(value.totalhigh, &target_value->totalhigh); 10795 __put_user(value.freehigh, &target_value->freehigh); 10796 __put_user(value.mem_unit, &target_value->mem_unit); 10797 unlock_user_struct(target_value, arg1, 1); 10798 } 10799 } 10800 return ret; 10801 #ifdef TARGET_NR_ipc 10802 case TARGET_NR_ipc: 10803 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10804 #endif 10805 #ifdef TARGET_NR_semget 10806 case TARGET_NR_semget: 10807 return get_errno(semget(arg1, arg2, arg3)); 10808 #endif 10809 #ifdef TARGET_NR_semop 10810 case TARGET_NR_semop: 10811 return do_semtimedop(arg1, arg2, arg3, 0, false); 10812 #endif 10813 #ifdef TARGET_NR_semtimedop 10814 case TARGET_NR_semtimedop: 10815 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10816 #endif 10817 #ifdef TARGET_NR_semtimedop_time64 10818 case TARGET_NR_semtimedop_time64: 10819 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10820 #endif 10821 #ifdef TARGET_NR_semctl 10822 case TARGET_NR_semctl: 10823 return do_semctl(arg1, arg2, arg3, arg4); 10824 #endif 10825 #ifdef TARGET_NR_msgctl 10826 case TARGET_NR_msgctl: 10827 return do_msgctl(arg1, arg2, arg3); 10828 #endif 10829 #ifdef TARGET_NR_msgget 10830 case TARGET_NR_msgget: 10831 return get_errno(msgget(arg1, arg2)); 10832 #endif 10833 #ifdef TARGET_NR_msgrcv 10834 case TARGET_NR_msgrcv: 10835 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10836 #endif 10837 #ifdef TARGET_NR_msgsnd 10838 case TARGET_NR_msgsnd: 10839 return do_msgsnd(arg1, arg2, arg3, arg4); 10840 #endif 10841 #ifdef TARGET_NR_shmget 10842 case 
TARGET_NR_shmget: 10843 return get_errno(shmget(arg1, arg2, arg3)); 10844 #endif 10845 #ifdef TARGET_NR_shmctl 10846 case TARGET_NR_shmctl: 10847 return do_shmctl(arg1, arg2, arg3); 10848 #endif 10849 #ifdef TARGET_NR_shmat 10850 case TARGET_NR_shmat: 10851 return do_shmat(cpu_env, arg1, arg2, arg3); 10852 #endif 10853 #ifdef TARGET_NR_shmdt 10854 case TARGET_NR_shmdt: 10855 return do_shmdt(arg1); 10856 #endif 10857 case TARGET_NR_fsync: 10858 return get_errno(fsync(arg1)); 10859 case TARGET_NR_clone: 10860 /* Linux manages to have three different orderings for its 10861 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10862 * match the kernel's CONFIG_CLONE_* settings. 10863 * Microblaze is further special in that it uses a sixth 10864 * implicit argument to clone for the TLS pointer. 10865 */ 10866 #if defined(TARGET_MICROBLAZE) 10867 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10868 #elif defined(TARGET_CLONE_BACKWARDS) 10869 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10870 #elif defined(TARGET_CLONE_BACKWARDS2) 10871 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10872 #else 10873 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10874 #endif 10875 return ret; 10876 #ifdef __NR_exit_group 10877 /* new thread calls */ 10878 case TARGET_NR_exit_group: 10879 preexit_cleanup(cpu_env, arg1); 10880 return get_errno(exit_group(arg1)); 10881 #endif 10882 case TARGET_NR_setdomainname: 10883 if (!(p = lock_user_string(arg1))) 10884 return -TARGET_EFAULT; 10885 ret = get_errno(setdomainname(p, arg2)); 10886 unlock_user(p, arg1, 0); 10887 return ret; 10888 case TARGET_NR_uname: 10889 /* no need to transcode because we use the linux syscall */ 10890 { 10891 struct new_utsname * buf; 10892 10893 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10894 return -TARGET_EFAULT; 10895 ret = get_errno(sys_uname(buf)); 10896 if (!is_error(ret)) { 10897 /* Overwrite the native machine name with whatever is being 10898 emulated. */ 10899 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10900 sizeof(buf->machine)); 10901 /* Allow the user to override the reported release. 
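   (via the -r command line option or the QEMU_UNAME environment
   variable), which matters when a guest libc refuses to start on
   what it believes is too old a kernel.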
*/ 10902 if (qemu_uname_release && *qemu_uname_release) { 10903 g_strlcpy(buf->release, qemu_uname_release, 10904 sizeof(buf->release)); 10905 } 10906 } 10907 unlock_user_struct(buf, arg1, 1); 10908 } 10909 return ret; 10910 #ifdef TARGET_I386 10911 case TARGET_NR_modify_ldt: 10912 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10913 #if !defined(TARGET_X86_64) 10914 case TARGET_NR_vm86: 10915 return do_vm86(cpu_env, arg1, arg2); 10916 #endif 10917 #endif 10918 #if defined(TARGET_NR_adjtimex) 10919 case TARGET_NR_adjtimex: 10920 { 10921 struct timex host_buf; 10922 10923 if (target_to_host_timex(&host_buf, arg1) != 0) { 10924 return -TARGET_EFAULT; 10925 } 10926 ret = get_errno(adjtimex(&host_buf)); 10927 if (!is_error(ret)) { 10928 if (host_to_target_timex(arg1, &host_buf) != 0) { 10929 return -TARGET_EFAULT; 10930 } 10931 } 10932 } 10933 return ret; 10934 #endif 10935 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10936 case TARGET_NR_clock_adjtime: 10937 { 10938 struct timex htx, *phtx = &htx; 10939 10940 if (target_to_host_timex(phtx, arg2) != 0) { 10941 return -TARGET_EFAULT; 10942 } 10943 ret = get_errno(clock_adjtime(arg1, phtx)); 10944 if (!is_error(ret) && phtx) { 10945 if (host_to_target_timex(arg2, phtx) != 0) { 10946 return -TARGET_EFAULT; 10947 } 10948 } 10949 } 10950 return ret; 10951 #endif 10952 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10953 case TARGET_NR_clock_adjtime64: 10954 { 10955 struct timex htx; 10956 10957 if (target_to_host_timex64(&htx, arg2) != 0) { 10958 return -TARGET_EFAULT; 10959 } 10960 ret = get_errno(clock_adjtime(arg1, &htx)); 10961 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10962 return -TARGET_EFAULT; 10963 } 10964 } 10965 return ret; 10966 #endif 10967 case TARGET_NR_getpgid: 10968 return get_errno(getpgid(arg1)); 10969 case TARGET_NR_fchdir: 10970 return get_errno(fchdir(arg1)); 10971 case TARGET_NR_personality: 10972 return get_errno(personality(arg1)); 10973 #ifdef TARGET_NR__llseek /* Not on alpha */ 10974 case TARGET_NR__llseek: 10975 { 10976 int64_t res; 10977 #if !defined(__NR_llseek) 10978 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10979 if (res == -1) { 10980 ret = get_errno(res); 10981 } else { 10982 ret = 0; 10983 } 10984 #else 10985 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10986 #endif 10987 if ((ret == 0) && put_user_s64(res, arg4)) { 10988 return -TARGET_EFAULT; 10989 } 10990 } 10991 return ret; 10992 #endif 10993 #ifdef TARGET_NR_getdents 10994 case TARGET_NR_getdents: 10995 return do_getdents(arg1, arg2, arg3); 10996 #endif /* TARGET_NR_getdents */ 10997 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10998 case TARGET_NR_getdents64: 10999 return do_getdents64(arg1, arg2, arg3); 11000 #endif /* TARGET_NR_getdents64 */ 11001 #if defined(TARGET_NR__newselect) 11002 case TARGET_NR__newselect: 11003 return do_select(arg1, arg2, arg3, arg4, arg5); 11004 #endif 11005 #ifdef TARGET_NR_poll 11006 case TARGET_NR_poll: 11007 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 11008 #endif 11009 #ifdef TARGET_NR_ppoll 11010 case TARGET_NR_ppoll: 11011 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 11012 #endif 11013 #ifdef TARGET_NR_ppoll_time64 11014 case TARGET_NR_ppoll_time64: 11015 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 11016 #endif 11017 case TARGET_NR_flock: 11018 /* NOTE: the flock constant seems to be the same for every 11019 Linux platform */ 11020 return 
get_errno(safe_flock(arg1, arg2)); 11021 case TARGET_NR_readv: 11022 { 11023 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 11024 if (vec != NULL) { 11025 ret = get_errno(safe_readv(arg1, vec, arg3)); 11026 unlock_iovec(vec, arg2, arg3, 1); 11027 } else { 11028 ret = -host_to_target_errno(errno); 11029 } 11030 } 11031 return ret; 11032 case TARGET_NR_writev: 11033 { 11034 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 11035 if (vec != NULL) { 11036 ret = get_errno(safe_writev(arg1, vec, arg3)); 11037 unlock_iovec(vec, arg2, arg3, 0); 11038 } else { 11039 ret = -host_to_target_errno(errno); 11040 } 11041 } 11042 return ret; 11043 #if defined(TARGET_NR_preadv) 11044 case TARGET_NR_preadv: 11045 { 11046 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 11047 if (vec != NULL) { 11048 unsigned long low, high; 11049 11050 target_to_host_low_high(arg4, arg5, &low, &high); 11051 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 11052 unlock_iovec(vec, arg2, arg3, 1); 11053 } else { 11054 ret = -host_to_target_errno(errno); 11055 } 11056 } 11057 return ret; 11058 #endif 11059 #if defined(TARGET_NR_pwritev) 11060 case TARGET_NR_pwritev: 11061 { 11062 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 11063 if (vec != NULL) { 11064 unsigned long low, high; 11065 11066 target_to_host_low_high(arg4, arg5, &low, &high); 11067 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 11068 unlock_iovec(vec, arg2, arg3, 0); 11069 } else { 11070 ret = -host_to_target_errno(errno); 11071 } 11072 } 11073 return ret; 11074 #endif 11075 case TARGET_NR_getsid: 11076 return get_errno(getsid(arg1)); 11077 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 11078 case TARGET_NR_fdatasync: 11079 return get_errno(fdatasync(arg1)); 11080 #endif 11081 case TARGET_NR_sched_getaffinity: 11082 { 11083 unsigned int mask_size; 11084 unsigned long *mask; 11085 11086 /* 11087 * sched_getaffinity needs multiples of ulong, so need to take 11088 * care of mismatches between target ulong and host ulong sizes. 11089 */ 11090 if (arg2 & (sizeof(abi_ulong) - 1)) { 11091 return -TARGET_EINVAL; 11092 } 11093 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 11094 11095 mask = alloca(mask_size); 11096 memset(mask, 0, mask_size); 11097 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 11098 11099 if (!is_error(ret)) { 11100 if (ret > arg2) { 11101 /* More data returned than the caller's buffer will fit. 11102 * This only happens if sizeof(abi_long) < sizeof(long) 11103 * and the caller passed us a buffer holding an odd number 11104 * of abi_longs. If the host kernel is actually using the 11105 * extra 4 bytes then fail EINVAL; otherwise we can just 11106 * ignore them and only copy the interesting part. 11107 */ 11108 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 11109 if (numcpus > arg2 * 8) { 11110 return -TARGET_EINVAL; 11111 } 11112 ret = arg2; 11113 } 11114 11115 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 11116 return -TARGET_EFAULT; 11117 } 11118 } 11119 } 11120 return ret; 11121 case TARGET_NR_sched_setaffinity: 11122 { 11123 unsigned int mask_size; 11124 unsigned long *mask; 11125 11126 /* 11127 * sched_setaffinity needs multiples of ulong, so need to take 11128 * care of mismatches between target ulong and host ulong sizes. 
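 * The length is rounded up to a whole number of host longs below, so
 * e.g. a 32-bit guest passing a 4-byte mask to a 64-bit host gets an
 * 8-byte host buffer, with the tail beyond the guest-supplied bytes
 * expected to be zero-filled by target_to_host_cpu_mask().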
11129 */ 11130 if (arg2 & (sizeof(abi_ulong) - 1)) { 11131 return -TARGET_EINVAL; 11132 } 11133 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 11134 mask = alloca(mask_size); 11135 11136 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 11137 if (ret) { 11138 return ret; 11139 } 11140 11141 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 11142 } 11143 case TARGET_NR_getcpu: 11144 { 11145 unsigned cpu, node; 11146 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 11147 arg2 ? &node : NULL, 11148 NULL)); 11149 if (is_error(ret)) { 11150 return ret; 11151 } 11152 if (arg1 && put_user_u32(cpu, arg1)) { 11153 return -TARGET_EFAULT; 11154 } 11155 if (arg2 && put_user_u32(node, arg2)) { 11156 return -TARGET_EFAULT; 11157 } 11158 } 11159 return ret; 11160 case TARGET_NR_sched_setparam: 11161 { 11162 struct target_sched_param *target_schp; 11163 struct sched_param schp; 11164 11165 if (arg2 == 0) { 11166 return -TARGET_EINVAL; 11167 } 11168 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) { 11169 return -TARGET_EFAULT; 11170 } 11171 schp.sched_priority = tswap32(target_schp->sched_priority); 11172 unlock_user_struct(target_schp, arg2, 0); 11173 return get_errno(sys_sched_setparam(arg1, &schp)); 11174 } 11175 case TARGET_NR_sched_getparam: 11176 { 11177 struct target_sched_param *target_schp; 11178 struct sched_param schp; 11179 11180 if (arg2 == 0) { 11181 return -TARGET_EINVAL; 11182 } 11183 ret = get_errno(sys_sched_getparam(arg1, &schp)); 11184 if (!is_error(ret)) { 11185 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) { 11186 return -TARGET_EFAULT; 11187 } 11188 target_schp->sched_priority = tswap32(schp.sched_priority); 11189 unlock_user_struct(target_schp, arg2, 1); 11190 } 11191 } 11192 return ret; 11193 case TARGET_NR_sched_setscheduler: 11194 { 11195 struct target_sched_param *target_schp; 11196 struct sched_param schp; 11197 if (arg3 == 0) { 11198 return -TARGET_EINVAL; 11199 } 11200 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) { 11201 return -TARGET_EFAULT; 11202 } 11203 schp.sched_priority = tswap32(target_schp->sched_priority); 11204 unlock_user_struct(target_schp, arg3, 0); 11205 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp)); 11206 } 11207 case TARGET_NR_sched_getscheduler: 11208 return get_errno(sys_sched_getscheduler(arg1)); 11209 case TARGET_NR_sched_getattr: 11210 { 11211 struct target_sched_attr *target_scha; 11212 struct sched_attr scha; 11213 if (arg2 == 0) { 11214 return -TARGET_EINVAL; 11215 } 11216 if (arg3 > sizeof(scha)) { 11217 arg3 = sizeof(scha); 11218 } 11219 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4)); 11220 if (!is_error(ret)) { 11221 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11222 if (!target_scha) { 11223 return -TARGET_EFAULT; 11224 } 11225 target_scha->size = tswap32(scha.size); 11226 target_scha->sched_policy = tswap32(scha.sched_policy); 11227 target_scha->sched_flags = tswap64(scha.sched_flags); 11228 target_scha->sched_nice = tswap32(scha.sched_nice); 11229 target_scha->sched_priority = tswap32(scha.sched_priority); 11230 target_scha->sched_runtime = tswap64(scha.sched_runtime); 11231 target_scha->sched_deadline = tswap64(scha.sched_deadline); 11232 target_scha->sched_period = tswap64(scha.sched_period); 11233 if (scha.size > offsetof(struct sched_attr, sched_util_min)) { 11234 target_scha->sched_util_min = tswap32(scha.sched_util_min); 11235 target_scha->sched_util_max = tswap32(scha.sched_util_max); 11236 } 11237 unlock_user(target_scha, arg2, 
arg3); 11238 } 11239 return ret; 11240 } 11241 case TARGET_NR_sched_setattr: 11242 { 11243 struct target_sched_attr *target_scha; 11244 struct sched_attr scha; 11245 uint32_t size; 11246 int zeroed; 11247 if (arg2 == 0) { 11248 return -TARGET_EINVAL; 11249 } 11250 if (get_user_u32(size, arg2)) { 11251 return -TARGET_EFAULT; 11252 } 11253 if (!size) { 11254 size = offsetof(struct target_sched_attr, sched_util_min); 11255 } 11256 if (size < offsetof(struct target_sched_attr, sched_util_min)) { 11257 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 11258 return -TARGET_EFAULT; 11259 } 11260 return -TARGET_E2BIG; 11261 } 11262 11263 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size); 11264 if (zeroed < 0) { 11265 return zeroed; 11266 } else if (zeroed == 0) { 11267 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 11268 return -TARGET_EFAULT; 11269 } 11270 return -TARGET_E2BIG; 11271 } 11272 if (size > sizeof(struct target_sched_attr)) { 11273 size = sizeof(struct target_sched_attr); 11274 } 11275 11276 target_scha = lock_user(VERIFY_READ, arg2, size, 1); 11277 if (!target_scha) { 11278 return -TARGET_EFAULT; 11279 } 11280 scha.size = size; 11281 scha.sched_policy = tswap32(target_scha->sched_policy); 11282 scha.sched_flags = tswap64(target_scha->sched_flags); 11283 scha.sched_nice = tswap32(target_scha->sched_nice); 11284 scha.sched_priority = tswap32(target_scha->sched_priority); 11285 scha.sched_runtime = tswap64(target_scha->sched_runtime); 11286 scha.sched_deadline = tswap64(target_scha->sched_deadline); 11287 scha.sched_period = tswap64(target_scha->sched_period); 11288 if (size > offsetof(struct target_sched_attr, sched_util_min)) { 11289 scha.sched_util_min = tswap32(target_scha->sched_util_min); 11290 scha.sched_util_max = tswap32(target_scha->sched_util_max); 11291 } 11292 unlock_user(target_scha, arg2, 0); 11293 return get_errno(sys_sched_setattr(arg1, &scha, arg3)); 11294 } 11295 case TARGET_NR_sched_yield: 11296 return get_errno(sched_yield()); 11297 case TARGET_NR_sched_get_priority_max: 11298 return get_errno(sched_get_priority_max(arg1)); 11299 case TARGET_NR_sched_get_priority_min: 11300 return get_errno(sched_get_priority_min(arg1)); 11301 #ifdef TARGET_NR_sched_rr_get_interval 11302 case TARGET_NR_sched_rr_get_interval: 11303 { 11304 struct timespec ts; 11305 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 11306 if (!is_error(ret)) { 11307 ret = host_to_target_timespec(arg2, &ts); 11308 } 11309 } 11310 return ret; 11311 #endif 11312 #ifdef TARGET_NR_sched_rr_get_interval_time64 11313 case TARGET_NR_sched_rr_get_interval_time64: 11314 { 11315 struct timespec ts; 11316 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 11317 if (!is_error(ret)) { 11318 ret = host_to_target_timespec64(arg2, &ts); 11319 } 11320 } 11321 return ret; 11322 #endif 11323 #if defined(TARGET_NR_nanosleep) 11324 case TARGET_NR_nanosleep: 11325 { 11326 struct timespec req, rem; 11327 target_to_host_timespec(&req, arg1); 11328 ret = get_errno(safe_nanosleep(&req, &rem)); 11329 if (is_error(ret) && arg2) { 11330 host_to_target_timespec(arg2, &rem); 11331 } 11332 } 11333 return ret; 11334 #endif 11335 case TARGET_NR_prctl: 11336 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5); 11337 break; 11338 #ifdef TARGET_NR_arch_prctl 11339 case TARGET_NR_arch_prctl: 11340 return do_arch_prctl(cpu_env, arg1, arg2); 11341 #endif 11342 #ifdef TARGET_NR_pread64 11343 case TARGET_NR_pread64: 11344 if (regpairs_aligned(cpu_env, num)) { 11345 arg4 = arg5; 11346 arg5 = 
arg6; 11347 } 11348 if (arg2 == 0 && arg3 == 0) { 11349 /* Special-case NULL buffer and zero length, which should succeed */ 11350 p = 0; 11351 } else { 11352 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11353 if (!p) { 11354 return -TARGET_EFAULT; 11355 } 11356 } 11357 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 11358 unlock_user(p, arg2, ret); 11359 return ret; 11360 case TARGET_NR_pwrite64: 11361 if (regpairs_aligned(cpu_env, num)) { 11362 arg4 = arg5; 11363 arg5 = arg6; 11364 } 11365 if (arg2 == 0 && arg3 == 0) { 11366 /* Special-case NULL buffer and zero length, which should succeed */ 11367 p = 0; 11368 } else { 11369 p = lock_user(VERIFY_READ, arg2, arg3, 1); 11370 if (!p) { 11371 return -TARGET_EFAULT; 11372 } 11373 } 11374 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 11375 unlock_user(p, arg2, 0); 11376 return ret; 11377 #endif 11378 case TARGET_NR_getcwd: 11379 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 11380 return -TARGET_EFAULT; 11381 ret = get_errno(sys_getcwd1(p, arg2)); 11382 unlock_user(p, arg1, ret); 11383 return ret; 11384 case TARGET_NR_capget: 11385 case TARGET_NR_capset: 11386 { 11387 struct target_user_cap_header *target_header; 11388 struct target_user_cap_data *target_data = NULL; 11389 struct __user_cap_header_struct header; 11390 struct __user_cap_data_struct data[2]; 11391 struct __user_cap_data_struct *dataptr = NULL; 11392 int i, target_datalen; 11393 int data_items = 1; 11394 11395 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 11396 return -TARGET_EFAULT; 11397 } 11398 header.version = tswap32(target_header->version); 11399 header.pid = tswap32(target_header->pid); 11400 11401 if (header.version != _LINUX_CAPABILITY_VERSION) { 11402 /* Version 2 and up takes pointer to two user_data structs */ 11403 data_items = 2; 11404 } 11405 11406 target_datalen = sizeof(*target_data) * data_items; 11407 11408 if (arg2) { 11409 if (num == TARGET_NR_capget) { 11410 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 11411 } else { 11412 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 11413 } 11414 if (!target_data) { 11415 unlock_user_struct(target_header, arg1, 0); 11416 return -TARGET_EFAULT; 11417 } 11418 11419 if (num == TARGET_NR_capset) { 11420 for (i = 0; i < data_items; i++) { 11421 data[i].effective = tswap32(target_data[i].effective); 11422 data[i].permitted = tswap32(target_data[i].permitted); 11423 data[i].inheritable = tswap32(target_data[i].inheritable); 11424 } 11425 } 11426 11427 dataptr = data; 11428 } 11429 11430 if (num == TARGET_NR_capget) { 11431 ret = get_errno(capget(&header, dataptr)); 11432 } else { 11433 ret = get_errno(capset(&header, dataptr)); 11434 } 11435 11436 /* The kernel always updates version for both capget and capset */ 11437 target_header->version = tswap32(header.version); 11438 unlock_user_struct(target_header, arg1, 1); 11439 11440 if (arg2) { 11441 if (num == TARGET_NR_capget) { 11442 for (i = 0; i < data_items; i++) { 11443 target_data[i].effective = tswap32(data[i].effective); 11444 target_data[i].permitted = tswap32(data[i].permitted); 11445 target_data[i].inheritable = tswap32(data[i].inheritable); 11446 } 11447 unlock_user(target_data, arg2, target_datalen); 11448 } else { 11449 unlock_user(target_data, arg2, 0); 11450 } 11451 } 11452 return ret; 11453 } 11454 case TARGET_NR_sigaltstack: 11455 return do_sigaltstack(arg1, arg2, cpu_env); 11456 11457 #ifdef CONFIG_SENDFILE 11458 #ifdef TARGET_NR_sendfile 11459 case 
TARGET_NR_sendfile: 11460 { 11461 off_t *offp = NULL; 11462 off_t off; 11463 if (arg3) { 11464 ret = get_user_sal(off, arg3); 11465 if (is_error(ret)) { 11466 return ret; 11467 } 11468 offp = &off; 11469 } 11470 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11471 if (!is_error(ret) && arg3) { 11472 abi_long ret2 = put_user_sal(off, arg3); 11473 if (is_error(ret2)) { 11474 ret = ret2; 11475 } 11476 } 11477 return ret; 11478 } 11479 #endif 11480 #ifdef TARGET_NR_sendfile64 11481 case TARGET_NR_sendfile64: 11482 { 11483 off_t *offp = NULL; 11484 off_t off; 11485 if (arg3) { 11486 ret = get_user_s64(off, arg3); 11487 if (is_error(ret)) { 11488 return ret; 11489 } 11490 offp = &off; 11491 } 11492 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11493 if (!is_error(ret) && arg3) { 11494 abi_long ret2 = put_user_s64(off, arg3); 11495 if (is_error(ret2)) { 11496 ret = ret2; 11497 } 11498 } 11499 return ret; 11500 } 11501 #endif 11502 #endif 11503 #ifdef TARGET_NR_vfork 11504 case TARGET_NR_vfork: 11505 return get_errno(do_fork(cpu_env, 11506 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11507 0, 0, 0, 0)); 11508 #endif 11509 #ifdef TARGET_NR_ugetrlimit 11510 case TARGET_NR_ugetrlimit: 11511 { 11512 struct rlimit rlim; 11513 int resource = target_to_host_resource(arg1); 11514 ret = get_errno(getrlimit(resource, &rlim)); 11515 if (!is_error(ret)) { 11516 struct target_rlimit *target_rlim; 11517 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11518 return -TARGET_EFAULT; 11519 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11520 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11521 unlock_user_struct(target_rlim, arg2, 1); 11522 } 11523 return ret; 11524 } 11525 #endif 11526 #ifdef TARGET_NR_truncate64 11527 case TARGET_NR_truncate64: 11528 if (!(p = lock_user_string(arg1))) 11529 return -TARGET_EFAULT; 11530 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11531 unlock_user(p, arg1, 0); 11532 return ret; 11533 #endif 11534 #ifdef TARGET_NR_ftruncate64 11535 case TARGET_NR_ftruncate64: 11536 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11537 #endif 11538 #ifdef TARGET_NR_stat64 11539 case TARGET_NR_stat64: 11540 if (!(p = lock_user_string(arg1))) { 11541 return -TARGET_EFAULT; 11542 } 11543 ret = get_errno(stat(path(p), &st)); 11544 unlock_user(p, arg1, 0); 11545 if (!is_error(ret)) 11546 ret = host_to_target_stat64(cpu_env, arg2, &st); 11547 return ret; 11548 #endif 11549 #ifdef TARGET_NR_lstat64 11550 case TARGET_NR_lstat64: 11551 if (!(p = lock_user_string(arg1))) { 11552 return -TARGET_EFAULT; 11553 } 11554 ret = get_errno(lstat(path(p), &st)); 11555 unlock_user(p, arg1, 0); 11556 if (!is_error(ret)) 11557 ret = host_to_target_stat64(cpu_env, arg2, &st); 11558 return ret; 11559 #endif 11560 #ifdef TARGET_NR_fstat64 11561 case TARGET_NR_fstat64: 11562 ret = get_errno(fstat(arg1, &st)); 11563 if (!is_error(ret)) 11564 ret = host_to_target_stat64(cpu_env, arg2, &st); 11565 return ret; 11566 #endif 11567 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11568 #ifdef TARGET_NR_fstatat64 11569 case TARGET_NR_fstatat64: 11570 #endif 11571 #ifdef TARGET_NR_newfstatat 11572 case TARGET_NR_newfstatat: 11573 #endif 11574 if (!(p = lock_user_string(arg2))) { 11575 return -TARGET_EFAULT; 11576 } 11577 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11578 unlock_user(p, arg2, 0); 11579 if (!is_error(ret)) 11580 ret = host_to_target_stat64(cpu_env, arg3, &st); 11581 return ret; 11582 #endif 11583 #if defined(TARGET_NR_statx) 11584 
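/*
 * statx is handled in two tiers. When the host kernel has __NR_statx
 * we forward the call and copy the result out with
 * host_to_target_statx(); as the comment below notes, struct statx is
 * assumed to have the same layout on every architecture. If the host
 * lacks statx we fall back to fstatat() and hand-build a target_statx
 * from the ordinary struct stat, leaving the fields fstatat() cannot
 * supply as zero.
 */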
case TARGET_NR_statx: 11585 { 11586 struct target_statx *target_stx; 11587 int dirfd = arg1; 11588 int flags = arg3; 11589 11590 p = lock_user_string(arg2); 11591 if (p == NULL) { 11592 return -TARGET_EFAULT; 11593 } 11594 #if defined(__NR_statx) 11595 { 11596 /* 11597 * It is assumed that struct statx is architecture independent. 11598 */ 11599 struct target_statx host_stx; 11600 int mask = arg4; 11601 11602 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11603 if (!is_error(ret)) { 11604 if (host_to_target_statx(&host_stx, arg5) != 0) { 11605 unlock_user(p, arg2, 0); 11606 return -TARGET_EFAULT; 11607 } 11608 } 11609 11610 if (ret != -TARGET_ENOSYS) { 11611 unlock_user(p, arg2, 0); 11612 return ret; 11613 } 11614 } 11615 #endif 11616 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11617 unlock_user(p, arg2, 0); 11618 11619 if (!is_error(ret)) { 11620 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11621 return -TARGET_EFAULT; 11622 } 11623 memset(target_stx, 0, sizeof(*target_stx)); 11624 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11625 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11626 __put_user(st.st_ino, &target_stx->stx_ino); 11627 __put_user(st.st_mode, &target_stx->stx_mode); 11628 __put_user(st.st_uid, &target_stx->stx_uid); 11629 __put_user(st.st_gid, &target_stx->stx_gid); 11630 __put_user(st.st_nlink, &target_stx->stx_nlink); 11631 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11632 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11633 __put_user(st.st_size, &target_stx->stx_size); 11634 __put_user(st.st_blksize, &target_stx->stx_blksize); 11635 __put_user(st.st_blocks, &target_stx->stx_blocks); 11636 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11637 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11638 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11639 unlock_user_struct(target_stx, arg5, 1); 11640 } 11641 } 11642 return ret; 11643 #endif 11644 #ifdef TARGET_NR_lchown 11645 case TARGET_NR_lchown: 11646 if (!(p = lock_user_string(arg1))) 11647 return -TARGET_EFAULT; 11648 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11649 unlock_user(p, arg1, 0); 11650 return ret; 11651 #endif 11652 #ifdef TARGET_NR_getuid 11653 case TARGET_NR_getuid: 11654 return get_errno(high2lowuid(getuid())); 11655 #endif 11656 #ifdef TARGET_NR_getgid 11657 case TARGET_NR_getgid: 11658 return get_errno(high2lowgid(getgid())); 11659 #endif 11660 #ifdef TARGET_NR_geteuid 11661 case TARGET_NR_geteuid: 11662 return get_errno(high2lowuid(geteuid())); 11663 #endif 11664 #ifdef TARGET_NR_getegid 11665 case TARGET_NR_getegid: 11666 return get_errno(high2lowgid(getegid())); 11667 #endif 11668 case TARGET_NR_setreuid: 11669 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11670 case TARGET_NR_setregid: 11671 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11672 case TARGET_NR_getgroups: 11673 { /* the same code as for TARGET_NR_getgroups32 */ 11674 int gidsetsize = arg1; 11675 target_id *target_grouplist; 11676 g_autofree gid_t *grouplist = NULL; 11677 int i; 11678 11679 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { 11680 return -TARGET_EINVAL; 11681 } 11682 if (gidsetsize > 0) { 11683 grouplist = g_try_new(gid_t, gidsetsize); 11684 if (!grouplist) { 11685 return -TARGET_ENOMEM; 11686 } 11687 } 11688 ret = get_errno(getgroups(gidsetsize, grouplist)); 11689 if (!is_error(ret) && gidsetsize > 0) { 11690 target_grouplist = 
lock_user(VERIFY_WRITE, arg2, 11691 gidsetsize * sizeof(target_id), 0); 11692 if (!target_grouplist) { 11693 return -TARGET_EFAULT; 11694 } 11695 for (i = 0; i < ret; i++) { 11696 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11697 } 11698 unlock_user(target_grouplist, arg2, 11699 gidsetsize * sizeof(target_id)); 11700 } 11701 return ret; 11702 } 11703 case TARGET_NR_setgroups: 11704 { /* the same code as for TARGET_NR_setgroups32 */ 11705 int gidsetsize = arg1; 11706 target_id *target_grouplist; 11707 g_autofree gid_t *grouplist = NULL; 11708 int i; 11709 11710 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { 11711 return -TARGET_EINVAL; 11712 } 11713 if (gidsetsize > 0) { 11714 grouplist = g_try_new(gid_t, gidsetsize); 11715 if (!grouplist) { 11716 return -TARGET_ENOMEM; 11717 } 11718 target_grouplist = lock_user(VERIFY_READ, arg2, 11719 gidsetsize * sizeof(target_id), 1); 11720 if (!target_grouplist) { 11721 return -TARGET_EFAULT; 11722 } 11723 for (i = 0; i < gidsetsize; i++) { 11724 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11725 } 11726 unlock_user(target_grouplist, arg2, 11727 gidsetsize * sizeof(target_id)); 11728 } 11729 return get_errno(setgroups(gidsetsize, grouplist)); 11730 } 11731 case TARGET_NR_fchown: 11732 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11733 #if defined(TARGET_NR_fchownat) 11734 case TARGET_NR_fchownat: 11735 if (!(p = lock_user_string(arg2))) 11736 return -TARGET_EFAULT; 11737 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11738 low2highgid(arg4), arg5)); 11739 unlock_user(p, arg2, 0); 11740 return ret; 11741 #endif 11742 #ifdef TARGET_NR_setresuid 11743 case TARGET_NR_setresuid: 11744 return get_errno(sys_setresuid(low2highuid(arg1), 11745 low2highuid(arg2), 11746 low2highuid(arg3))); 11747 #endif 11748 #ifdef TARGET_NR_getresuid 11749 case TARGET_NR_getresuid: 11750 { 11751 uid_t ruid, euid, suid; 11752 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11753 if (!is_error(ret)) { 11754 if (put_user_id(high2lowuid(ruid), arg1) 11755 || put_user_id(high2lowuid(euid), arg2) 11756 || put_user_id(high2lowuid(suid), arg3)) 11757 return -TARGET_EFAULT; 11758 } 11759 } 11760 return ret; 11761 #endif 11762 #ifdef TARGET_NR_setresgid 11763 case TARGET_NR_setresgid: 11764 return get_errno(sys_setresgid(low2highgid(arg1), 11765 low2highgid(arg2), 11766 low2highgid(arg3))); 11767 #endif 11768 #ifdef TARGET_NR_getresgid 11769 case TARGET_NR_getresgid: 11770 { 11771 gid_t rgid, egid, sgid; 11772 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11773 if (!is_error(ret)) { 11774 if (put_user_id(high2lowgid(rgid), arg1) 11775 || put_user_id(high2lowgid(egid), arg2) 11776 || put_user_id(high2lowgid(sgid), arg3)) 11777 return -TARGET_EFAULT; 11778 } 11779 } 11780 return ret; 11781 #endif 11782 #ifdef TARGET_NR_chown 11783 case TARGET_NR_chown: 11784 if (!(p = lock_user_string(arg1))) 11785 return -TARGET_EFAULT; 11786 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11787 unlock_user(p, arg1, 0); 11788 return ret; 11789 #endif 11790 case TARGET_NR_setuid: 11791 return get_errno(sys_setuid(low2highuid(arg1))); 11792 case TARGET_NR_setgid: 11793 return get_errno(sys_setgid(low2highgid(arg1))); 11794 case TARGET_NR_setfsuid: 11795 return get_errno(setfsuid(arg1)); 11796 case TARGET_NR_setfsgid: 11797 return get_errno(setfsgid(arg1)); 11798 11799 #ifdef TARGET_NR_lchown32 11800 case TARGET_NR_lchown32: 11801 if (!(p = lock_user_string(arg1))) 11802 return -TARGET_EFAULT; 11803 ret = get_errno(lchown(p, 
arg2, arg3)); 11804 unlock_user(p, arg1, 0); 11805 return ret; 11806 #endif 11807 #ifdef TARGET_NR_getuid32 11808 case TARGET_NR_getuid32: 11809 return get_errno(getuid()); 11810 #endif 11811 11812 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11813 /* Alpha specific */ 11814 case TARGET_NR_getxuid: 11815 { 11816 uid_t euid; 11817 euid=geteuid(); 11818 cpu_env->ir[IR_A4]=euid; 11819 } 11820 return get_errno(getuid()); 11821 #endif 11822 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11823 /* Alpha specific */ 11824 case TARGET_NR_getxgid: 11825 { 11826 uid_t egid; 11827 egid=getegid(); 11828 cpu_env->ir[IR_A4]=egid; 11829 } 11830 return get_errno(getgid()); 11831 #endif 11832 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11833 /* Alpha specific */ 11834 case TARGET_NR_osf_getsysinfo: 11835 ret = -TARGET_EOPNOTSUPP; 11836 switch (arg1) { 11837 case TARGET_GSI_IEEE_FP_CONTROL: 11838 { 11839 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); 11840 uint64_t swcr = cpu_env->swcr; 11841 11842 swcr &= ~SWCR_STATUS_MASK; 11843 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; 11844 11845 if (put_user_u64 (swcr, arg2)) 11846 return -TARGET_EFAULT; 11847 ret = 0; 11848 } 11849 break; 11850 11851 /* case GSI_IEEE_STATE_AT_SIGNAL: 11852 -- Not implemented in linux kernel. 11853 case GSI_UACPROC: 11854 -- Retrieves current unaligned access state; not much used. 11855 case GSI_PROC_TYPE: 11856 -- Retrieves implver information; surely not used. 11857 case GSI_GET_HWRPB: 11858 -- Grabs a copy of the HWRPB; surely not used. 11859 */ 11860 } 11861 return ret; 11862 #endif 11863 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11864 /* Alpha specific */ 11865 case TARGET_NR_osf_setsysinfo: 11866 ret = -TARGET_EOPNOTSUPP; 11867 switch (arg1) { 11868 case TARGET_SSI_IEEE_FP_CONTROL: 11869 { 11870 uint64_t swcr, fpcr; 11871 11872 if (get_user_u64 (swcr, arg2)) { 11873 return -TARGET_EFAULT; 11874 } 11875 11876 /* 11877 * The kernel calls swcr_update_status to update the 11878 * status bits from the fpcr at every point that it 11879 * could be queried. Therefore, we store the status 11880 * bits only in FPCR. 11881 */ 11882 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); 11883 11884 fpcr = cpu_alpha_load_fpcr(cpu_env); 11885 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); 11886 fpcr |= alpha_ieee_swcr_to_fpcr(swcr); 11887 cpu_alpha_store_fpcr(cpu_env, fpcr); 11888 ret = 0; 11889 } 11890 break; 11891 11892 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11893 { 11894 uint64_t exc, fpcr, fex; 11895 11896 if (get_user_u64(exc, arg2)) { 11897 return -TARGET_EFAULT; 11898 } 11899 exc &= SWCR_STATUS_MASK; 11900 fpcr = cpu_alpha_load_fpcr(cpu_env); 11901 11902 /* Old exceptions are not signaled. */ 11903 fex = alpha_ieee_fpcr_to_swcr(fpcr); 11904 fex = exc & ~fex; 11905 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; 11906 fex &= (cpu_env)->swcr; 11907 11908 /* Update the hardware fpcr. 
*/ 11909 fpcr |= alpha_ieee_swcr_to_fpcr(exc); 11910 cpu_alpha_store_fpcr(cpu_env, fpcr); 11911 11912 if (fex) { 11913 int si_code = TARGET_FPE_FLTUNK; 11914 target_siginfo_t info; 11915 11916 if (fex & SWCR_TRAP_ENABLE_DNO) { 11917 si_code = TARGET_FPE_FLTUND; 11918 } 11919 if (fex & SWCR_TRAP_ENABLE_INE) { 11920 si_code = TARGET_FPE_FLTRES; 11921 } 11922 if (fex & SWCR_TRAP_ENABLE_UNF) { 11923 si_code = TARGET_FPE_FLTUND; 11924 } 11925 if (fex & SWCR_TRAP_ENABLE_OVF) { 11926 si_code = TARGET_FPE_FLTOVF; 11927 } 11928 if (fex & SWCR_TRAP_ENABLE_DZE) { 11929 si_code = TARGET_FPE_FLTDIV; 11930 } 11931 if (fex & SWCR_TRAP_ENABLE_INV) { 11932 si_code = TARGET_FPE_FLTINV; 11933 } 11934 11935 info.si_signo = SIGFPE; 11936 info.si_errno = 0; 11937 info.si_code = si_code; 11938 info._sifields._sigfault._addr = (cpu_env)->pc; 11939 queue_signal(cpu_env, info.si_signo, 11940 QEMU_SI_FAULT, &info); 11941 } 11942 ret = 0; 11943 } 11944 break; 11945 11946 /* case SSI_NVPAIRS: 11947 -- Used with SSIN_UACPROC to enable unaligned accesses. 11948 case SSI_IEEE_STATE_AT_SIGNAL: 11949 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11950 -- Not implemented in linux kernel 11951 */ 11952 } 11953 return ret; 11954 #endif 11955 #ifdef TARGET_NR_osf_sigprocmask 11956 /* Alpha specific. */ 11957 case TARGET_NR_osf_sigprocmask: 11958 { 11959 abi_ulong mask; 11960 int how; 11961 sigset_t set, oldset; 11962 11963 switch(arg1) { 11964 case TARGET_SIG_BLOCK: 11965 how = SIG_BLOCK; 11966 break; 11967 case TARGET_SIG_UNBLOCK: 11968 how = SIG_UNBLOCK; 11969 break; 11970 case TARGET_SIG_SETMASK: 11971 how = SIG_SETMASK; 11972 break; 11973 default: 11974 return -TARGET_EINVAL; 11975 } 11976 mask = arg2; 11977 target_to_host_old_sigset(&set, &mask); 11978 ret = do_sigprocmask(how, &set, &oldset); 11979 if (!ret) { 11980 host_to_target_old_sigset(&mask, &oldset); 11981 ret = mask; 11982 } 11983 } 11984 return ret; 11985 #endif 11986 11987 #ifdef TARGET_NR_getgid32 11988 case TARGET_NR_getgid32: 11989 return get_errno(getgid()); 11990 #endif 11991 #ifdef TARGET_NR_geteuid32 11992 case TARGET_NR_geteuid32: 11993 return get_errno(geteuid()); 11994 #endif 11995 #ifdef TARGET_NR_getegid32 11996 case TARGET_NR_getegid32: 11997 return get_errno(getegid()); 11998 #endif 11999 #ifdef TARGET_NR_setreuid32 12000 case TARGET_NR_setreuid32: 12001 return get_errno(setreuid(arg1, arg2)); 12002 #endif 12003 #ifdef TARGET_NR_setregid32 12004 case TARGET_NR_setregid32: 12005 return get_errno(setregid(arg1, arg2)); 12006 #endif 12007 #ifdef TARGET_NR_getgroups32 12008 case TARGET_NR_getgroups32: 12009 { /* the same code as for TARGET_NR_getgroups */ 12010 int gidsetsize = arg1; 12011 uint32_t *target_grouplist; 12012 g_autofree gid_t *grouplist = NULL; 12013 int i; 12014 12015 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { 12016 return -TARGET_EINVAL; 12017 } 12018 if (gidsetsize > 0) { 12019 grouplist = g_try_new(gid_t, gidsetsize); 12020 if (!grouplist) { 12021 return -TARGET_ENOMEM; 12022 } 12023 } 12024 ret = get_errno(getgroups(gidsetsize, grouplist)); 12025 if (!is_error(ret) && gidsetsize > 0) { 12026 target_grouplist = lock_user(VERIFY_WRITE, arg2, 12027 gidsetsize * 4, 0); 12028 if (!target_grouplist) { 12029 return -TARGET_EFAULT; 12030 } 12031 for (i = 0; i < ret; i++) { 12032 target_grouplist[i] = tswap32(grouplist[i]); 12033 } 12034 unlock_user(target_grouplist, arg2, gidsetsize * 4); 12035 } 12036 return ret; 12037 } 12038 #endif 12039 #ifdef TARGET_NR_setgroups32 12040 case TARGET_NR_setgroups32: 12041 { /* the same code as for 
TARGET_NR_setgroups */ 12042 int gidsetsize = arg1; 12043 uint32_t *target_grouplist; 12044 g_autofree gid_t *grouplist = NULL; 12045 int i; 12046 12047 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) { 12048 return -TARGET_EINVAL; 12049 } 12050 if (gidsetsize > 0) { 12051 grouplist = g_try_new(gid_t, gidsetsize); 12052 if (!grouplist) { 12053 return -TARGET_ENOMEM; 12054 } 12055 target_grouplist = lock_user(VERIFY_READ, arg2, 12056 gidsetsize * 4, 1); 12057 if (!target_grouplist) { 12058 return -TARGET_EFAULT; 12059 } 12060 for (i = 0; i < gidsetsize; i++) { 12061 grouplist[i] = tswap32(target_grouplist[i]); 12062 } 12063 unlock_user(target_grouplist, arg2, 0); 12064 } 12065 return get_errno(setgroups(gidsetsize, grouplist)); 12066 } 12067 #endif 12068 #ifdef TARGET_NR_fchown32 12069 case TARGET_NR_fchown32: 12070 return get_errno(fchown(arg1, arg2, arg3)); 12071 #endif 12072 #ifdef TARGET_NR_setresuid32 12073 case TARGET_NR_setresuid32: 12074 return get_errno(sys_setresuid(arg1, arg2, arg3)); 12075 #endif 12076 #ifdef TARGET_NR_getresuid32 12077 case TARGET_NR_getresuid32: 12078 { 12079 uid_t ruid, euid, suid; 12080 ret = get_errno(getresuid(&ruid, &euid, &suid)); 12081 if (!is_error(ret)) { 12082 if (put_user_u32(ruid, arg1) 12083 || put_user_u32(euid, arg2) 12084 || put_user_u32(suid, arg3)) 12085 return -TARGET_EFAULT; 12086 } 12087 } 12088 return ret; 12089 #endif 12090 #ifdef TARGET_NR_setresgid32 12091 case TARGET_NR_setresgid32: 12092 return get_errno(sys_setresgid(arg1, arg2, arg3)); 12093 #endif 12094 #ifdef TARGET_NR_getresgid32 12095 case TARGET_NR_getresgid32: 12096 { 12097 gid_t rgid, egid, sgid; 12098 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 12099 if (!is_error(ret)) { 12100 if (put_user_u32(rgid, arg1) 12101 || put_user_u32(egid, arg2) 12102 || put_user_u32(sgid, arg3)) 12103 return -TARGET_EFAULT; 12104 } 12105 } 12106 return ret; 12107 #endif 12108 #ifdef TARGET_NR_chown32 12109 case TARGET_NR_chown32: 12110 if (!(p = lock_user_string(arg1))) 12111 return -TARGET_EFAULT; 12112 ret = get_errno(chown(p, arg2, arg3)); 12113 unlock_user(p, arg1, 0); 12114 return ret; 12115 #endif 12116 #ifdef TARGET_NR_setuid32 12117 case TARGET_NR_setuid32: 12118 return get_errno(sys_setuid(arg1)); 12119 #endif 12120 #ifdef TARGET_NR_setgid32 12121 case TARGET_NR_setgid32: 12122 return get_errno(sys_setgid(arg1)); 12123 #endif 12124 #ifdef TARGET_NR_setfsuid32 12125 case TARGET_NR_setfsuid32: 12126 return get_errno(setfsuid(arg1)); 12127 #endif 12128 #ifdef TARGET_NR_setfsgid32 12129 case TARGET_NR_setfsgid32: 12130 return get_errno(setfsgid(arg1)); 12131 #endif 12132 #ifdef TARGET_NR_mincore 12133 case TARGET_NR_mincore: 12134 { 12135 void *a = lock_user(VERIFY_NONE, arg1, arg2, 0); 12136 if (!a) { 12137 return -TARGET_ENOMEM; 12138 } 12139 p = lock_user_string(arg3); 12140 if (!p) { 12141 ret = -TARGET_EFAULT; 12142 } else { 12143 ret = get_errno(mincore(a, arg2, p)); 12144 unlock_user(p, arg3, ret); 12145 } 12146 unlock_user(a, arg1, 0); 12147 } 12148 return ret; 12149 #endif 12150 #ifdef TARGET_NR_arm_fadvise64_64 12151 case TARGET_NR_arm_fadvise64_64: 12152 /* arm_fadvise64_64 looks like fadvise64_64 but 12153 * with different argument order: fd, advice, offset, len 12154 * rather than the usual fd, offset, len, advice. 12155 * Note that offset and len are both 64-bit so appear as 12156 * pairs of 32-bit registers. 
12157 */ 12158 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 12159 target_offset64(arg5, arg6), arg2); 12160 return -host_to_target_errno(ret); 12161 #endif 12162 12163 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12164 12165 #ifdef TARGET_NR_fadvise64_64 12166 case TARGET_NR_fadvise64_64: 12167 #if defined(TARGET_PPC) || defined(TARGET_XTENSA) 12168 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 12169 ret = arg2; 12170 arg2 = arg3; 12171 arg3 = arg4; 12172 arg4 = arg5; 12173 arg5 = arg6; 12174 arg6 = ret; 12175 #else 12176 /* 6 args: fd, offset (high, low), len (high, low), advice */ 12177 if (regpairs_aligned(cpu_env, num)) { 12178 /* offset is in (3,4), len in (5,6) and advice in 7 */ 12179 arg2 = arg3; 12180 arg3 = arg4; 12181 arg4 = arg5; 12182 arg5 = arg6; 12183 arg6 = arg7; 12184 } 12185 #endif 12186 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), 12187 target_offset64(arg4, arg5), arg6); 12188 return -host_to_target_errno(ret); 12189 #endif 12190 12191 #ifdef TARGET_NR_fadvise64 12192 case TARGET_NR_fadvise64: 12193 /* 5 args: fd, offset (high, low), len, advice */ 12194 if (regpairs_aligned(cpu_env, num)) { 12195 /* offset is in (3,4), len in 5 and advice in 6 */ 12196 arg2 = arg3; 12197 arg3 = arg4; 12198 arg4 = arg5; 12199 arg5 = arg6; 12200 } 12201 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5); 12202 return -host_to_target_errno(ret); 12203 #endif 12204 12205 #else /* not a 32-bit ABI */ 12206 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 12207 #ifdef TARGET_NR_fadvise64_64 12208 case TARGET_NR_fadvise64_64: 12209 #endif 12210 #ifdef TARGET_NR_fadvise64 12211 case TARGET_NR_fadvise64: 12212 #endif 12213 #ifdef TARGET_S390X 12214 switch (arg4) { 12215 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 12216 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 12217 case 6: arg4 = POSIX_FADV_DONTNEED; break; 12218 case 7: arg4 = POSIX_FADV_NOREUSE; break; 12219 default: break; 12220 } 12221 #endif 12222 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 12223 #endif 12224 #endif /* end of 64-bit ABI fadvise handling */ 12225 12226 #ifdef TARGET_NR_madvise 12227 case TARGET_NR_madvise: 12228 return target_madvise(arg1, arg2, arg3); 12229 #endif 12230 #ifdef TARGET_NR_fcntl64 12231 case TARGET_NR_fcntl64: 12232 { 12233 int cmd; 12234 struct flock64 fl; 12235 from_flock64_fn *copyfrom = copy_from_user_flock64; 12236 to_flock64_fn *copyto = copy_to_user_flock64; 12237 12238 #ifdef TARGET_ARM 12239 if (!cpu_env->eabi) { 12240 copyfrom = copy_from_user_oabi_flock64; 12241 copyto = copy_to_user_oabi_flock64; 12242 } 12243 #endif 12244 12245 cmd = target_to_host_fcntl_cmd(arg2); 12246 if (cmd == -TARGET_EINVAL) { 12247 return cmd; 12248 } 12249 12250 switch(arg2) { 12251 case TARGET_F_GETLK64: 12252 ret = copyfrom(&fl, arg3); 12253 if (ret) { 12254 break; 12255 } 12256 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 12257 if (ret == 0) { 12258 ret = copyto(arg3, &fl); 12259 } 12260 break; 12261 12262 case TARGET_F_SETLK64: 12263 case TARGET_F_SETLKW64: 12264 ret = copyfrom(&fl, arg3); 12265 if (ret) { 12266 break; 12267 } 12268 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 12269 break; 12270 default: 12271 ret = do_fcntl(arg1, arg2, arg3); 12272 break; 12273 } 12274 return ret; 12275 } 12276 #endif 12277 #ifdef TARGET_NR_cacheflush 12278 case TARGET_NR_cacheflush: 12279 /* self-modifying code is handled automatically, so nothing needed */ 12280 return 
0; 12281 #endif 12282 #ifdef TARGET_NR_getpagesize 12283 case TARGET_NR_getpagesize: 12284 return TARGET_PAGE_SIZE; 12285 #endif 12286 case TARGET_NR_gettid: 12287 return get_errno(sys_gettid()); 12288 #ifdef TARGET_NR_readahead 12289 case TARGET_NR_readahead: 12290 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12291 if (regpairs_aligned(cpu_env, num)) { 12292 arg2 = arg3; 12293 arg3 = arg4; 12294 arg4 = arg5; 12295 } 12296 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4)); 12297 #else 12298 ret = get_errno(readahead(arg1, arg2, arg3)); 12299 #endif 12300 return ret; 12301 #endif 12302 #ifdef CONFIG_ATTR 12303 #ifdef TARGET_NR_setxattr 12304 case TARGET_NR_listxattr: 12305 case TARGET_NR_llistxattr: 12306 { 12307 void *p, *b = 0; 12308 if (arg2) { 12309 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 12310 if (!b) { 12311 return -TARGET_EFAULT; 12312 } 12313 } 12314 p = lock_user_string(arg1); 12315 if (p) { 12316 if (num == TARGET_NR_listxattr) { 12317 ret = get_errno(listxattr(p, b, arg3)); 12318 } else { 12319 ret = get_errno(llistxattr(p, b, arg3)); 12320 } 12321 } else { 12322 ret = -TARGET_EFAULT; 12323 } 12324 unlock_user(p, arg1, 0); 12325 unlock_user(b, arg2, arg3); 12326 return ret; 12327 } 12328 case TARGET_NR_flistxattr: 12329 { 12330 void *b = 0; 12331 if (arg2) { 12332 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 12333 if (!b) { 12334 return -TARGET_EFAULT; 12335 } 12336 } 12337 ret = get_errno(flistxattr(arg1, b, arg3)); 12338 unlock_user(b, arg2, arg3); 12339 return ret; 12340 } 12341 case TARGET_NR_setxattr: 12342 case TARGET_NR_lsetxattr: 12343 { 12344 void *p, *n, *v = 0; 12345 if (arg3) { 12346 v = lock_user(VERIFY_READ, arg3, arg4, 1); 12347 if (!v) { 12348 return -TARGET_EFAULT; 12349 } 12350 } 12351 p = lock_user_string(arg1); 12352 n = lock_user_string(arg2); 12353 if (p && n) { 12354 if (num == TARGET_NR_setxattr) { 12355 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 12356 } else { 12357 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 12358 } 12359 } else { 12360 ret = -TARGET_EFAULT; 12361 } 12362 unlock_user(p, arg1, 0); 12363 unlock_user(n, arg2, 0); 12364 unlock_user(v, arg3, 0); 12365 } 12366 return ret; 12367 case TARGET_NR_fsetxattr: 12368 { 12369 void *n, *v = 0; 12370 if (arg3) { 12371 v = lock_user(VERIFY_READ, arg3, arg4, 1); 12372 if (!v) { 12373 return -TARGET_EFAULT; 12374 } 12375 } 12376 n = lock_user_string(arg2); 12377 if (n) { 12378 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 12379 } else { 12380 ret = -TARGET_EFAULT; 12381 } 12382 unlock_user(n, arg2, 0); 12383 unlock_user(v, arg3, 0); 12384 } 12385 return ret; 12386 case TARGET_NR_getxattr: 12387 case TARGET_NR_lgetxattr: 12388 { 12389 void *p, *n, *v = 0; 12390 if (arg3) { 12391 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 12392 if (!v) { 12393 return -TARGET_EFAULT; 12394 } 12395 } 12396 p = lock_user_string(arg1); 12397 n = lock_user_string(arg2); 12398 if (p && n) { 12399 if (num == TARGET_NR_getxattr) { 12400 ret = get_errno(getxattr(p, n, v, arg4)); 12401 } else { 12402 ret = get_errno(lgetxattr(p, n, v, arg4)); 12403 } 12404 } else { 12405 ret = -TARGET_EFAULT; 12406 } 12407 unlock_user(p, arg1, 0); 12408 unlock_user(n, arg2, 0); 12409 unlock_user(v, arg3, arg4); 12410 } 12411 return ret; 12412 case TARGET_NR_fgetxattr: 12413 { 12414 void *n, *v = 0; 12415 if (arg3) { 12416 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 12417 if (!v) { 12418 return -TARGET_EFAULT; 12419 } 12420 } 12421 n = lock_user_string(arg2); 12422 if (n) { 12423 ret = 
get_errno(fgetxattr(arg1, n, v, arg4)); 12424 } else { 12425 ret = -TARGET_EFAULT; 12426 } 12427 unlock_user(n, arg2, 0); 12428 unlock_user(v, arg3, arg4); 12429 } 12430 return ret; 12431 case TARGET_NR_removexattr: 12432 case TARGET_NR_lremovexattr: 12433 { 12434 void *p, *n; 12435 p = lock_user_string(arg1); 12436 n = lock_user_string(arg2); 12437 if (p && n) { 12438 if (num == TARGET_NR_removexattr) { 12439 ret = get_errno(removexattr(p, n)); 12440 } else { 12441 ret = get_errno(lremovexattr(p, n)); 12442 } 12443 } else { 12444 ret = -TARGET_EFAULT; 12445 } 12446 unlock_user(p, arg1, 0); 12447 unlock_user(n, arg2, 0); 12448 } 12449 return ret; 12450 case TARGET_NR_fremovexattr: 12451 { 12452 void *n; 12453 n = lock_user_string(arg2); 12454 if (n) { 12455 ret = get_errno(fremovexattr(arg1, n)); 12456 } else { 12457 ret = -TARGET_EFAULT; 12458 } 12459 unlock_user(n, arg2, 0); 12460 } 12461 return ret; 12462 #endif 12463 #endif /* CONFIG_ATTR */ 12464 #ifdef TARGET_NR_set_thread_area 12465 case TARGET_NR_set_thread_area: 12466 #if defined(TARGET_MIPS) 12467 cpu_env->active_tc.CP0_UserLocal = arg1; 12468 return 0; 12469 #elif defined(TARGET_CRIS) 12470 if (arg1 & 0xff) 12471 ret = -TARGET_EINVAL; 12472 else { 12473 cpu_env->pregs[PR_PID] = arg1; 12474 ret = 0; 12475 } 12476 return ret; 12477 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 12478 return do_set_thread_area(cpu_env, arg1); 12479 #elif defined(TARGET_M68K) 12480 { 12481 TaskState *ts = cpu->opaque; 12482 ts->tp_value = arg1; 12483 return 0; 12484 } 12485 #else 12486 return -TARGET_ENOSYS; 12487 #endif 12488 #endif 12489 #ifdef TARGET_NR_get_thread_area 12490 case TARGET_NR_get_thread_area: 12491 #if defined(TARGET_I386) && defined(TARGET_ABI32) 12492 return do_get_thread_area(cpu_env, arg1); 12493 #elif defined(TARGET_M68K) 12494 { 12495 TaskState *ts = cpu->opaque; 12496 return ts->tp_value; 12497 } 12498 #else 12499 return -TARGET_ENOSYS; 12500 #endif 12501 #endif 12502 #ifdef TARGET_NR_getdomainname 12503 case TARGET_NR_getdomainname: 12504 return -TARGET_ENOSYS; 12505 #endif 12506 12507 #ifdef TARGET_NR_clock_settime 12508 case TARGET_NR_clock_settime: 12509 { 12510 struct timespec ts; 12511 12512 ret = target_to_host_timespec(&ts, arg2); 12513 if (!is_error(ret)) { 12514 ret = get_errno(clock_settime(arg1, &ts)); 12515 } 12516 return ret; 12517 } 12518 #endif 12519 #ifdef TARGET_NR_clock_settime64 12520 case TARGET_NR_clock_settime64: 12521 { 12522 struct timespec ts; 12523 12524 ret = target_to_host_timespec64(&ts, arg2); 12525 if (!is_error(ret)) { 12526 ret = get_errno(clock_settime(arg1, &ts)); 12527 } 12528 return ret; 12529 } 12530 #endif 12531 #ifdef TARGET_NR_clock_gettime 12532 case TARGET_NR_clock_gettime: 12533 { 12534 struct timespec ts; 12535 ret = get_errno(clock_gettime(arg1, &ts)); 12536 if (!is_error(ret)) { 12537 ret = host_to_target_timespec(arg2, &ts); 12538 } 12539 return ret; 12540 } 12541 #endif 12542 #ifdef TARGET_NR_clock_gettime64 12543 case TARGET_NR_clock_gettime64: 12544 { 12545 struct timespec ts; 12546 ret = get_errno(clock_gettime(arg1, &ts)); 12547 if (!is_error(ret)) { 12548 ret = host_to_target_timespec64(arg2, &ts); 12549 } 12550 return ret; 12551 } 12552 #endif 12553 #ifdef TARGET_NR_clock_getres 12554 case TARGET_NR_clock_getres: 12555 { 12556 struct timespec ts; 12557 ret = get_errno(clock_getres(arg1, &ts)); 12558 if (!is_error(ret)) { 12559 host_to_target_timespec(arg2, &ts); 12560 } 12561 return ret; 12562 } 12563 #endif 12564 #ifdef TARGET_NR_clock_getres_time64 12565 case 
TARGET_NR_clock_getres_time64: 12566 { 12567 struct timespec ts; 12568 ret = get_errno(clock_getres(arg1, &ts)); 12569 if (!is_error(ret)) { 12570 host_to_target_timespec64(arg2, &ts); 12571 } 12572 return ret; 12573 } 12574 #endif 12575 #ifdef TARGET_NR_clock_nanosleep 12576 case TARGET_NR_clock_nanosleep: 12577 { 12578 struct timespec ts; 12579 if (target_to_host_timespec(&ts, arg3)) { 12580 return -TARGET_EFAULT; 12581 } 12582 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12583 &ts, arg4 ? &ts : NULL)); 12584 /* 12585 * if the call is interrupted by a signal handler, it fails 12586 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not 12587 * TIMER_ABSTIME, it returns the remaining unslept time in arg4. 12588 */ 12589 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12590 host_to_target_timespec(arg4, &ts)) { 12591 return -TARGET_EFAULT; 12592 } 12593 12594 return ret; 12595 } 12596 #endif 12597 #ifdef TARGET_NR_clock_nanosleep_time64 12598 case TARGET_NR_clock_nanosleep_time64: 12599 { 12600 struct timespec ts; 12601 12602 if (target_to_host_timespec64(&ts, arg3)) { 12603 return -TARGET_EFAULT; 12604 } 12605 12606 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12607 &ts, arg4 ? &ts : NULL)); 12608 12609 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12610 host_to_target_timespec64(arg4, &ts)) { 12611 return -TARGET_EFAULT; 12612 } 12613 return ret; 12614 } 12615 #endif 12616 12617 #if defined(TARGET_NR_set_tid_address) 12618 case TARGET_NR_set_tid_address: 12619 { 12620 TaskState *ts = cpu->opaque; 12621 ts->child_tidptr = arg1; 12622 /* do not call host set_tid_address() syscall, instead return tid() */ 12623 return get_errno(sys_gettid()); 12624 } 12625 #endif 12626 12627 case TARGET_NR_tkill: 12628 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12629 12630 case TARGET_NR_tgkill: 12631 return get_errno(safe_tgkill((int)arg1, (int)arg2, 12632 target_to_host_signal(arg3))); 12633 12634 #ifdef TARGET_NR_set_robust_list 12635 case TARGET_NR_set_robust_list: 12636 case TARGET_NR_get_robust_list: 12637 /* The ABI for supporting robust futexes has userspace pass 12638 * the kernel a pointer to a linked list which is updated by 12639 * userspace after the syscall; the list is walked by the kernel 12640 * when the thread exits. Since the linked list in QEMU guest 12641 * memory isn't a valid linked list for the host and we have 12642 * no way to reliably intercept the thread-death event, we can't 12643 * support these. Silently return ENOSYS so that guest userspace 12644 * falls back to a non-robust futex implementation (which should 12645 * be OK except in the corner case of the guest crashing while 12646 * holding a mutex that is shared with another process via 12647 * shared memory). 
12648 */ 12649 return -TARGET_ENOSYS; 12650 #endif 12651 12652 #if defined(TARGET_NR_utimensat) 12653 case TARGET_NR_utimensat: 12654 { 12655 struct timespec *tsp, ts[2]; 12656 if (!arg3) { 12657 tsp = NULL; 12658 } else { 12659 if (target_to_host_timespec(ts, arg3)) { 12660 return -TARGET_EFAULT; 12661 } 12662 if (target_to_host_timespec(ts + 1, arg3 + 12663 sizeof(struct target_timespec))) { 12664 return -TARGET_EFAULT; 12665 } 12666 tsp = ts; 12667 } 12668 if (!arg2) 12669 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12670 else { 12671 if (!(p = lock_user_string(arg2))) { 12672 return -TARGET_EFAULT; 12673 } 12674 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12675 unlock_user(p, arg2, 0); 12676 } 12677 } 12678 return ret; 12679 #endif 12680 #ifdef TARGET_NR_utimensat_time64 12681 case TARGET_NR_utimensat_time64: 12682 { 12683 struct timespec *tsp, ts[2]; 12684 if (!arg3) { 12685 tsp = NULL; 12686 } else { 12687 if (target_to_host_timespec64(ts, arg3)) { 12688 return -TARGET_EFAULT; 12689 } 12690 if (target_to_host_timespec64(ts + 1, arg3 + 12691 sizeof(struct target__kernel_timespec))) { 12692 return -TARGET_EFAULT; 12693 } 12694 tsp = ts; 12695 } 12696 if (!arg2) 12697 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12698 else { 12699 p = lock_user_string(arg2); 12700 if (!p) { 12701 return -TARGET_EFAULT; 12702 } 12703 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12704 unlock_user(p, arg2, 0); 12705 } 12706 } 12707 return ret; 12708 #endif 12709 #ifdef TARGET_NR_futex 12710 case TARGET_NR_futex: 12711 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6); 12712 #endif 12713 #ifdef TARGET_NR_futex_time64 12714 case TARGET_NR_futex_time64: 12715 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6); 12716 #endif 12717 #ifdef CONFIG_INOTIFY 12718 #if defined(TARGET_NR_inotify_init) 12719 case TARGET_NR_inotify_init: 12720 ret = get_errno(inotify_init()); 12721 if (ret >= 0) { 12722 fd_trans_register(ret, &target_inotify_trans); 12723 } 12724 return ret; 12725 #endif 12726 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1) 12727 case TARGET_NR_inotify_init1: 12728 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1, 12729 fcntl_flags_tbl))); 12730 if (ret >= 0) { 12731 fd_trans_register(ret, &target_inotify_trans); 12732 } 12733 return ret; 12734 #endif 12735 #if defined(TARGET_NR_inotify_add_watch) 12736 case TARGET_NR_inotify_add_watch: 12737 p = lock_user_string(arg2); 12738 ret = get_errno(inotify_add_watch(arg1, path(p), arg3)); 12739 unlock_user(p, arg2, 0); 12740 return ret; 12741 #endif 12742 #if defined(TARGET_NR_inotify_rm_watch) 12743 case TARGET_NR_inotify_rm_watch: 12744 return get_errno(inotify_rm_watch(arg1, arg2)); 12745 #endif 12746 #endif 12747 12748 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12749 case TARGET_NR_mq_open: 12750 { 12751 struct mq_attr posix_mq_attr; 12752 struct mq_attr *pposix_mq_attr; 12753 int host_flags; 12754 12755 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12756 pposix_mq_attr = NULL; 12757 if (arg4) { 12758 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12759 return -TARGET_EFAULT; 12760 } 12761 pposix_mq_attr = &posix_mq_attr; 12762 } 12763 p = lock_user_string(arg1 - 1); 12764 if (!p) { 12765 return -TARGET_EFAULT; 12766 } 12767 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12768 unlock_user (p, arg1, 0); 12769 } 12770 return ret; 12771 12772 case TARGET_NR_mq_unlink: 12773 p = lock_user_string(arg1 - 1); 
12774 if (!p) { 12775 return -TARGET_EFAULT; 12776 } 12777 ret = get_errno(mq_unlink(p)); 12778 unlock_user (p, arg1, 0); 12779 return ret; 12780 12781 #ifdef TARGET_NR_mq_timedsend 12782 case TARGET_NR_mq_timedsend: 12783 { 12784 struct timespec ts; 12785 12786 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12787 if (arg5 != 0) { 12788 if (target_to_host_timespec(&ts, arg5)) { 12789 return -TARGET_EFAULT; 12790 } 12791 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12792 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12793 return -TARGET_EFAULT; 12794 } 12795 } else { 12796 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12797 } 12798 unlock_user (p, arg2, arg3); 12799 } 12800 return ret; 12801 #endif 12802 #ifdef TARGET_NR_mq_timedsend_time64 12803 case TARGET_NR_mq_timedsend_time64: 12804 { 12805 struct timespec ts; 12806 12807 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12808 if (arg5 != 0) { 12809 if (target_to_host_timespec64(&ts, arg5)) { 12810 return -TARGET_EFAULT; 12811 } 12812 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12813 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12814 return -TARGET_EFAULT; 12815 } 12816 } else { 12817 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12818 } 12819 unlock_user(p, arg2, arg3); 12820 } 12821 return ret; 12822 #endif 12823 12824 #ifdef TARGET_NR_mq_timedreceive 12825 case TARGET_NR_mq_timedreceive: 12826 { 12827 struct timespec ts; 12828 unsigned int prio; 12829 12830 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12831 if (arg5 != 0) { 12832 if (target_to_host_timespec(&ts, arg5)) { 12833 return -TARGET_EFAULT; 12834 } 12835 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12836 &prio, &ts)); 12837 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12838 return -TARGET_EFAULT; 12839 } 12840 } else { 12841 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12842 &prio, NULL)); 12843 } 12844 unlock_user (p, arg2, arg3); 12845 if (arg4 != 0) 12846 put_user_u32(prio, arg4); 12847 } 12848 return ret; 12849 #endif 12850 #ifdef TARGET_NR_mq_timedreceive_time64 12851 case TARGET_NR_mq_timedreceive_time64: 12852 { 12853 struct timespec ts; 12854 unsigned int prio; 12855 12856 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12857 if (arg5 != 0) { 12858 if (target_to_host_timespec64(&ts, arg5)) { 12859 return -TARGET_EFAULT; 12860 } 12861 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12862 &prio, &ts)); 12863 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12864 return -TARGET_EFAULT; 12865 } 12866 } else { 12867 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12868 &prio, NULL)); 12869 } 12870 unlock_user(p, arg2, arg3); 12871 if (arg4 != 0) { 12872 put_user_u32(prio, arg4); 12873 } 12874 } 12875 return ret; 12876 #endif 12877 12878 /* Not implemented for now... 
*/ 12879 /* case TARGET_NR_mq_notify: */ 12880 /* break; */ 12881 12882 case TARGET_NR_mq_getsetattr: 12883 { 12884 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12885 ret = 0; 12886 if (arg2 != 0) { 12887 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12888 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, 12889 &posix_mq_attr_out)); 12890 } else if (arg3 != 0) { 12891 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out)); 12892 } 12893 if (ret == 0 && arg3 != 0) { 12894 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12895 } 12896 } 12897 return ret; 12898 #endif 12899 12900 #ifdef CONFIG_SPLICE 12901 #ifdef TARGET_NR_tee 12902 case TARGET_NR_tee: 12903 { 12904 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12905 } 12906 return ret; 12907 #endif 12908 #ifdef TARGET_NR_splice 12909 case TARGET_NR_splice: 12910 { 12911 loff_t loff_in, loff_out; 12912 loff_t *ploff_in = NULL, *ploff_out = NULL; 12913 if (arg2) { 12914 if (get_user_u64(loff_in, arg2)) { 12915 return -TARGET_EFAULT; 12916 } 12917 ploff_in = &loff_in; 12918 } 12919 if (arg4) { 12920 if (get_user_u64(loff_out, arg4)) { 12921 return -TARGET_EFAULT; 12922 } 12923 ploff_out = &loff_out; 12924 } 12925 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12926 if (arg2) { 12927 if (put_user_u64(loff_in, arg2)) { 12928 return -TARGET_EFAULT; 12929 } 12930 } 12931 if (arg4) { 12932 if (put_user_u64(loff_out, arg4)) { 12933 return -TARGET_EFAULT; 12934 } 12935 } 12936 } 12937 return ret; 12938 #endif 12939 #ifdef TARGET_NR_vmsplice 12940 case TARGET_NR_vmsplice: 12941 { 12942 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12943 if (vec != NULL) { 12944 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12945 unlock_iovec(vec, arg2, arg3, 0); 12946 } else { 12947 ret = -host_to_target_errno(errno); 12948 } 12949 } 12950 return ret; 12951 #endif 12952 #endif /* CONFIG_SPLICE */ 12953 #ifdef CONFIG_EVENTFD 12954 #if defined(TARGET_NR_eventfd) 12955 case TARGET_NR_eventfd: 12956 ret = get_errno(eventfd(arg1, 0)); 12957 if (ret >= 0) { 12958 fd_trans_register(ret, &target_eventfd_trans); 12959 } 12960 return ret; 12961 #endif 12962 #if defined(TARGET_NR_eventfd2) 12963 case TARGET_NR_eventfd2: 12964 { 12965 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)); 12966 if (arg2 & TARGET_O_NONBLOCK) { 12967 host_flags |= O_NONBLOCK; 12968 } 12969 if (arg2 & TARGET_O_CLOEXEC) { 12970 host_flags |= O_CLOEXEC; 12971 } 12972 ret = get_errno(eventfd(arg1, host_flags)); 12973 if (ret >= 0) { 12974 fd_trans_register(ret, &target_eventfd_trans); 12975 } 12976 return ret; 12977 } 12978 #endif 12979 #endif /* CONFIG_EVENTFD */ 12980 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12981 case TARGET_NR_fallocate: 12982 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12983 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12984 target_offset64(arg5, arg6))); 12985 #else 12986 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12987 #endif 12988 return ret; 12989 #endif 12990 #if defined(CONFIG_SYNC_FILE_RANGE) 12991 #if defined(TARGET_NR_sync_file_range) 12992 case TARGET_NR_sync_file_range: 12993 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 12994 #if defined(TARGET_MIPS) 12995 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12996 target_offset64(arg5, arg6), arg7)); 12997 #else 12998 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12999 target_offset64(arg4, arg5), arg6)); 13000 #endif /* !TARGET_MIPS */ 13001 
#else 13002 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 13003 #endif 13004 return ret; 13005 #endif 13006 #if defined(TARGET_NR_sync_file_range2) || \ 13007 defined(TARGET_NR_arm_sync_file_range) 13008 #if defined(TARGET_NR_sync_file_range2) 13009 case TARGET_NR_sync_file_range2: 13010 #endif 13011 #if defined(TARGET_NR_arm_sync_file_range) 13012 case TARGET_NR_arm_sync_file_range: 13013 #endif 13014 /* This is like sync_file_range but the arguments are reordered */ 13015 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32) 13016 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 13017 target_offset64(arg5, arg6), arg2)); 13018 #else 13019 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 13020 #endif 13021 return ret; 13022 #endif 13023 #endif 13024 #if defined(TARGET_NR_signalfd4) 13025 case TARGET_NR_signalfd4: 13026 return do_signalfd4(arg1, arg2, arg4); 13027 #endif 13028 #if defined(TARGET_NR_signalfd) 13029 case TARGET_NR_signalfd: 13030 return do_signalfd4(arg1, arg2, 0); 13031 #endif 13032 #if defined(CONFIG_EPOLL) 13033 #if defined(TARGET_NR_epoll_create) 13034 case TARGET_NR_epoll_create: 13035 return get_errno(epoll_create(arg1)); 13036 #endif 13037 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 13038 case TARGET_NR_epoll_create1: 13039 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 13040 #endif 13041 #if defined(TARGET_NR_epoll_ctl) 13042 case TARGET_NR_epoll_ctl: 13043 { 13044 struct epoll_event ep; 13045 struct epoll_event *epp = 0; 13046 if (arg4) { 13047 if (arg2 != EPOLL_CTL_DEL) { 13048 struct target_epoll_event *target_ep; 13049 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 13050 return -TARGET_EFAULT; 13051 } 13052 ep.events = tswap32(target_ep->events); 13053 /* 13054 * The epoll_data_t union is just opaque data to the kernel, 13055 * so we transfer all 64 bits across and need not worry what 13056 * actual data type it is. 13057 */ 13058 ep.data.u64 = tswap64(target_ep->data.u64); 13059 unlock_user_struct(target_ep, arg4, 0); 13060 } 13061 /* 13062 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a 13063 * non-null pointer, even though this argument is ignored. 
13064 * 13065 */ 13066 epp = &ep; 13067 } 13068 return get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 13069 } 13070 #endif 13071 13072 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 13073 #if defined(TARGET_NR_epoll_wait) 13074 case TARGET_NR_epoll_wait: 13075 #endif 13076 #if defined(TARGET_NR_epoll_pwait) 13077 case TARGET_NR_epoll_pwait: 13078 #endif 13079 { 13080 struct target_epoll_event *target_ep; 13081 struct epoll_event *ep; 13082 int epfd = arg1; 13083 int maxevents = arg3; 13084 int timeout = arg4; 13085 13086 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 13087 return -TARGET_EINVAL; 13088 } 13089 13090 target_ep = lock_user(VERIFY_WRITE, arg2, 13091 maxevents * sizeof(struct target_epoll_event), 1); 13092 if (!target_ep) { 13093 return -TARGET_EFAULT; 13094 } 13095 13096 ep = g_try_new(struct epoll_event, maxevents); 13097 if (!ep) { 13098 unlock_user(target_ep, arg2, 0); 13099 return -TARGET_ENOMEM; 13100 } 13101 13102 switch (num) { 13103 #if defined(TARGET_NR_epoll_pwait) 13104 case TARGET_NR_epoll_pwait: 13105 { 13106 sigset_t *set = NULL; 13107 13108 if (arg5) { 13109 ret = process_sigsuspend_mask(&set, arg5, arg6); 13110 if (ret != 0) { 13111 break; 13112 } 13113 } 13114 13115 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 13116 set, SIGSET_T_SIZE)); 13117 13118 if (set) { 13119 finish_sigsuspend_mask(ret); 13120 } 13121 break; 13122 } 13123 #endif 13124 #if defined(TARGET_NR_epoll_wait) 13125 case TARGET_NR_epoll_wait: 13126 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 13127 NULL, 0)); 13128 break; 13129 #endif 13130 default: 13131 ret = -TARGET_ENOSYS; 13132 } 13133 if (!is_error(ret)) { 13134 int i; 13135 for (i = 0; i < ret; i++) { 13136 target_ep[i].events = tswap32(ep[i].events); 13137 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 13138 } 13139 unlock_user(target_ep, arg2, 13140 ret * sizeof(struct target_epoll_event)); 13141 } else { 13142 unlock_user(target_ep, arg2, 0); 13143 } 13144 g_free(ep); 13145 return ret; 13146 } 13147 #endif 13148 #endif 13149 #ifdef TARGET_NR_prlimit64 13150 case TARGET_NR_prlimit64: 13151 { 13152 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 13153 struct target_rlimit64 *target_rnew, *target_rold; 13154 struct host_rlimit64 rnew, rold, *rnewp = 0; 13155 int resource = target_to_host_resource(arg2); 13156 13157 if (arg3 && (resource != RLIMIT_AS && 13158 resource != RLIMIT_DATA && 13159 resource != RLIMIT_STACK)) { 13160 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 13161 return -TARGET_EFAULT; 13162 } 13163 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur); 13164 __get_user(rnew.rlim_max, &target_rnew->rlim_max); 13165 unlock_user_struct(target_rnew, arg3, 0); 13166 rnewp = &rnew; 13167 } 13168 13169 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0)); 13170 if (!is_error(ret) && arg4) { 13171 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { 13172 return -TARGET_EFAULT; 13173 } 13174 __put_user(rold.rlim_cur, &target_rold->rlim_cur); 13175 __put_user(rold.rlim_max, &target_rold->rlim_max); 13176 unlock_user_struct(target_rold, arg4, 1); 13177 } 13178 return ret; 13179 } 13180 #endif 13181 #ifdef TARGET_NR_gethostname 13182 case TARGET_NR_gethostname: 13183 { 13184 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); 13185 if (name) { 13186 ret = get_errno(gethostname(name, arg2)); 13187 unlock_user(name, arg1, arg2); 13188 } else { 13189 ret = -TARGET_EFAULT; 13190 } 13191 return ret; 13192 } 13193 #endif 13194 #ifdef TARGET_NR_atomic_cmpxchg_32 13195 case TARGET_NR_atomic_cmpxchg_32: 13196 { 13197 /* should use start_exclusive from main.c */ 13198 abi_ulong mem_value; 13199 if (get_user_u32(mem_value, arg6)) { 13200 target_siginfo_t info; 13201 info.si_signo = SIGSEGV; 13202 info.si_errno = 0; 13203 info.si_code = TARGET_SEGV_MAPERR; 13204 info._sifields._sigfault._addr = arg6; 13205 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info); 13206 ret = 0xdeadbeef; 13207 13208 } 13209 if (mem_value == arg2) 13210 put_user_u32(arg1, arg6); 13211 return mem_value; 13212 } 13213 #endif 13214 #ifdef TARGET_NR_atomic_barrier 13215 case TARGET_NR_atomic_barrier: 13216 /* Like the kernel implementation and the 13217 qemu arm barrier, no-op this? */ 13218 return 0; 13219 #endif 13220 13221 #ifdef TARGET_NR_timer_create 13222 case TARGET_NR_timer_create: 13223 { 13224 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ 13225 13226 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; 13227 13228 int clkid = arg1; 13229 int timer_index = next_free_host_timer(); 13230 13231 if (timer_index < 0) { 13232 ret = -TARGET_EAGAIN; 13233 } else { 13234 timer_t *phtimer = g_posix_timers + timer_index; 13235 13236 if (arg2) { 13237 phost_sevp = &host_sevp; 13238 ret = target_to_host_sigevent(phost_sevp, arg2); 13239 if (ret != 0) { 13240 free_host_timer_slot(timer_index); 13241 return ret; 13242 } 13243 } 13244 13245 ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); 13246 if (ret) { 13247 free_host_timer_slot(timer_index); 13248 } else { 13249 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { 13250 timer_delete(*phtimer); 13251 free_host_timer_slot(timer_index); 13252 return -TARGET_EFAULT; 13253 } 13254 } 13255 } 13256 return ret; 13257 } 13258 #endif 13259 13260 #ifdef TARGET_NR_timer_settime 13261 case TARGET_NR_timer_settime: 13262 { 13263 /* args: timer_t timerid, int flags, const struct itimerspec *new_value, 13264 * struct itimerspec * old_value */ 13265 target_timer_t timerid = get_timer_id(arg1); 13266 13267 if (timerid < 0) { 13268 ret = timerid; 13269 } else if (arg3 == 0) { 13270 ret = -TARGET_EINVAL; 13271 } else { 13272 timer_t htimer = g_posix_timers[timerid]; 13273 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 13274 13275 if (target_to_host_itimerspec(&hspec_new, arg3)) { 13276 return -TARGET_EFAULT; 13277 } 13278 ret = get_errno( 13279 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 13280 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) { 13281 return -TARGET_EFAULT; 13282 } 13283 } 13284 return ret; 13285 } 13286 #endif 13287 13288 #ifdef TARGET_NR_timer_settime64 13289 case TARGET_NR_timer_settime64: 13290 { 13291 target_timer_t timerid = get_timer_id(arg1); 13292 13293 if (timerid < 0) { 13294 ret = timerid; 13295 } else if (arg3 == 0) { 
13296 ret = -TARGET_EINVAL; 13297 } else { 13298 timer_t htimer = g_posix_timers[timerid]; 13299 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; 13300 13301 if (target_to_host_itimerspec64(&hspec_new, arg3)) { 13302 return -TARGET_EFAULT; 13303 } 13304 ret = get_errno( 13305 timer_settime(htimer, arg2, &hspec_new, &hspec_old)); 13306 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) { 13307 return -TARGET_EFAULT; 13308 } 13309 } 13310 return ret; 13311 } 13312 #endif 13313 13314 #ifdef TARGET_NR_timer_gettime 13315 case TARGET_NR_timer_gettime: 13316 { 13317 /* args: timer_t timerid, struct itimerspec *curr_value */ 13318 target_timer_t timerid = get_timer_id(arg1); 13319 13320 if (timerid < 0) { 13321 ret = timerid; 13322 } else if (!arg2) { 13323 ret = -TARGET_EFAULT; 13324 } else { 13325 timer_t htimer = g_posix_timers[timerid]; 13326 struct itimerspec hspec; 13327 ret = get_errno(timer_gettime(htimer, &hspec)); 13328 13329 if (host_to_target_itimerspec(arg2, &hspec)) { 13330 ret = -TARGET_EFAULT; 13331 } 13332 } 13333 return ret; 13334 } 13335 #endif 13336 13337 #ifdef TARGET_NR_timer_gettime64 13338 case TARGET_NR_timer_gettime64: 13339 { 13340 /* args: timer_t timerid, struct itimerspec64 *curr_value */ 13341 target_timer_t timerid = get_timer_id(arg1); 13342 13343 if (timerid < 0) { 13344 ret = timerid; 13345 } else if (!arg2) { 13346 ret = -TARGET_EFAULT; 13347 } else { 13348 timer_t htimer = g_posix_timers[timerid]; 13349 struct itimerspec hspec; 13350 ret = get_errno(timer_gettime(htimer, &hspec)); 13351 13352 if (host_to_target_itimerspec64(arg2, &hspec)) { 13353 ret = -TARGET_EFAULT; 13354 } 13355 } 13356 return ret; 13357 } 13358 #endif 13359 13360 #ifdef TARGET_NR_timer_getoverrun 13361 case TARGET_NR_timer_getoverrun: 13362 { 13363 /* args: timer_t timerid */ 13364 target_timer_t timerid = get_timer_id(arg1); 13365 13366 if (timerid < 0) { 13367 ret = timerid; 13368 } else { 13369 timer_t htimer = g_posix_timers[timerid]; 13370 ret = get_errno(timer_getoverrun(htimer)); 13371 } 13372 return ret; 13373 } 13374 #endif 13375 13376 #ifdef TARGET_NR_timer_delete 13377 case TARGET_NR_timer_delete: 13378 { 13379 /* args: timer_t timerid */ 13380 target_timer_t timerid = get_timer_id(arg1); 13381 13382 if (timerid < 0) { 13383 ret = timerid; 13384 } else { 13385 timer_t htimer = g_posix_timers[timerid]; 13386 ret = get_errno(timer_delete(htimer)); 13387 free_host_timer_slot(timerid); 13388 } 13389 return ret; 13390 } 13391 #endif 13392 13393 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) 13394 case TARGET_NR_timerfd_create: 13395 ret = get_errno(timerfd_create(arg1, 13396 target_to_host_bitmask(arg2, fcntl_flags_tbl))); 13397 if (ret >= 0) { 13398 fd_trans_register(ret, &target_timerfd_trans); 13399 } 13400 return ret; 13401 #endif 13402 13403 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) 13404 case TARGET_NR_timerfd_gettime: 13405 { 13406 struct itimerspec its_curr; 13407 13408 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 13409 13410 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { 13411 return -TARGET_EFAULT; 13412 } 13413 } 13414 return ret; 13415 #endif 13416 13417 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD) 13418 case TARGET_NR_timerfd_gettime64: 13419 { 13420 struct itimerspec its_curr; 13421 13422 ret = get_errno(timerfd_gettime(arg1, &its_curr)); 13423 13424 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) { 13425 return -TARGET_EFAULT; 13426 } 13427 } 13428 return ret;
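/* The _time64 variants above differ from their classic counterparts
 * only in the conversion helper used: host_to_target_itimerspec64()
 * stores 64-bit time fields (struct target__kernel_timespec), giving
 * 32-bit targets a y2038-safe layout. */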
13429 #endif 13430 13431 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) 13432 case TARGET_NR_timerfd_settime: 13433 { 13434 struct itimerspec its_new, its_old, *p_new; 13435 13436 if (arg3) { 13437 if (target_to_host_itimerspec(&its_new, arg3)) { 13438 return -TARGET_EFAULT; 13439 } 13440 p_new = &its_new; 13441 } else { 13442 p_new = NULL; 13443 } 13444 13445 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 13446 13447 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { 13448 return -TARGET_EFAULT; 13449 } 13450 } 13451 return ret; 13452 #endif 13453 13454 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD) 13455 case TARGET_NR_timerfd_settime64: 13456 { 13457 struct itimerspec its_new, its_old, *p_new; 13458 13459 if (arg3) { 13460 if (target_to_host_itimerspec64(&its_new, arg3)) { 13461 return -TARGET_EFAULT; 13462 } 13463 p_new = &its_new; 13464 } else { 13465 p_new = NULL; 13466 } 13467 13468 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); 13469 13470 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) { 13471 return -TARGET_EFAULT; 13472 } 13473 } 13474 return ret; 13475 #endif 13476 13477 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) 13478 case TARGET_NR_ioprio_get: 13479 return get_errno(ioprio_get(arg1, arg2)); 13480 #endif 13481 13482 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) 13483 case TARGET_NR_ioprio_set: 13484 return get_errno(ioprio_set(arg1, arg2, arg3)); 13485 #endif 13486 13487 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) 13488 case TARGET_NR_setns: 13489 return get_errno(setns(arg1, arg2)); 13490 #endif 13491 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) 13492 case TARGET_NR_unshare: 13493 return get_errno(unshare(arg1)); 13494 #endif 13495 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp) 13496 case TARGET_NR_kcmp: 13497 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5)); 13498 #endif 13499 #ifdef TARGET_NR_swapcontext 13500 case TARGET_NR_swapcontext: 13501 /* PowerPC specific. */ 13502 return do_swapcontext(cpu_env, arg1, arg2, arg3); 13503 #endif 13504 #ifdef TARGET_NR_memfd_create 13505 case TARGET_NR_memfd_create: 13506 p = lock_user_string(arg1); 13507 if (!p) { 13508 return -TARGET_EFAULT; 13509 } 13510 ret = get_errno(memfd_create(p, arg2)); 13511 fd_trans_unregister(ret); 13512 unlock_user(p, arg1, 0); 13513 return ret; 13514 #endif 13515 #if defined TARGET_NR_membarrier && defined __NR_membarrier 13516 case TARGET_NR_membarrier: 13517 return get_errno(membarrier(arg1, arg2)); 13518 #endif 13519 13520 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range) 13521 case TARGET_NR_copy_file_range: 13522 { 13523 loff_t inoff, outoff; 13524 loff_t *pinoff = NULL, *poutoff = NULL; 13525 13526 if (arg2) { 13527 if (get_user_u64(inoff, arg2)) { 13528 return -TARGET_EFAULT; 13529 } 13530 pinoff = &inoff; 13531 } 13532 if (arg4) { 13533 if (get_user_u64(outoff, arg4)) { 13534 return -TARGET_EFAULT; 13535 } 13536 poutoff = &outoff; 13537 } 13538 /* Do not sign-extend the count parameter. 
*/ 13539 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff, 13540 (abi_ulong)arg5, arg6)); 13541 if (!is_error(ret) && ret > 0) { 13542 if (arg2) { 13543 if (put_user_u64(inoff, arg2)) { 13544 return -TARGET_EFAULT; 13545 } 13546 } 13547 if (arg4) { 13548 if (put_user_u64(outoff, arg4)) { 13549 return -TARGET_EFAULT; 13550 } 13551 } 13552 } 13553 } 13554 return ret; 13555 #endif 13556 13557 #if defined(TARGET_NR_pivot_root) 13558 case TARGET_NR_pivot_root: 13559 { 13560 void *p2; 13561 p = lock_user_string(arg1); /* new_root */ 13562 p2 = lock_user_string(arg2); /* put_old */ 13563 if (!p || !p2) { 13564 ret = -TARGET_EFAULT; 13565 } else { 13566 ret = get_errno(pivot_root(p, p2)); 13567 } 13568 unlock_user(p2, arg2, 0); 13569 unlock_user(p, arg1, 0); 13570 } 13571 return ret; 13572 #endif 13573 13574 default: 13575 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); 13576 return -TARGET_ENOSYS; 13577 } 13578 return ret; 13579 } 13580 13581 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1, 13582 abi_long arg2, abi_long arg3, abi_long arg4, 13583 abi_long arg5, abi_long arg6, abi_long arg7, 13584 abi_long arg8) 13585 { 13586 CPUState *cpu = env_cpu(cpu_env); 13587 abi_long ret; 13588 13589 #ifdef DEBUG_ERESTARTSYS 13590 /* Debug-only code for exercising the syscall-restart code paths 13591 * in the per-architecture cpu main loops: restart every syscall 13592 * the guest makes once before letting it through. 13593 */ 13594 { 13595 static bool flag; 13596 flag = !flag; 13597 if (flag) { 13598 return -QEMU_ERESTARTSYS; 13599 } 13600 } 13601 #endif 13602 13603 record_syscall_start(cpu, num, arg1, 13604 arg2, arg3, arg4, arg5, arg6, arg7, arg8); 13605 13606 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { 13607 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6); 13608 } 13609 13610 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4, 13611 arg5, arg6, arg7, arg8); 13612 13613 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) { 13614 print_syscall_ret(cpu_env, num, ret, arg1, arg2, 13615 arg3, arg4, arg5, arg6); 13616 } 13617 13618 record_syscall_return(cpu, num, ret); 13619 return ret; 13620 } 13621
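/*
 * Call-flow summary: the per-architecture cpu_loop() implementations
 * invoke do_syscall() with the raw guest register values. do_syscall()
 * handles the cross-cutting concerns (plugin syscall records, -strace
 * logging and the optional DEBUG_ERESTARTSYS restart exercising) and
 * delegates the actual emulation to the big switch in do_syscall1()
 * above. A -QEMU_ERESTARTSYS return never reaches the guest: the cpu
 * main loop arranges for the syscall instruction to be restarted
 * instead.
 */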