1 /* 2 * Linux syscalls 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #define _ATFILE_SOURCE 20 #include "qemu/osdep.h" 21 #include "qemu/cutils.h" 22 #include "qemu/path.h" 23 #include "qemu/memfd.h" 24 #include "qemu/queue.h" 25 #include <elf.h> 26 #include <endian.h> 27 #include <grp.h> 28 #include <sys/ipc.h> 29 #include <sys/msg.h> 30 #include <sys/wait.h> 31 #include <sys/mount.h> 32 #include <sys/file.h> 33 #include <sys/fsuid.h> 34 #include <sys/personality.h> 35 #include <sys/prctl.h> 36 #include <sys/resource.h> 37 #include <sys/swap.h> 38 #include <linux/capability.h> 39 #include <sched.h> 40 #include <sys/timex.h> 41 #include <sys/socket.h> 42 #include <linux/sockios.h> 43 #include <sys/un.h> 44 #include <sys/uio.h> 45 #include <poll.h> 46 #include <sys/times.h> 47 #include <sys/shm.h> 48 #include <sys/sem.h> 49 #include <sys/statfs.h> 50 #include <utime.h> 51 #include <sys/sysinfo.h> 52 #include <sys/signalfd.h> 53 //#include <sys/user.h> 54 #include <netinet/in.h> 55 #include <netinet/ip.h> 56 #include <netinet/tcp.h> 57 #include <netinet/udp.h> 58 #include <linux/wireless.h> 59 #include <linux/icmp.h> 60 #include <linux/icmpv6.h> 61 #include <linux/if_tun.h> 62 #include <linux/in6.h> 63 #include <linux/errqueue.h> 64 #include <linux/random.h> 65 #ifdef CONFIG_TIMERFD 66 #include 
<sys/timerfd.h> 67 #endif 68 #ifdef CONFIG_EVENTFD 69 #include <sys/eventfd.h> 70 #endif 71 #ifdef CONFIG_EPOLL 72 #include <sys/epoll.h> 73 #endif 74 #ifdef CONFIG_ATTR 75 #include "qemu/xattr.h" 76 #endif 77 #ifdef CONFIG_SENDFILE 78 #include <sys/sendfile.h> 79 #endif 80 #ifdef HAVE_SYS_KCOV_H 81 #include <sys/kcov.h> 82 #endif 83 84 #define termios host_termios 85 #define winsize host_winsize 86 #define termio host_termio 87 #define sgttyb host_sgttyb /* same as target */ 88 #define tchars host_tchars /* same as target */ 89 #define ltchars host_ltchars /* same as target */ 90 91 #include <linux/termios.h> 92 #include <linux/unistd.h> 93 #include <linux/cdrom.h> 94 #include <linux/hdreg.h> 95 #include <linux/soundcard.h> 96 #include <linux/kd.h> 97 #include <linux/mtio.h> 98 99 #ifdef HAVE_SYS_MOUNT_FSCONFIG 100 /* 101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h, 102 * which in turn prevents use of linux/fs.h. So we have to 103 * define the constants ourselves for now. 104 */ 105 #define FS_IOC_GETFLAGS _IOR('f', 1, long) 106 #define FS_IOC_SETFLAGS _IOW('f', 2, long) 107 #define FS_IOC_GETVERSION _IOR('v', 1, long) 108 #define FS_IOC_SETVERSION _IOW('v', 2, long) 109 #define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) 110 #define FS_IOC32_GETFLAGS _IOR('f', 1, int) 111 #define FS_IOC32_SETFLAGS _IOW('f', 2, int) 112 #define FS_IOC32_GETVERSION _IOR('v', 1, int) 113 #define FS_IOC32_SETVERSION _IOW('v', 2, int) 114 #else 115 #include <linux/fs.h> 116 #endif 117 #include <linux/fd.h> 118 #if defined(CONFIG_FIEMAP) 119 #include <linux/fiemap.h> 120 #endif 121 #include <linux/fb.h> 122 #if defined(CONFIG_USBFS) 123 #include <linux/usbdevice_fs.h> 124 #include <linux/usb/ch9.h> 125 #endif 126 #include <linux/vt.h> 127 #include <linux/dm-ioctl.h> 128 #include <linux/reboot.h> 129 #include <linux/route.h> 130 #include <linux/filter.h> 131 #include <linux/blkpg.h> 132 #include <netpacket/packet.h> 133 #include <linux/netlink.h> 134 #include 
<linux/if_alg.h> 135 #include <linux/rtc.h> 136 #include <sound/asound.h> 137 #ifdef HAVE_BTRFS_H 138 #include <linux/btrfs.h> 139 #endif 140 #ifdef HAVE_DRM_H 141 #include <libdrm/drm.h> 142 #include <libdrm/i915_drm.h> 143 #endif 144 #include "linux_loop.h" 145 #include "uname.h" 146 147 #include "qemu.h" 148 #include "user-internals.h" 149 #include "strace.h" 150 #include "signal-common.h" 151 #include "loader.h" 152 #include "user-mmap.h" 153 #include "user/safe-syscall.h" 154 #include "qemu/guest-random.h" 155 #include "qemu/selfmap.h" 156 #include "user/syscall-trace.h" 157 #include "special-errno.h" 158 #include "qapi/error.h" 159 #include "fd-trans.h" 160 #include "tcg/tcg.h" 161 162 #ifndef CLONE_IO 163 #define CLONE_IO 0x80000000 /* Clone io context */ 164 #endif 165 166 /* We can't directly call the host clone syscall, because this will 167 * badly confuse libc (breaking mutexes, for example). So we must 168 * divide clone flags into: 169 * * flag combinations that look like pthread_create() 170 * * flag combinations that look like fork() 171 * * flags we can implement within QEMU itself 172 * * flags we can't support and will return an error for 173 */ 174 /* For thread creation, all these flags must be present; for 175 * fork, none must be present. 
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any bit set here that is not CSIGNAL/optional/ignored makes fork fail. */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Any bit set here that is not CSIGNAL/thread/optional/ignored makes
 * thread creation fail.
 */
#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

/* Redefine the _syscallN helpers (possibly provided by system headers)
 * so each one expands to a static wrapper invoking the raw host
 * syscall() with the matching __NR_<name> number.
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}


/* Aliases so the sys_* wrappers below get the right host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;          /* size of this structure, for fwd/back compat */
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

/* Translation table between target and host open(2) flag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host lacks utimensat: report ENOSYS like the real syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host lacks renameat2: flags==0 degrades to plain renameat, anything
 * else is ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
513 #endif 514 515 516 #if defined(TARGET_NR_timer_create) 517 /* Maximum of 32 active POSIX timers allowed at any one time. */ 518 static timer_t g_posix_timers[32] = { 0, } ; 519 520 static inline int next_free_host_timer(void) 521 { 522 int k ; 523 /* FIXME: Does finding the next free slot require a lock? */ 524 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) { 525 if (g_posix_timers[k] == 0) { 526 g_posix_timers[k] = (timer_t) 1; 527 return k; 528 } 529 } 530 return -1; 531 } 532 #endif 533 534 static inline int host_to_target_errno(int host_errno) 535 { 536 switch (host_errno) { 537 #define E(X) case X: return TARGET_##X; 538 #include "errnos.c.inc" 539 #undef E 540 default: 541 return host_errno; 542 } 543 } 544 545 static inline int target_to_host_errno(int target_errno) 546 { 547 switch (target_errno) { 548 #define E(X) case TARGET_##X: return X; 549 #include "errnos.c.inc" 550 #undef E 551 default: 552 return target_errno; 553 } 554 } 555 556 abi_long get_errno(abi_long ret) 557 { 558 if (ret == -1) 559 return -host_to_target_errno(errno); 560 else 561 return ret; 562 } 563 564 const char *target_strerror(int err) 565 { 566 if (err == QEMU_ERESTARTSYS) { 567 return "To be restarted"; 568 } 569 if (err == QEMU_ESIGRETURN) { 570 return "Successful exit from sigreturn"; 571 } 572 573 return strerror(target_to_host_errno(err)); 574 } 575 576 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize) 577 { 578 int i; 579 uint8_t b; 580 if (usize <= ksize) { 581 return 1; 582 } 583 for (i = ksize; i < usize; i++) { 584 if (get_user_u8(b, addr + i)) { 585 return -TARGET_EFAULT; 586 } 587 if (b != 0) { 588 return 0; 589 } 590 } 591 return 1; 592 } 593 594 #define safe_syscall0(type, name) \ 595 static type safe_##name(void) \ 596 { \ 597 return safe_syscall(__NR_##name); \ 598 } 599 600 #define safe_syscall1(type, name, type1, arg1) \ 601 static type safe_##name(type1 arg1) \ 602 { \ 603 return safe_syscall(__NR_##name, arg1); \ 604 } 605 606 #define 
safe_syscall2(type, name, type1, arg1, type2, arg2) \ 607 static type safe_##name(type1 arg1, type2 arg2) \ 608 { \ 609 return safe_syscall(__NR_##name, arg1, arg2); \ 610 } 611 612 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ 613 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \ 614 { \ 615 return safe_syscall(__NR_##name, arg1, arg2, arg3); \ 616 } 617 618 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ 619 type4, arg4) \ 620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 621 { \ 622 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \ 623 } 624 625 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ 626 type4, arg4, type5, arg5) \ 627 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 628 type5 arg5) \ 629 { \ 630 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \ 631 } 632 633 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ 634 type4, arg4, type5, arg5, type6, arg6) \ 635 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 636 type5 arg5, type6 arg6) \ 637 { \ 638 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \ 639 } 640 641 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count) 642 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count) 643 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \ 644 int, flags, mode_t, mode) 645 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid) 646 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \ 647 struct rusage *, rusage) 648 #endif 649 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \ 650 int, options, struct rusage *, rusage) 651 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp) 652 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \ 653 
defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 654 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \ 655 fd_set *, exceptfds, struct timespec *, timeout, void *, sig) 656 #endif 657 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64) 658 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds, 659 struct timespec *, tsp, const sigset_t *, sigmask, 660 size_t, sigsetsize) 661 #endif 662 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events, 663 int, maxevents, int, timeout, const sigset_t *, sigmask, 664 size_t, sigsetsize) 665 #if defined(__NR_futex) 666 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \ 667 const struct timespec *,timeout,int *,uaddr2,int,val3) 668 #endif 669 #if defined(__NR_futex_time64) 670 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \ 671 const struct timespec *,timeout,int *,uaddr2,int,val3) 672 #endif 673 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize) 674 safe_syscall2(int, kill, pid_t, pid, int, sig) 675 safe_syscall2(int, tkill, int, tid, int, sig) 676 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig) 677 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt) 678 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt) 679 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt, 680 unsigned long, pos_l, unsigned long, pos_h) 681 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt, 682 unsigned long, pos_l, unsigned long, pos_h) 683 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr, 684 socklen_t, addrlen) 685 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len, 686 int, flags, const struct sockaddr *, addr, socklen_t, addrlen) 687 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len, 688 int, flags, struct sockaddr *, addr, socklen_t *, addrlen) 689 
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
/* s390x's ipc syscall takes only five arguments */
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) \
safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

/* Translate a host socket type (plus SOCK_CLOEXEC/SOCK_NONBLOCK
 * modifier bits) into the guest's encoding.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        /* Other types share their numbering between host and guest here. */
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

/* Guest heap state: current break, the initial break, and the first
 * host page boundary past the pages currently reserved for the heap.
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called at loader time). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break; return current break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.
         */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.
     */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/* Unpack a guest fd_set (an array of abi_ulong bit words in guest
 * byte order) at target_fds_addr into a host fd_set covering n fds.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

/* As copy_from_user_fdset, but a NULL guest pointer yields a NULL
 * host fd_set pointer (as select/pselect allow).
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

/* Pack a host fd_set covering n fds back into the guest's bit-word
 * representation at target_fds_addr. Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif

#if \
defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/*
 * Scale a host clock_t tick count (HOST_HZ ticks per second) to the
 * guest's TARGET_HZ rate; identity when the two rates match.
 * The intermediate int64_t multiply avoids overflow of long ticks.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

/*
 * Copy a host struct rusage into guest memory at target_addr,
 * byte-swapping every field to the guest's endianness/word size.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value to the host rlim_t, mapping the guest's
 * infinity encoding (and any value that does not round-trip through
 * rlim_t) to the host RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap ==
TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* A value that doesn't survive the narrowing/widening to rlim_t
     * saturates to infinity rather than silently truncating. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the guest representation: host infinity,
 * or any value that does not fit in the guest's abi_long, becomes
 * TARGET_RLIM_INFINITY.  The result is byte-swapped for the guest.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

/*
 * Map a guest RLIMIT_* resource code onto the host's constant.
 * Unknown codes are passed through unchanged so the host syscall
 * can reject them itself.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

/*
 * Read a guest struct target_timeval at target_tv_addr into the host
 * struct timeval *tv.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

1093 __get_user(tv->tv_sec, &target_tv->tv_sec); 1094 __get_user(tv->tv_usec, &target_tv->tv_usec); 1095 1096 unlock_user_struct(target_tv, target_tv_addr, 0); 1097 1098 return 0; 1099 } 1100 1101 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr, 1102 const struct timeval *tv) 1103 { 1104 struct target_timeval *target_tv; 1105 1106 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) { 1107 return -TARGET_EFAULT; 1108 } 1109 1110 __put_user(tv->tv_sec, &target_tv->tv_sec); 1111 __put_user(tv->tv_usec, &target_tv->tv_usec); 1112 1113 unlock_user_struct(target_tv, target_tv_addr, 1); 1114 1115 return 0; 1116 } 1117 1118 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 1119 static inline abi_long copy_from_user_timeval64(struct timeval *tv, 1120 abi_ulong target_tv_addr) 1121 { 1122 struct target__kernel_sock_timeval *target_tv; 1123 1124 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) { 1125 return -TARGET_EFAULT; 1126 } 1127 1128 __get_user(tv->tv_sec, &target_tv->tv_sec); 1129 __get_user(tv->tv_usec, &target_tv->tv_usec); 1130 1131 unlock_user_struct(target_tv, target_tv_addr, 0); 1132 1133 return 0; 1134 } 1135 #endif 1136 1137 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr, 1138 const struct timeval *tv) 1139 { 1140 struct target__kernel_sock_timeval *target_tv; 1141 1142 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) { 1143 return -TARGET_EFAULT; 1144 } 1145 1146 __put_user(tv->tv_sec, &target_tv->tv_sec); 1147 __put_user(tv->tv_usec, &target_tv->tv_usec); 1148 1149 unlock_user_struct(target_tv, target_tv_addr, 1); 1150 1151 return 0; 1152 } 1153 1154 #if defined(TARGET_NR_futex) || \ 1155 defined(TARGET_NR_rt_sigtimedwait) || \ 1156 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \ 1157 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \ 1158 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \ 
1159 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \ 1160 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \ 1161 defined(TARGET_NR_timer_settime) || \ 1162 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)) 1163 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 1164 abi_ulong target_addr) 1165 { 1166 struct target_timespec *target_ts; 1167 1168 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) { 1169 return -TARGET_EFAULT; 1170 } 1171 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 1172 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1173 unlock_user_struct(target_ts, target_addr, 0); 1174 return 0; 1175 } 1176 #endif 1177 1178 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \ 1179 defined(TARGET_NR_timer_settime64) || \ 1180 defined(TARGET_NR_mq_timedsend_time64) || \ 1181 defined(TARGET_NR_mq_timedreceive_time64) || \ 1182 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \ 1183 defined(TARGET_NR_clock_nanosleep_time64) || \ 1184 defined(TARGET_NR_rt_sigtimedwait_time64) || \ 1185 defined(TARGET_NR_utimensat) || \ 1186 defined(TARGET_NR_utimensat_time64) || \ 1187 defined(TARGET_NR_semtimedop_time64) || \ 1188 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64) 1189 static inline abi_long target_to_host_timespec64(struct timespec *host_ts, 1190 abi_ulong target_addr) 1191 { 1192 struct target__kernel_timespec *target_ts; 1193 1194 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) { 1195 return -TARGET_EFAULT; 1196 } 1197 __get_user(host_ts->tv_sec, &target_ts->tv_sec); 1198 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1199 /* in 32bit mode, this drops the padding */ 1200 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec; 1201 unlock_user_struct(target_ts, target_addr, 0); 1202 return 0; 1203 } 1204 #endif 1205 1206 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 1207 struct 
timespec *host_ts) 1208 { 1209 struct target_timespec *target_ts; 1210 1211 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) { 1212 return -TARGET_EFAULT; 1213 } 1214 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 1215 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1216 unlock_user_struct(target_ts, target_addr, 1); 1217 return 0; 1218 } 1219 1220 static inline abi_long host_to_target_timespec64(abi_ulong target_addr, 1221 struct timespec *host_ts) 1222 { 1223 struct target__kernel_timespec *target_ts; 1224 1225 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) { 1226 return -TARGET_EFAULT; 1227 } 1228 __put_user(host_ts->tv_sec, &target_ts->tv_sec); 1229 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec); 1230 unlock_user_struct(target_ts, target_addr, 1); 1231 return 0; 1232 } 1233 1234 #if defined(TARGET_NR_gettimeofday) 1235 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr, 1236 struct timezone *tz) 1237 { 1238 struct target_timezone *target_tz; 1239 1240 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) { 1241 return -TARGET_EFAULT; 1242 } 1243 1244 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 1245 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime); 1246 1247 unlock_user_struct(target_tz, target_tz_addr, 1); 1248 1249 return 0; 1250 } 1251 #endif 1252 1253 #if defined(TARGET_NR_settimeofday) 1254 static inline abi_long copy_from_user_timezone(struct timezone *tz, 1255 abi_ulong target_tz_addr) 1256 { 1257 struct target_timezone *target_tz; 1258 1259 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) { 1260 return -TARGET_EFAULT; 1261 } 1262 1263 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest); 1264 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime); 1265 1266 unlock_user_struct(target_tz, target_tz_addr, 0); 1267 1268 return 0; 1269 } 1270 #endif 1271 1272 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 1273 #include <mqueue.h> 1274 1275 
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr, 1276 abi_ulong target_mq_attr_addr) 1277 { 1278 struct target_mq_attr *target_mq_attr; 1279 1280 if (!lock_user_struct(VERIFY_READ, target_mq_attr, 1281 target_mq_attr_addr, 1)) 1282 return -TARGET_EFAULT; 1283 1284 __get_user(attr->mq_flags, &target_mq_attr->mq_flags); 1285 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1286 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1287 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1288 1289 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0); 1290 1291 return 0; 1292 } 1293 1294 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr, 1295 const struct mq_attr *attr) 1296 { 1297 struct target_mq_attr *target_mq_attr; 1298 1299 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr, 1300 target_mq_attr_addr, 0)) 1301 return -TARGET_EFAULT; 1302 1303 __put_user(attr->mq_flags, &target_mq_attr->mq_flags); 1304 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg); 1305 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize); 1306 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs); 1307 1308 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1); 1309 1310 return 0; 1311 } 1312 #endif 1313 1314 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) 1315 /* do_select() must return target values and target errnos. 
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull the three (possibly NULL) guest fd sets into host form. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* The guest passes a timeval, but we issue the host call via
     * safe_pselect6 which takes a timespec: convert usec -> nsec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* On success, write back the kernel-modified fd sets and the
         * remaining timeout (converted back to a timeval). */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Old-style select(2): the guest passes a single pointer to a
 * sel_arg_struct holding all five arguments.  Unpack (and byte-swap)
 * them, then forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return
do_select(nsel, inp, outp, exp, tvp); 1392 } 1393 #endif 1394 #endif 1395 1396 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) 1397 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3, 1398 abi_long arg4, abi_long arg5, abi_long arg6, 1399 bool time64) 1400 { 1401 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 1402 fd_set rfds, wfds, efds; 1403 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 1404 struct timespec ts, *ts_ptr; 1405 abi_long ret; 1406 1407 /* 1408 * The 6th arg is actually two args smashed together, 1409 * so we cannot use the C library. 1410 */ 1411 struct { 1412 sigset_t *set; 1413 size_t size; 1414 } sig, *sig_ptr; 1415 1416 abi_ulong arg_sigset, arg_sigsize, *arg7; 1417 1418 n = arg1; 1419 rfd_addr = arg2; 1420 wfd_addr = arg3; 1421 efd_addr = arg4; 1422 ts_addr = arg5; 1423 1424 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 1425 if (ret) { 1426 return ret; 1427 } 1428 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 1429 if (ret) { 1430 return ret; 1431 } 1432 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 1433 if (ret) { 1434 return ret; 1435 } 1436 1437 /* 1438 * This takes a timespec, and not a timeval, so we cannot 1439 * use the do_select() helper ... 
1440 */ 1441 if (ts_addr) { 1442 if (time64) { 1443 if (target_to_host_timespec64(&ts, ts_addr)) { 1444 return -TARGET_EFAULT; 1445 } 1446 } else { 1447 if (target_to_host_timespec(&ts, ts_addr)) { 1448 return -TARGET_EFAULT; 1449 } 1450 } 1451 ts_ptr = &ts; 1452 } else { 1453 ts_ptr = NULL; 1454 } 1455 1456 /* Extract the two packed args for the sigset */ 1457 sig_ptr = NULL; 1458 if (arg6) { 1459 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 1460 if (!arg7) { 1461 return -TARGET_EFAULT; 1462 } 1463 arg_sigset = tswapal(arg7[0]); 1464 arg_sigsize = tswapal(arg7[1]); 1465 unlock_user(arg7, arg6, 0); 1466 1467 if (arg_sigset) { 1468 ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize); 1469 if (ret != 0) { 1470 return ret; 1471 } 1472 sig_ptr = &sig; 1473 sig.size = SIGSET_T_SIZE; 1474 } 1475 } 1476 1477 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 1478 ts_ptr, sig_ptr)); 1479 1480 if (sig_ptr) { 1481 finish_sigsuspend_mask(ret); 1482 } 1483 1484 if (!is_error(ret)) { 1485 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) { 1486 return -TARGET_EFAULT; 1487 } 1488 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) { 1489 return -TARGET_EFAULT; 1490 } 1491 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) { 1492 return -TARGET_EFAULT; 1493 } 1494 if (time64) { 1495 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) { 1496 return -TARGET_EFAULT; 1497 } 1498 } else { 1499 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) { 1500 return -TARGET_EFAULT; 1501 } 1502 } 1503 } 1504 return ret; 1505 } 1506 #endif 1507 1508 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \ 1509 defined(TARGET_NR_ppoll_time64) 1510 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3, 1511 abi_long arg4, abi_long arg5, bool ppoll, bool time64) 1512 { 1513 struct target_pollfd *target_pfd; 1514 unsigned int nfds = arg2; 1515 struct pollfd *pfd; 1516 unsigned int i; 1517 abi_long ret; 1518 1519 pfd = 
NULL; 1520 target_pfd = NULL; 1521 if (nfds) { 1522 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) { 1523 return -TARGET_EINVAL; 1524 } 1525 target_pfd = lock_user(VERIFY_WRITE, arg1, 1526 sizeof(struct target_pollfd) * nfds, 1); 1527 if (!target_pfd) { 1528 return -TARGET_EFAULT; 1529 } 1530 1531 pfd = alloca(sizeof(struct pollfd) * nfds); 1532 for (i = 0; i < nfds; i++) { 1533 pfd[i].fd = tswap32(target_pfd[i].fd); 1534 pfd[i].events = tswap16(target_pfd[i].events); 1535 } 1536 } 1537 if (ppoll) { 1538 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 1539 sigset_t *set = NULL; 1540 1541 if (arg3) { 1542 if (time64) { 1543 if (target_to_host_timespec64(timeout_ts, arg3)) { 1544 unlock_user(target_pfd, arg1, 0); 1545 return -TARGET_EFAULT; 1546 } 1547 } else { 1548 if (target_to_host_timespec(timeout_ts, arg3)) { 1549 unlock_user(target_pfd, arg1, 0); 1550 return -TARGET_EFAULT; 1551 } 1552 } 1553 } else { 1554 timeout_ts = NULL; 1555 } 1556 1557 if (arg4) { 1558 ret = process_sigsuspend_mask(&set, arg4, arg5); 1559 if (ret != 0) { 1560 unlock_user(target_pfd, arg1, 0); 1561 return ret; 1562 } 1563 } 1564 1565 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, 1566 set, SIGSET_T_SIZE)); 1567 1568 if (set) { 1569 finish_sigsuspend_mask(ret); 1570 } 1571 if (!is_error(ret) && arg3) { 1572 if (time64) { 1573 if (host_to_target_timespec64(arg3, timeout_ts)) { 1574 return -TARGET_EFAULT; 1575 } 1576 } else { 1577 if (host_to_target_timespec(arg3, timeout_ts)) { 1578 return -TARGET_EFAULT; 1579 } 1580 } 1581 } 1582 } else { 1583 struct timespec ts, *pts; 1584 1585 if (arg3 >= 0) { 1586 /* Convert ms to secs, ns */ 1587 ts.tv_sec = arg3 / 1000; 1588 ts.tv_nsec = (arg3 % 1000) * 1000000LL; 1589 pts = &ts; 1590 } else { 1591 /* -ve poll() timeout means "infinite" */ 1592 pts = NULL; 1593 } 1594 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 1595 } 1596 1597 if (!is_error(ret)) { 1598 for (i = 0; i < nfds; i++) { 1599 target_pfd[i].revents = 
tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

/*
 * Implement pipe()/pipe2() for the guest.  For pipe2 (and the common
 * case of pipe) the two fds are stored at guest address pipedes; a few
 * targets instead return the read fd and place the write fd in a
 * second register for the original pipe syscall.
 * Returns 0 / the read fd on success, or a target errno.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

/*
 * Convert a guest multicast membership request (ip_mreq or the larger
 * ip_mreqn, distinguished by len) at target_addr into the host struct.
 * imr_ifindex is only read for the full ip_mreqn layout; the two
 * in_addr fields are raw network-order values and are copied as-is.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1664 sa_family_t sa_family; 1665 struct target_sockaddr *target_saddr; 1666 1667 if (fd_trans_target_to_host_addr(fd)) { 1668 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len); 1669 } 1670 1671 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 1672 if (!target_saddr) 1673 return -TARGET_EFAULT; 1674 1675 sa_family = tswap16(target_saddr->sa_family); 1676 1677 /* Oops. The caller might send a incomplete sun_path; sun_path 1678 * must be terminated by \0 (see the manual page), but 1679 * unfortunately it is quite common to specify sockaddr_un 1680 * length as "strlen(x->sun_path)" while it should be 1681 * "strlen(...) + 1". We'll fix that here if needed. 1682 * Linux kernel has a similar feature. 1683 */ 1684 1685 if (sa_family == AF_UNIX) { 1686 if (len < unix_maxlen && len > 0) { 1687 char *cp = (char*)target_saddr; 1688 1689 if ( cp[len-1] && !cp[len] ) 1690 len++; 1691 } 1692 if (len > unix_maxlen) 1693 len = unix_maxlen; 1694 } 1695 1696 memcpy(addr, target_saddr, len); 1697 addr->sa_family = sa_family; 1698 if (sa_family == AF_NETLINK) { 1699 struct sockaddr_nl *nladdr; 1700 1701 nladdr = (struct sockaddr_nl *)addr; 1702 nladdr->nl_pid = tswap32(nladdr->nl_pid); 1703 nladdr->nl_groups = tswap32(nladdr->nl_groups); 1704 } else if (sa_family == AF_PACKET) { 1705 struct target_sockaddr_ll *lladdr; 1706 1707 lladdr = (struct target_sockaddr_ll *)addr; 1708 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); 1709 lladdr->sll_hatype = tswap16(lladdr->sll_hatype); 1710 } 1711 unlock_user(target_saddr, target_addr, 0); 1712 1713 return 0; 1714 } 1715 1716 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr, 1717 struct sockaddr *addr, 1718 socklen_t len) 1719 { 1720 struct target_sockaddr *target_saddr; 1721 1722 if (len == 0) { 1723 return 0; 1724 } 1725 assert(addr); 1726 1727 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0); 1728 if (!target_saddr) 1729 return -TARGET_EFAULT; 1730 memcpy(target_saddr, addr, len); 
1731 if (len >= offsetof(struct target_sockaddr, sa_family) + 1732 sizeof(target_saddr->sa_family)) { 1733 target_saddr->sa_family = tswap16(addr->sa_family); 1734 } 1735 if (addr->sa_family == AF_NETLINK && 1736 len >= sizeof(struct target_sockaddr_nl)) { 1737 struct target_sockaddr_nl *target_nl = 1738 (struct target_sockaddr_nl *)target_saddr; 1739 target_nl->nl_pid = tswap32(target_nl->nl_pid); 1740 target_nl->nl_groups = tswap32(target_nl->nl_groups); 1741 } else if (addr->sa_family == AF_PACKET) { 1742 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr; 1743 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex); 1744 target_ll->sll_hatype = tswap16(target_ll->sll_hatype); 1745 } else if (addr->sa_family == AF_INET6 && 1746 len >= sizeof(struct target_sockaddr_in6)) { 1747 struct target_sockaddr_in6 *target_in6 = 1748 (struct target_sockaddr_in6 *)target_saddr; 1749 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id); 1750 } 1751 unlock_user(target_saddr, target_addr, len); 1752 1753 return 0; 1754 } 1755 1756 static inline abi_long target_to_host_cmsg(struct msghdr *msgh, 1757 struct target_msghdr *target_msgh) 1758 { 1759 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1760 abi_long msg_controllen; 1761 abi_ulong target_cmsg_addr; 1762 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1763 socklen_t space = 0; 1764 1765 msg_controllen = tswapal(target_msgh->msg_controllen); 1766 if (msg_controllen < sizeof (struct target_cmsghdr)) 1767 goto the_end; 1768 target_cmsg_addr = tswapal(target_msgh->msg_control); 1769 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1); 1770 target_cmsg_start = target_cmsg; 1771 if (!target_cmsg) 1772 return -TARGET_EFAULT; 1773 1774 while (cmsg && target_cmsg) { 1775 void *data = CMSG_DATA(cmsg); 1776 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1777 1778 int len = tswapal(target_cmsg->cmsg_len) 1779 - sizeof(struct target_cmsghdr); 1780 1781 space += CMSG_SPACE(len); 
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate the level; only SOL_SOCKET differs between
         * guest and host ABIs here. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* SCM_RIGHTS: an array of file descriptors; swap each
             * 32-bit fd individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* SCM_CREDENTIALS: swap pid/uid/gid field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload type: log it and pass the bytes
             * through unconverted (best effort). */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
unlock_user(target_cmsg, target_cmsg_addr, 0); 1837 the_end: 1838 msgh->msg_controllen = space; 1839 return 0; 1840 } 1841 1842 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh, 1843 struct msghdr *msgh) 1844 { 1845 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh); 1846 abi_long msg_controllen; 1847 abi_ulong target_cmsg_addr; 1848 struct target_cmsghdr *target_cmsg, *target_cmsg_start; 1849 socklen_t space = 0; 1850 1851 msg_controllen = tswapal(target_msgh->msg_controllen); 1852 if (msg_controllen < sizeof (struct target_cmsghdr)) 1853 goto the_end; 1854 target_cmsg_addr = tswapal(target_msgh->msg_control); 1855 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0); 1856 target_cmsg_start = target_cmsg; 1857 if (!target_cmsg) 1858 return -TARGET_EFAULT; 1859 1860 while (cmsg && target_cmsg) { 1861 void *data = CMSG_DATA(cmsg); 1862 void *target_data = TARGET_CMSG_DATA(target_cmsg); 1863 1864 int len = cmsg->cmsg_len - sizeof(struct cmsghdr); 1865 int tgt_len, tgt_space; 1866 1867 /* We never copy a half-header but may copy half-data; 1868 * this is Linux's behaviour in put_cmsg(). Note that 1869 * truncation here is a guest problem (which we report 1870 * to the guest via the CTRUNC bit), unlike truncation 1871 * in target_to_host_cmsg, which is a QEMU bug. 1872 */ 1873 if (msg_controllen < sizeof(struct target_cmsghdr)) { 1874 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1875 break; 1876 } 1877 1878 if (cmsg->cmsg_level == SOL_SOCKET) { 1879 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET); 1880 } else { 1881 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level); 1882 } 1883 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type); 1884 1885 /* Payload types which need a different size of payload on 1886 * the target must adjust tgt_len here. 
1887 */ 1888 tgt_len = len; 1889 switch (cmsg->cmsg_level) { 1890 case SOL_SOCKET: 1891 switch (cmsg->cmsg_type) { 1892 case SO_TIMESTAMP: 1893 tgt_len = sizeof(struct target_timeval); 1894 break; 1895 default: 1896 break; 1897 } 1898 break; 1899 default: 1900 break; 1901 } 1902 1903 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) { 1904 target_msgh->msg_flags |= tswap32(MSG_CTRUNC); 1905 tgt_len = msg_controllen - sizeof(struct target_cmsghdr); 1906 } 1907 1908 /* We must now copy-and-convert len bytes of payload 1909 * into tgt_len bytes of destination space. Bear in mind 1910 * that in both source and destination we may be dealing 1911 * with a truncated value! 1912 */ 1913 switch (cmsg->cmsg_level) { 1914 case SOL_SOCKET: 1915 switch (cmsg->cmsg_type) { 1916 case SCM_RIGHTS: 1917 { 1918 int *fd = (int *)data; 1919 int *target_fd = (int *)target_data; 1920 int i, numfds = tgt_len / sizeof(int); 1921 1922 for (i = 0; i < numfds; i++) { 1923 __put_user(fd[i], target_fd + i); 1924 } 1925 break; 1926 } 1927 case SO_TIMESTAMP: 1928 { 1929 struct timeval *tv = (struct timeval *)data; 1930 struct target_timeval *target_tv = 1931 (struct target_timeval *)target_data; 1932 1933 if (len != sizeof(struct timeval) || 1934 tgt_len != sizeof(struct target_timeval)) { 1935 goto unimplemented; 1936 } 1937 1938 /* copy struct timeval to target */ 1939 __put_user(tv->tv_sec, &target_tv->tv_sec); 1940 __put_user(tv->tv_usec, &target_tv->tv_usec); 1941 break; 1942 } 1943 case SCM_CREDENTIALS: 1944 { 1945 struct ucred *cred = (struct ucred *)data; 1946 struct target_ucred *target_cred = 1947 (struct target_ucred *)target_data; 1948 1949 __put_user(cred->pid, &target_cred->pid); 1950 __put_user(cred->uid, &target_cred->uid); 1951 __put_user(cred->gid, &target_cred->gid); 1952 break; 1953 } 1954 default: 1955 goto unimplemented; 1956 } 1957 break; 1958 1959 case SOL_IP: 1960 switch (cmsg->cmsg_type) { 1961 case IP_TTL: 1962 { 1963 uint32_t *v = (uint32_t *)data; 1964 uint32_t *t_int = 
(uint32_t *)target_data; 1965 1966 if (len != sizeof(uint32_t) || 1967 tgt_len != sizeof(uint32_t)) { 1968 goto unimplemented; 1969 } 1970 __put_user(*v, t_int); 1971 break; 1972 } 1973 case IP_RECVERR: 1974 { 1975 struct errhdr_t { 1976 struct sock_extended_err ee; 1977 struct sockaddr_in offender; 1978 }; 1979 struct errhdr_t *errh = (struct errhdr_t *)data; 1980 struct errhdr_t *target_errh = 1981 (struct errhdr_t *)target_data; 1982 1983 if (len != sizeof(struct errhdr_t) || 1984 tgt_len != sizeof(struct errhdr_t)) { 1985 goto unimplemented; 1986 } 1987 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 1988 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 1989 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 1990 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 1991 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 1992 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 1993 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 1994 host_to_target_sockaddr((unsigned long) &target_errh->offender, 1995 (void *) &errh->offender, sizeof(errh->offender)); 1996 break; 1997 } 1998 default: 1999 goto unimplemented; 2000 } 2001 break; 2002 2003 case SOL_IPV6: 2004 switch (cmsg->cmsg_type) { 2005 case IPV6_HOPLIMIT: 2006 { 2007 uint32_t *v = (uint32_t *)data; 2008 uint32_t *t_int = (uint32_t *)target_data; 2009 2010 if (len != sizeof(uint32_t) || 2011 tgt_len != sizeof(uint32_t)) { 2012 goto unimplemented; 2013 } 2014 __put_user(*v, t_int); 2015 break; 2016 } 2017 case IPV6_RECVERR: 2018 { 2019 struct errhdr6_t { 2020 struct sock_extended_err ee; 2021 struct sockaddr_in6 offender; 2022 }; 2023 struct errhdr6_t *errh = (struct errhdr6_t *)data; 2024 struct errhdr6_t *target_errh = 2025 (struct errhdr6_t *)target_data; 2026 2027 if (len != sizeof(struct errhdr6_t) || 2028 tgt_len != sizeof(struct errhdr6_t)) { 2029 goto unimplemented; 2030 } 2031 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno); 2032 
__put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin); 2033 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type); 2034 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code); 2035 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad); 2036 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info); 2037 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data); 2038 host_to_target_sockaddr((unsigned long) &target_errh->offender, 2039 (void *) &errh->offender, sizeof(errh->offender)); 2040 break; 2041 } 2042 default: 2043 goto unimplemented; 2044 } 2045 break; 2046 2047 default: 2048 unimplemented: 2049 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n", 2050 cmsg->cmsg_level, cmsg->cmsg_type); 2051 memcpy(target_data, data, MIN(len, tgt_len)); 2052 if (tgt_len > len) { 2053 memset(target_data + len, 0, tgt_len - len); 2054 } 2055 } 2056 2057 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len)); 2058 tgt_space = TARGET_CMSG_SPACE(tgt_len); 2059 if (msg_controllen < tgt_space) { 2060 tgt_space = msg_controllen; 2061 } 2062 msg_controllen -= tgt_space; 2063 space += tgt_space; 2064 cmsg = CMSG_NXTHDR(msgh, cmsg); 2065 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg, 2066 target_cmsg_start); 2067 } 2068 unlock_user(target_cmsg, target_cmsg_addr, space); 2069 the_end: 2070 target_msgh->msg_controllen = tswapal(space); 2071 return 0; 2072 } 2073 2074 /* do_setsockopt() Must return target values and target errnos. */ 2075 static abi_long do_setsockopt(int sockfd, int level, int optname, 2076 abi_ulong optval_addr, socklen_t optlen) 2077 { 2078 abi_long ret; 2079 int val; 2080 struct ip_mreqn *ip_mreq; 2081 struct ip_mreq_source *ip_mreq_source; 2082 2083 switch(level) { 2084 case SOL_TCP: 2085 case SOL_UDP: 2086 /* TCP and UDP options all take an 'int' value. 
*/ 2087 if (optlen < sizeof(uint32_t)) 2088 return -TARGET_EINVAL; 2089 2090 if (get_user_u32(val, optval_addr)) 2091 return -TARGET_EFAULT; 2092 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2093 break; 2094 case SOL_IP: 2095 switch(optname) { 2096 case IP_TOS: 2097 case IP_TTL: 2098 case IP_HDRINCL: 2099 case IP_ROUTER_ALERT: 2100 case IP_RECVOPTS: 2101 case IP_RETOPTS: 2102 case IP_PKTINFO: 2103 case IP_MTU_DISCOVER: 2104 case IP_RECVERR: 2105 case IP_RECVTTL: 2106 case IP_RECVTOS: 2107 #ifdef IP_FREEBIND 2108 case IP_FREEBIND: 2109 #endif 2110 case IP_MULTICAST_TTL: 2111 case IP_MULTICAST_LOOP: 2112 val = 0; 2113 if (optlen >= sizeof(uint32_t)) { 2114 if (get_user_u32(val, optval_addr)) 2115 return -TARGET_EFAULT; 2116 } else if (optlen >= 1) { 2117 if (get_user_u8(val, optval_addr)) 2118 return -TARGET_EFAULT; 2119 } 2120 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); 2121 break; 2122 case IP_ADD_MEMBERSHIP: 2123 case IP_DROP_MEMBERSHIP: 2124 if (optlen < sizeof (struct target_ip_mreq) || 2125 optlen > sizeof (struct target_ip_mreqn)) 2126 return -TARGET_EINVAL; 2127 2128 ip_mreq = (struct ip_mreqn *) alloca(optlen); 2129 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); 2130 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); 2131 break; 2132 2133 case IP_BLOCK_SOURCE: 2134 case IP_UNBLOCK_SOURCE: 2135 case IP_ADD_SOURCE_MEMBERSHIP: 2136 case IP_DROP_SOURCE_MEMBERSHIP: 2137 if (optlen != sizeof (struct target_ip_mreq_source)) 2138 return -TARGET_EINVAL; 2139 2140 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2141 if (!ip_mreq_source) { 2142 return -TARGET_EFAULT; 2143 } 2144 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); 2145 unlock_user (ip_mreq_source, optval_addr, 0); 2146 break; 2147 2148 default: 2149 goto unimplemented; 2150 } 2151 break; 2152 case SOL_IPV6: 2153 switch (optname) { 2154 case IPV6_MTU_DISCOVER: 2155 case IPV6_MTU: 
2156 case IPV6_V6ONLY: 2157 case IPV6_RECVPKTINFO: 2158 case IPV6_UNICAST_HOPS: 2159 case IPV6_MULTICAST_HOPS: 2160 case IPV6_MULTICAST_LOOP: 2161 case IPV6_RECVERR: 2162 case IPV6_RECVHOPLIMIT: 2163 case IPV6_2292HOPLIMIT: 2164 case IPV6_CHECKSUM: 2165 case IPV6_ADDRFORM: 2166 case IPV6_2292PKTINFO: 2167 case IPV6_RECVTCLASS: 2168 case IPV6_RECVRTHDR: 2169 case IPV6_2292RTHDR: 2170 case IPV6_RECVHOPOPTS: 2171 case IPV6_2292HOPOPTS: 2172 case IPV6_RECVDSTOPTS: 2173 case IPV6_2292DSTOPTS: 2174 case IPV6_TCLASS: 2175 case IPV6_ADDR_PREFERENCES: 2176 #ifdef IPV6_RECVPATHMTU 2177 case IPV6_RECVPATHMTU: 2178 #endif 2179 #ifdef IPV6_TRANSPARENT 2180 case IPV6_TRANSPARENT: 2181 #endif 2182 #ifdef IPV6_FREEBIND 2183 case IPV6_FREEBIND: 2184 #endif 2185 #ifdef IPV6_RECVORIGDSTADDR 2186 case IPV6_RECVORIGDSTADDR: 2187 #endif 2188 val = 0; 2189 if (optlen < sizeof(uint32_t)) { 2190 return -TARGET_EINVAL; 2191 } 2192 if (get_user_u32(val, optval_addr)) { 2193 return -TARGET_EFAULT; 2194 } 2195 ret = get_errno(setsockopt(sockfd, level, optname, 2196 &val, sizeof(val))); 2197 break; 2198 case IPV6_PKTINFO: 2199 { 2200 struct in6_pktinfo pki; 2201 2202 if (optlen < sizeof(pki)) { 2203 return -TARGET_EINVAL; 2204 } 2205 2206 if (copy_from_user(&pki, optval_addr, sizeof(pki))) { 2207 return -TARGET_EFAULT; 2208 } 2209 2210 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex); 2211 2212 ret = get_errno(setsockopt(sockfd, level, optname, 2213 &pki, sizeof(pki))); 2214 break; 2215 } 2216 case IPV6_ADD_MEMBERSHIP: 2217 case IPV6_DROP_MEMBERSHIP: 2218 { 2219 struct ipv6_mreq ipv6mreq; 2220 2221 if (optlen < sizeof(ipv6mreq)) { 2222 return -TARGET_EINVAL; 2223 } 2224 2225 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) { 2226 return -TARGET_EFAULT; 2227 } 2228 2229 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface); 2230 2231 ret = get_errno(setsockopt(sockfd, level, optname, 2232 &ipv6mreq, sizeof(ipv6mreq))); 2233 break; 2234 } 2235 default: 2236 goto unimplemented; 
2237 } 2238 break; 2239 case SOL_ICMPV6: 2240 switch (optname) { 2241 case ICMPV6_FILTER: 2242 { 2243 struct icmp6_filter icmp6f; 2244 2245 if (optlen > sizeof(icmp6f)) { 2246 optlen = sizeof(icmp6f); 2247 } 2248 2249 if (copy_from_user(&icmp6f, optval_addr, optlen)) { 2250 return -TARGET_EFAULT; 2251 } 2252 2253 for (val = 0; val < 8; val++) { 2254 icmp6f.data[val] = tswap32(icmp6f.data[val]); 2255 } 2256 2257 ret = get_errno(setsockopt(sockfd, level, optname, 2258 &icmp6f, optlen)); 2259 break; 2260 } 2261 default: 2262 goto unimplemented; 2263 } 2264 break; 2265 case SOL_RAW: 2266 switch (optname) { 2267 case ICMP_FILTER: 2268 case IPV6_CHECKSUM: 2269 /* those take an u32 value */ 2270 if (optlen < sizeof(uint32_t)) { 2271 return -TARGET_EINVAL; 2272 } 2273 2274 if (get_user_u32(val, optval_addr)) { 2275 return -TARGET_EFAULT; 2276 } 2277 ret = get_errno(setsockopt(sockfd, level, optname, 2278 &val, sizeof(val))); 2279 break; 2280 2281 default: 2282 goto unimplemented; 2283 } 2284 break; 2285 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE) 2286 case SOL_ALG: 2287 switch (optname) { 2288 case ALG_SET_KEY: 2289 { 2290 char *alg_key = g_malloc(optlen); 2291 2292 if (!alg_key) { 2293 return -TARGET_ENOMEM; 2294 } 2295 if (copy_from_user(alg_key, optval_addr, optlen)) { 2296 g_free(alg_key); 2297 return -TARGET_EFAULT; 2298 } 2299 ret = get_errno(setsockopt(sockfd, level, optname, 2300 alg_key, optlen)); 2301 g_free(alg_key); 2302 break; 2303 } 2304 case ALG_SET_AEAD_AUTHSIZE: 2305 { 2306 ret = get_errno(setsockopt(sockfd, level, optname, 2307 NULL, optlen)); 2308 break; 2309 } 2310 default: 2311 goto unimplemented; 2312 } 2313 break; 2314 #endif 2315 case TARGET_SOL_SOCKET: 2316 switch (optname) { 2317 case TARGET_SO_RCVTIMEO: 2318 { 2319 struct timeval tv; 2320 2321 optname = SO_RCVTIMEO; 2322 2323 set_timeout: 2324 if (optlen != sizeof(struct target_timeval)) { 2325 return -TARGET_EINVAL; 2326 } 2327 2328 if 
(copy_from_user_timeval(&tv, optval_addr)) { 2329 return -TARGET_EFAULT; 2330 } 2331 2332 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2333 &tv, sizeof(tv))); 2334 return ret; 2335 } 2336 case TARGET_SO_SNDTIMEO: 2337 optname = SO_SNDTIMEO; 2338 goto set_timeout; 2339 case TARGET_SO_ATTACH_FILTER: 2340 { 2341 struct target_sock_fprog *tfprog; 2342 struct target_sock_filter *tfilter; 2343 struct sock_fprog fprog; 2344 struct sock_filter *filter; 2345 int i; 2346 2347 if (optlen != sizeof(*tfprog)) { 2348 return -TARGET_EINVAL; 2349 } 2350 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) { 2351 return -TARGET_EFAULT; 2352 } 2353 if (!lock_user_struct(VERIFY_READ, tfilter, 2354 tswapal(tfprog->filter), 0)) { 2355 unlock_user_struct(tfprog, optval_addr, 1); 2356 return -TARGET_EFAULT; 2357 } 2358 2359 fprog.len = tswap16(tfprog->len); 2360 filter = g_try_new(struct sock_filter, fprog.len); 2361 if (filter == NULL) { 2362 unlock_user_struct(tfilter, tfprog->filter, 1); 2363 unlock_user_struct(tfprog, optval_addr, 1); 2364 return -TARGET_ENOMEM; 2365 } 2366 for (i = 0; i < fprog.len; i++) { 2367 filter[i].code = tswap16(tfilter[i].code); 2368 filter[i].jt = tfilter[i].jt; 2369 filter[i].jf = tfilter[i].jf; 2370 filter[i].k = tswap32(tfilter[i].k); 2371 } 2372 fprog.filter = filter; 2373 2374 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, 2375 SO_ATTACH_FILTER, &fprog, sizeof(fprog))); 2376 g_free(filter); 2377 2378 unlock_user_struct(tfilter, tfprog->filter, 1); 2379 unlock_user_struct(tfprog, optval_addr, 1); 2380 return ret; 2381 } 2382 case TARGET_SO_BINDTODEVICE: 2383 { 2384 char *dev_ifname, *addr_ifname; 2385 2386 if (optlen > IFNAMSIZ - 1) { 2387 optlen = IFNAMSIZ - 1; 2388 } 2389 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1); 2390 if (!dev_ifname) { 2391 return -TARGET_EFAULT; 2392 } 2393 optname = SO_BINDTODEVICE; 2394 addr_ifname = alloca(IFNAMSIZ); 2395 memcpy(addr_ifname, dev_ifname, optlen); 2396 addr_ifname[optlen] = 0; 
2397 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, 2398 addr_ifname, optlen)); 2399 unlock_user (dev_ifname, optval_addr, 0); 2400 return ret; 2401 } 2402 case TARGET_SO_LINGER: 2403 { 2404 struct linger lg; 2405 struct target_linger *tlg; 2406 2407 if (optlen != sizeof(struct target_linger)) { 2408 return -TARGET_EINVAL; 2409 } 2410 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) { 2411 return -TARGET_EFAULT; 2412 } 2413 __get_user(lg.l_onoff, &tlg->l_onoff); 2414 __get_user(lg.l_linger, &tlg->l_linger); 2415 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER, 2416 &lg, sizeof(lg))); 2417 unlock_user_struct(tlg, optval_addr, 0); 2418 return ret; 2419 } 2420 /* Options with 'int' argument. */ 2421 case TARGET_SO_DEBUG: 2422 optname = SO_DEBUG; 2423 break; 2424 case TARGET_SO_REUSEADDR: 2425 optname = SO_REUSEADDR; 2426 break; 2427 #ifdef SO_REUSEPORT 2428 case TARGET_SO_REUSEPORT: 2429 optname = SO_REUSEPORT; 2430 break; 2431 #endif 2432 case TARGET_SO_TYPE: 2433 optname = SO_TYPE; 2434 break; 2435 case TARGET_SO_ERROR: 2436 optname = SO_ERROR; 2437 break; 2438 case TARGET_SO_DONTROUTE: 2439 optname = SO_DONTROUTE; 2440 break; 2441 case TARGET_SO_BROADCAST: 2442 optname = SO_BROADCAST; 2443 break; 2444 case TARGET_SO_SNDBUF: 2445 optname = SO_SNDBUF; 2446 break; 2447 case TARGET_SO_SNDBUFFORCE: 2448 optname = SO_SNDBUFFORCE; 2449 break; 2450 case TARGET_SO_RCVBUF: 2451 optname = SO_RCVBUF; 2452 break; 2453 case TARGET_SO_RCVBUFFORCE: 2454 optname = SO_RCVBUFFORCE; 2455 break; 2456 case TARGET_SO_KEEPALIVE: 2457 optname = SO_KEEPALIVE; 2458 break; 2459 case TARGET_SO_OOBINLINE: 2460 optname = SO_OOBINLINE; 2461 break; 2462 case TARGET_SO_NO_CHECK: 2463 optname = SO_NO_CHECK; 2464 break; 2465 case TARGET_SO_PRIORITY: 2466 optname = SO_PRIORITY; 2467 break; 2468 #ifdef SO_BSDCOMPAT 2469 case TARGET_SO_BSDCOMPAT: 2470 optname = SO_BSDCOMPAT; 2471 break; 2472 #endif 2473 case TARGET_SO_PASSCRED: 2474 optname = SO_PASSCRED; 2475 break; 2476 case 
TARGET_SO_PASSSEC: 2477 optname = SO_PASSSEC; 2478 break; 2479 case TARGET_SO_TIMESTAMP: 2480 optname = SO_TIMESTAMP; 2481 break; 2482 case TARGET_SO_RCVLOWAT: 2483 optname = SO_RCVLOWAT; 2484 break; 2485 default: 2486 goto unimplemented; 2487 } 2488 if (optlen < sizeof(uint32_t)) 2489 return -TARGET_EINVAL; 2490 2491 if (get_user_u32(val, optval_addr)) 2492 return -TARGET_EFAULT; 2493 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); 2494 break; 2495 #ifdef SOL_NETLINK 2496 case SOL_NETLINK: 2497 switch (optname) { 2498 case NETLINK_PKTINFO: 2499 case NETLINK_ADD_MEMBERSHIP: 2500 case NETLINK_DROP_MEMBERSHIP: 2501 case NETLINK_BROADCAST_ERROR: 2502 case NETLINK_NO_ENOBUFS: 2503 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2504 case NETLINK_LISTEN_ALL_NSID: 2505 case NETLINK_CAP_ACK: 2506 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2507 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2508 case NETLINK_EXT_ACK: 2509 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2510 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2511 case NETLINK_GET_STRICT_CHK: 2512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2513 break; 2514 default: 2515 goto unimplemented; 2516 } 2517 val = 0; 2518 if (optlen < sizeof(uint32_t)) { 2519 return -TARGET_EINVAL; 2520 } 2521 if (get_user_u32(val, optval_addr)) { 2522 return -TARGET_EFAULT; 2523 } 2524 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val, 2525 sizeof(val))); 2526 break; 2527 #endif /* SOL_NETLINK */ 2528 default: 2529 unimplemented: 2530 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n", 2531 level, optname); 2532 ret = -TARGET_ENOPROTOOPT; 2533 } 2534 return ret; 2535 } 2536 2537 /* do_getsockopt() Must return target values and target errnos. 
*/ 2538 static abi_long do_getsockopt(int sockfd, int level, int optname, 2539 abi_ulong optval_addr, abi_ulong optlen) 2540 { 2541 abi_long ret; 2542 int len, val; 2543 socklen_t lv; 2544 2545 switch(level) { 2546 case TARGET_SOL_SOCKET: 2547 level = SOL_SOCKET; 2548 switch (optname) { 2549 /* These don't just return a single integer */ 2550 case TARGET_SO_PEERNAME: 2551 goto unimplemented; 2552 case TARGET_SO_RCVTIMEO: { 2553 struct timeval tv; 2554 socklen_t tvlen; 2555 2556 optname = SO_RCVTIMEO; 2557 2558 get_timeout: 2559 if (get_user_u32(len, optlen)) { 2560 return -TARGET_EFAULT; 2561 } 2562 if (len < 0) { 2563 return -TARGET_EINVAL; 2564 } 2565 2566 tvlen = sizeof(tv); 2567 ret = get_errno(getsockopt(sockfd, level, optname, 2568 &tv, &tvlen)); 2569 if (ret < 0) { 2570 return ret; 2571 } 2572 if (len > sizeof(struct target_timeval)) { 2573 len = sizeof(struct target_timeval); 2574 } 2575 if (copy_to_user_timeval(optval_addr, &tv)) { 2576 return -TARGET_EFAULT; 2577 } 2578 if (put_user_u32(len, optlen)) { 2579 return -TARGET_EFAULT; 2580 } 2581 break; 2582 } 2583 case TARGET_SO_SNDTIMEO: 2584 optname = SO_SNDTIMEO; 2585 goto get_timeout; 2586 case TARGET_SO_PEERCRED: { 2587 struct ucred cr; 2588 socklen_t crlen; 2589 struct target_ucred *tcr; 2590 2591 if (get_user_u32(len, optlen)) { 2592 return -TARGET_EFAULT; 2593 } 2594 if (len < 0) { 2595 return -TARGET_EINVAL; 2596 } 2597 2598 crlen = sizeof(cr); 2599 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED, 2600 &cr, &crlen)); 2601 if (ret < 0) { 2602 return ret; 2603 } 2604 if (len > crlen) { 2605 len = crlen; 2606 } 2607 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) { 2608 return -TARGET_EFAULT; 2609 } 2610 __put_user(cr.pid, &tcr->pid); 2611 __put_user(cr.uid, &tcr->uid); 2612 __put_user(cr.gid, &tcr->gid); 2613 unlock_user_struct(tcr, optval_addr, 1); 2614 if (put_user_u32(len, optlen)) { 2615 return -TARGET_EFAULT; 2616 } 2617 break; 2618 } 2619 case TARGET_SO_PEERSEC: { 2620 char *name; 
2621 2622 if (get_user_u32(len, optlen)) { 2623 return -TARGET_EFAULT; 2624 } 2625 if (len < 0) { 2626 return -TARGET_EINVAL; 2627 } 2628 name = lock_user(VERIFY_WRITE, optval_addr, len, 0); 2629 if (!name) { 2630 return -TARGET_EFAULT; 2631 } 2632 lv = len; 2633 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC, 2634 name, &lv)); 2635 if (put_user_u32(lv, optlen)) { 2636 ret = -TARGET_EFAULT; 2637 } 2638 unlock_user(name, optval_addr, lv); 2639 break; 2640 } 2641 case TARGET_SO_LINGER: 2642 { 2643 struct linger lg; 2644 socklen_t lglen; 2645 struct target_linger *tlg; 2646 2647 if (get_user_u32(len, optlen)) { 2648 return -TARGET_EFAULT; 2649 } 2650 if (len < 0) { 2651 return -TARGET_EINVAL; 2652 } 2653 2654 lglen = sizeof(lg); 2655 ret = get_errno(getsockopt(sockfd, level, SO_LINGER, 2656 &lg, &lglen)); 2657 if (ret < 0) { 2658 return ret; 2659 } 2660 if (len > lglen) { 2661 len = lglen; 2662 } 2663 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) { 2664 return -TARGET_EFAULT; 2665 } 2666 __put_user(lg.l_onoff, &tlg->l_onoff); 2667 __put_user(lg.l_linger, &tlg->l_linger); 2668 unlock_user_struct(tlg, optval_addr, 1); 2669 if (put_user_u32(len, optlen)) { 2670 return -TARGET_EFAULT; 2671 } 2672 break; 2673 } 2674 /* Options with 'int' argument. 
*/ 2675 case TARGET_SO_DEBUG: 2676 optname = SO_DEBUG; 2677 goto int_case; 2678 case TARGET_SO_REUSEADDR: 2679 optname = SO_REUSEADDR; 2680 goto int_case; 2681 #ifdef SO_REUSEPORT 2682 case TARGET_SO_REUSEPORT: 2683 optname = SO_REUSEPORT; 2684 goto int_case; 2685 #endif 2686 case TARGET_SO_TYPE: 2687 optname = SO_TYPE; 2688 goto int_case; 2689 case TARGET_SO_ERROR: 2690 optname = SO_ERROR; 2691 goto int_case; 2692 case TARGET_SO_DONTROUTE: 2693 optname = SO_DONTROUTE; 2694 goto int_case; 2695 case TARGET_SO_BROADCAST: 2696 optname = SO_BROADCAST; 2697 goto int_case; 2698 case TARGET_SO_SNDBUF: 2699 optname = SO_SNDBUF; 2700 goto int_case; 2701 case TARGET_SO_RCVBUF: 2702 optname = SO_RCVBUF; 2703 goto int_case; 2704 case TARGET_SO_KEEPALIVE: 2705 optname = SO_KEEPALIVE; 2706 goto int_case; 2707 case TARGET_SO_OOBINLINE: 2708 optname = SO_OOBINLINE; 2709 goto int_case; 2710 case TARGET_SO_NO_CHECK: 2711 optname = SO_NO_CHECK; 2712 goto int_case; 2713 case TARGET_SO_PRIORITY: 2714 optname = SO_PRIORITY; 2715 goto int_case; 2716 #ifdef SO_BSDCOMPAT 2717 case TARGET_SO_BSDCOMPAT: 2718 optname = SO_BSDCOMPAT; 2719 goto int_case; 2720 #endif 2721 case TARGET_SO_PASSCRED: 2722 optname = SO_PASSCRED; 2723 goto int_case; 2724 case TARGET_SO_TIMESTAMP: 2725 optname = SO_TIMESTAMP; 2726 goto int_case; 2727 case TARGET_SO_RCVLOWAT: 2728 optname = SO_RCVLOWAT; 2729 goto int_case; 2730 case TARGET_SO_ACCEPTCONN: 2731 optname = SO_ACCEPTCONN; 2732 goto int_case; 2733 case TARGET_SO_PROTOCOL: 2734 optname = SO_PROTOCOL; 2735 goto int_case; 2736 case TARGET_SO_DOMAIN: 2737 optname = SO_DOMAIN; 2738 goto int_case; 2739 default: 2740 goto int_case; 2741 } 2742 break; 2743 case SOL_TCP: 2744 case SOL_UDP: 2745 /* TCP and UDP options all take an 'int' value. 
*/ 2746 int_case: 2747 if (get_user_u32(len, optlen)) 2748 return -TARGET_EFAULT; 2749 if (len < 0) 2750 return -TARGET_EINVAL; 2751 lv = sizeof(lv); 2752 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2753 if (ret < 0) 2754 return ret; 2755 if (optname == SO_TYPE) { 2756 val = host_to_target_sock_type(val); 2757 } 2758 if (len > lv) 2759 len = lv; 2760 if (len == 4) { 2761 if (put_user_u32(val, optval_addr)) 2762 return -TARGET_EFAULT; 2763 } else { 2764 if (put_user_u8(val, optval_addr)) 2765 return -TARGET_EFAULT; 2766 } 2767 if (put_user_u32(len, optlen)) 2768 return -TARGET_EFAULT; 2769 break; 2770 case SOL_IP: 2771 switch(optname) { 2772 case IP_TOS: 2773 case IP_TTL: 2774 case IP_HDRINCL: 2775 case IP_ROUTER_ALERT: 2776 case IP_RECVOPTS: 2777 case IP_RETOPTS: 2778 case IP_PKTINFO: 2779 case IP_MTU_DISCOVER: 2780 case IP_RECVERR: 2781 case IP_RECVTOS: 2782 #ifdef IP_FREEBIND 2783 case IP_FREEBIND: 2784 #endif 2785 case IP_MULTICAST_TTL: 2786 case IP_MULTICAST_LOOP: 2787 if (get_user_u32(len, optlen)) 2788 return -TARGET_EFAULT; 2789 if (len < 0) 2790 return -TARGET_EINVAL; 2791 lv = sizeof(lv); 2792 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2793 if (ret < 0) 2794 return ret; 2795 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2796 len = 1; 2797 if (put_user_u32(len, optlen) 2798 || put_user_u8(val, optval_addr)) 2799 return -TARGET_EFAULT; 2800 } else { 2801 if (len > sizeof(int)) 2802 len = sizeof(int); 2803 if (put_user_u32(len, optlen) 2804 || put_user_u32(val, optval_addr)) 2805 return -TARGET_EFAULT; 2806 } 2807 break; 2808 default: 2809 ret = -TARGET_ENOPROTOOPT; 2810 break; 2811 } 2812 break; 2813 case SOL_IPV6: 2814 switch (optname) { 2815 case IPV6_MTU_DISCOVER: 2816 case IPV6_MTU: 2817 case IPV6_V6ONLY: 2818 case IPV6_RECVPKTINFO: 2819 case IPV6_UNICAST_HOPS: 2820 case IPV6_MULTICAST_HOPS: 2821 case IPV6_MULTICAST_LOOP: 2822 case IPV6_RECVERR: 2823 case IPV6_RECVHOPLIMIT: 2824 case 
IPV6_2292HOPLIMIT: 2825 case IPV6_CHECKSUM: 2826 case IPV6_ADDRFORM: 2827 case IPV6_2292PKTINFO: 2828 case IPV6_RECVTCLASS: 2829 case IPV6_RECVRTHDR: 2830 case IPV6_2292RTHDR: 2831 case IPV6_RECVHOPOPTS: 2832 case IPV6_2292HOPOPTS: 2833 case IPV6_RECVDSTOPTS: 2834 case IPV6_2292DSTOPTS: 2835 case IPV6_TCLASS: 2836 case IPV6_ADDR_PREFERENCES: 2837 #ifdef IPV6_RECVPATHMTU 2838 case IPV6_RECVPATHMTU: 2839 #endif 2840 #ifdef IPV6_TRANSPARENT 2841 case IPV6_TRANSPARENT: 2842 #endif 2843 #ifdef IPV6_FREEBIND 2844 case IPV6_FREEBIND: 2845 #endif 2846 #ifdef IPV6_RECVORIGDSTADDR 2847 case IPV6_RECVORIGDSTADDR: 2848 #endif 2849 if (get_user_u32(len, optlen)) 2850 return -TARGET_EFAULT; 2851 if (len < 0) 2852 return -TARGET_EINVAL; 2853 lv = sizeof(lv); 2854 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2855 if (ret < 0) 2856 return ret; 2857 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) { 2858 len = 1; 2859 if (put_user_u32(len, optlen) 2860 || put_user_u8(val, optval_addr)) 2861 return -TARGET_EFAULT; 2862 } else { 2863 if (len > sizeof(int)) 2864 len = sizeof(int); 2865 if (put_user_u32(len, optlen) 2866 || put_user_u32(val, optval_addr)) 2867 return -TARGET_EFAULT; 2868 } 2869 break; 2870 default: 2871 ret = -TARGET_ENOPROTOOPT; 2872 break; 2873 } 2874 break; 2875 #ifdef SOL_NETLINK 2876 case SOL_NETLINK: 2877 switch (optname) { 2878 case NETLINK_PKTINFO: 2879 case NETLINK_BROADCAST_ERROR: 2880 case NETLINK_NO_ENOBUFS: 2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2882 case NETLINK_LISTEN_ALL_NSID: 2883 case NETLINK_CAP_ACK: 2884 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2885 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) 2886 case NETLINK_EXT_ACK: 2887 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2888 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) 2889 case NETLINK_GET_STRICT_CHK: 2890 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */ 2891 if (get_user_u32(len, optlen)) { 2892 
return -TARGET_EFAULT; 2893 } 2894 if (len != sizeof(val)) { 2895 return -TARGET_EINVAL; 2896 } 2897 lv = len; 2898 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv)); 2899 if (ret < 0) { 2900 return ret; 2901 } 2902 if (put_user_u32(lv, optlen) 2903 || put_user_u32(val, optval_addr)) { 2904 return -TARGET_EFAULT; 2905 } 2906 break; 2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 2908 case NETLINK_LIST_MEMBERSHIPS: 2909 { 2910 uint32_t *results; 2911 int i; 2912 if (get_user_u32(len, optlen)) { 2913 return -TARGET_EFAULT; 2914 } 2915 if (len < 0) { 2916 return -TARGET_EINVAL; 2917 } 2918 results = lock_user(VERIFY_WRITE, optval_addr, len, 1); 2919 if (!results && len > 0) { 2920 return -TARGET_EFAULT; 2921 } 2922 lv = len; 2923 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv)); 2924 if (ret < 0) { 2925 unlock_user(results, optval_addr, 0); 2926 return ret; 2927 } 2928 /* swap host endianess to target endianess. */ 2929 for (i = 0; i < (len / sizeof(uint32_t)); i++) { 2930 results[i] = tswap32(results[i]); 2931 } 2932 if (put_user_u32(lv, optlen)) { 2933 return -TARGET_EFAULT; 2934 } 2935 unlock_user(results, optval_addr, 0); 2936 break; 2937 } 2938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */ 2939 default: 2940 goto unimplemented; 2941 } 2942 break; 2943 #endif /* SOL_NETLINK */ 2944 default: 2945 unimplemented: 2946 qemu_log_mask(LOG_UNIMP, 2947 "getsockopt level=%d optname=%d not yet supported\n", 2948 level, optname); 2949 ret = -TARGET_EOPNOTSUPP; 2950 break; 2951 } 2952 return ret; 2953 } 2954 2955 /* Convert target low/high pair representing file offset into the host 2956 * low/high pair. This function doesn't handle offsets bigger than 64 bits 2957 * as the kernel doesn't handle them either. 
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The shifts are done in two half-width steps so the shift count is
     * always less than the operand width even when TARGET_LONG_BITS (or
     * HOST_LONG_BITS below) is 64 -- a single full-width shift would be
     * undefined behaviour in C.
     */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}

/* Lock a guest iovec array and all of its buffers into host memory.
 *
 * type: VERIFY_READ or VERIFY_WRITE, direction of the eventual access.
 * target_addr: guest address of the target_iovec array.
 * count: number of entries; copy: whether buffer contents must be copied.
 *
 * Returns a host iovec array to be released with unlock_iovec(), or NULL
 * with errno set (errno == 0 for the legitimate count == 0 case).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Release every buffer locked so far before bailing out. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

/* Release an iovec obtained from lock_iovec(), copying buffer contents
 * back to the guest when copy is set (i.e. after a read-style operation).
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            /* A negative length marks the point where lock_iovec() gave
             * up; nothing beyond it was locked.
             */
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

/* Translate target SOCK_* type and flag bits to host values in place.
 * Returns 0 on success, -TARGET_EINVAL when the host cannot express a
 * requested flag at all.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}

/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
    /* Only needed on hosts without SOCK_NONBLOCK: emulate it via fcntl,
     * closing the new fd if that fails so no half-configured socket leaks.
     */
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* do_socket() Must return target values and target errnos.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only a whitelisted set of netlink protocols is emulated. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet sockets carry the protocol (an ethertype) in network byte
     * order as seen by the guest; swap it for the host.
     */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Register per-protocol data translators for netlink fds. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the NUL that AF_UNIX path fixup may append. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the NUL that AF_UNIX path fixup may append. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    /* safe_connect() restarts correctly around guest signals. */
    return get_errno(safe_connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: msgp is an already-locked guest
 * msghdr.  Translates name, iovec and control data between guest and
 * host representations around the actual safe_sendmsg/safe_recvmsg call.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the control buffer: host cmsgs can be larger than the
     * target's (e.g. 64-bit timestamps vs 32-bit target timevals).
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* NOTE(review): only the first iovec entry is translated
             * here; fd translators appear to assume single-buffer sends.
             */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success, report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}

/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked(). */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}

/* do_accept4() Must return target values and target errnos.
*/
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* accept4() flags (SOCK_CLOEXEC/SOCK_NONBLOCK) share the fcntl flag
     * encoding, so reuse the fcntl translation table. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* Guest passed a NULL sockaddr: nothing to translate back. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Check writability up front so we fail with EFAULT before the
     * host syscall can block. */
    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy back at most the guest-supplied buffer size, but report
         * the full address length as the kernel does. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos.
*/
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Fail early with EFAULT if the guest buffer is not writable. */
    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        /* Truncate the copy-out to the guest buffer, report full length. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos.
*/
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    /* Translate SOCK_* type/flag bits; return value deliberately
     * ignored here, matching the original behavior. */
    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Write both fds back to the guest's int[2]. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* The fd's translator may rewrite the payload; keep the locked
         * guest buffer in copy_msg and work on a private copy. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 slack for sockaddr fixups, as in do_bind(). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the private copy and restore the locked guest pointer so
         * unlock_user() releases the right buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos.
*/
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is passed through to the host as NULL
     * (valid e.g. for zero-length datagram probes). */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate only the bytes actually received. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Truncated copy-out, full reported length -- kernel semantics. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}

#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
*/
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif

#define N_SHM_REGIONS	32

/* Book-keeping for guest shmat() mappings (fixed-size table). */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

/* Copy a guest ipc_perm (embedded at the head of a semid64_ds) into a
 * host struct ipc_perm.  mode/__seq field widths vary per target ABI. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Reverse direction of target_to_host_ipc_perm(). */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Convert a guest semid64_ds to a host semid_ds. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert a host semid_ds back to the guest's semid64_ds. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo (IPC_INFO/SEM_INFO result) out to the guest. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

/* Host-side semctl() argument union (glibc does not define it). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: pointers are abi_ulong. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

/* Allocate *host_array and fill it from the guest's GETALL/SETALL
 * unsigned-short array.  The semaphore count is queried via IPC_STAT.
 * On success the caller owns *host_array (freed by
 * host_to_target_semarray()). */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0,
IPC_STAT, semun); 3891 if (ret == -1) 3892 return get_errno(ret); 3893 3894 nsems = semid_ds.sem_nsems; 3895 3896 *host_array = g_try_new(unsigned short, nsems); 3897 if (!*host_array) { 3898 return -TARGET_ENOMEM; 3899 } 3900 array = lock_user(VERIFY_READ, target_addr, 3901 nsems*sizeof(unsigned short), 1); 3902 if (!array) { 3903 g_free(*host_array); 3904 return -TARGET_EFAULT; 3905 } 3906 3907 for(i=0; i<nsems; i++) { 3908 __get_user((*host_array)[i], &array[i]); 3909 } 3910 unlock_user(array, target_addr, 0); 3911 3912 return 0; 3913 } 3914 3915 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 3916 unsigned short **host_array) 3917 { 3918 int nsems; 3919 unsigned short *array; 3920 union semun semun; 3921 struct semid_ds semid_ds; 3922 int i, ret; 3923 3924 semun.buf = &semid_ds; 3925 3926 ret = semctl(semid, 0, IPC_STAT, semun); 3927 if (ret == -1) 3928 return get_errno(ret); 3929 3930 nsems = semid_ds.sem_nsems; 3931 3932 array = lock_user(VERIFY_WRITE, target_addr, 3933 nsems*sizeof(unsigned short), 0); 3934 if (!array) 3935 return -TARGET_EFAULT; 3936 3937 for(i=0; i<nsems; i++) { 3938 __put_user((*host_array)[i], &array[i]); 3939 } 3940 g_free(*host_array); 3941 unlock_user(array, target_addr, 1); 3942 3943 return 0; 3944 } 3945 3946 static inline abi_long do_semctl(int semid, int semnum, int cmd, 3947 abi_ulong target_arg) 3948 { 3949 union target_semun target_su = { .buf = target_arg }; 3950 union semun arg; 3951 struct semid_ds dsarg; 3952 unsigned short *array = NULL; 3953 struct seminfo seminfo; 3954 abi_long ret = -TARGET_EINVAL; 3955 abi_long err; 3956 cmd &= 0xff; 3957 3958 switch( cmd ) { 3959 case GETVAL: 3960 case SETVAL: 3961 /* In 64 bit cross-endian situations, we will erroneously pick up 3962 * the wrong half of the union for the "val" element. To rectify 3963 * this, the entire 8-byte structure is byteswapped, followed by 3964 * a swap of the 4 byte val field. 
In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* target_to_host_semarray() allocates 'array';
         * host_to_target_semarray() copies it back and releases it. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip the semid_ds through a host-format copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* Commands that ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

/* Guest layout of struct sembuf (identical field widths on all targets;
 * only the byte order differs). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy an array of nsops guest sembufs into host_sembuf (caller
 * provides the host buffer). */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
__get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop()/semtimedop().  'time64' selects which guest timespec
 * layout the timeout pointer uses.  Falls back to the multiplexed
 * __NR_ipc syscall on hosts without __NR_semtimedop. */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's SEMOPM limit before allocating. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif

/* Guest layout of struct msqid_ds (asm-generic); the __unusedN padding
 * words exist only on 32-bit ABIs, where each time field is split. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong
__unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds (for IPC_SET etc.) to the host layout. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Convert a host msqid_ds (IPC_STAT result) back to the guest layout. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes =
tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host msginfo out to the guest. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

/* Emulate msgctl(): translate the per-command payload both ways around
 * the host msgctl() call.  Must return target values and errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off IPC_64 and similar version bits from the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a msginfo through the msqid_ds pointer for
         * these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr,
&msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}

/* Guest layout of struct msgbuf: an abi_long mtype followed by the
 * message text (declared [1]; actual length is the msgsz argument). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(): copy the guest message into a host msgbuf (with a
 * host-sized long mtype) and send it.  Falls back to the multiplexed
 * __NR_ipc syscall where __NR_msgsnd is unavailable. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* s390's sys_ipc takes one fewer argument. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.
*/
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

/* Emulate msgrcv(): receive into a host msgbuf, then copy mtype and up
 * to 'ret' bytes of mtext back to the guest.  Falls back to the
 * multiplexed __NR_ipc syscall where __NR_msgrcv is unavailable. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* mtext follows mtype (an abi_long) in the guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* NOTE(review): target_mb is always non-NULL here (the failed
     * lock_user_struct path returns early), so this guard is redundant
     * but harmless. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

/* Convert a guest shmid_ds (for IPC_SET etc.) to the host layout. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert host *host_sd back into the guest target_shmid_ds at target_addr. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

/* Guest layout of struct shminfo (IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Write the host shminfo (IPC_INFO) into guest memory at target_addr. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

/* Guest layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Write the host shm_info (SHM_INFO) into guest memory at target_addr. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

/*
 * Emulate shmctl(2): convert guest arguments in, run the host shmctl,
 * and convert the per-command result back out.  Commands not listed
 * fall through and return the initial -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* strip IPC_64 etc. flags; only the low byte selects the command */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
4487 */ 4488 static inline abi_ulong target_shmlba(CPUArchState *cpu_env) 4489 { 4490 return TARGET_PAGE_SIZE; 4491 } 4492 #endif 4493 4494 static inline abi_ulong do_shmat(CPUArchState *cpu_env, 4495 int shmid, abi_ulong shmaddr, int shmflg) 4496 { 4497 CPUState *cpu = env_cpu(cpu_env); 4498 abi_long raddr; 4499 void *host_raddr; 4500 struct shmid_ds shm_info; 4501 int i,ret; 4502 abi_ulong shmlba; 4503 4504 /* shmat pointers are always untagged */ 4505 4506 /* find out the length of the shared memory segment */ 4507 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 4508 if (is_error(ret)) { 4509 /* can't get length, bail out */ 4510 return ret; 4511 } 4512 4513 shmlba = target_shmlba(cpu_env); 4514 4515 if (shmaddr & (shmlba - 1)) { 4516 if (shmflg & SHM_RND) { 4517 shmaddr &= ~(shmlba - 1); 4518 } else { 4519 return -TARGET_EINVAL; 4520 } 4521 } 4522 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) { 4523 return -TARGET_EINVAL; 4524 } 4525 4526 mmap_lock(); 4527 4528 /* 4529 * We're mapping shared memory, so ensure we generate code for parallel 4530 * execution and flush old translations. This will work up to the level 4531 * supported by the host -- anything that requires EXCP_ATOMIC will not 4532 * be atomic with respect to an external process. 4533 */ 4534 if (!(cpu->tcg_cflags & CF_PARALLEL)) { 4535 cpu->tcg_cflags |= CF_PARALLEL; 4536 tb_flush(cpu); 4537 } 4538 4539 if (shmaddr) 4540 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg); 4541 else { 4542 abi_ulong mmap_start; 4543 4544 /* In order to use the host shmat, we need to honor host SHMLBA. 
*/ 4545 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba)); 4546 4547 if (mmap_start == -1) { 4548 errno = ENOMEM; 4549 host_raddr = (void *)-1; 4550 } else 4551 host_raddr = shmat(shmid, g2h_untagged(mmap_start), 4552 shmflg | SHM_REMAP); 4553 } 4554 4555 if (host_raddr == (void *)-1) { 4556 mmap_unlock(); 4557 return get_errno((long)host_raddr); 4558 } 4559 raddr=h2g((unsigned long)host_raddr); 4560 4561 page_set_flags(raddr, raddr + shm_info.shm_segsz, 4562 PAGE_VALID | PAGE_RESET | PAGE_READ | 4563 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); 4564 4565 for (i = 0; i < N_SHM_REGIONS; i++) { 4566 if (!shm_regions[i].in_use) { 4567 shm_regions[i].in_use = true; 4568 shm_regions[i].start = raddr; 4569 shm_regions[i].size = shm_info.shm_segsz; 4570 break; 4571 } 4572 } 4573 4574 mmap_unlock(); 4575 return raddr; 4576 4577 } 4578 4579 static inline abi_long do_shmdt(abi_ulong shmaddr) 4580 { 4581 int i; 4582 abi_long rv; 4583 4584 /* shmdt pointers are always untagged */ 4585 4586 mmap_lock(); 4587 4588 for (i = 0; i < N_SHM_REGIONS; ++i) { 4589 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 4590 shm_regions[i].in_use = false; 4591 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 4592 break; 4593 } 4594 } 4595 rv = get_errno(shmdt(g2h_untagged(shmaddr))); 4596 4597 mmap_unlock(); 4598 4599 return rv; 4600 } 4601 4602 #ifdef TARGET_NR_ipc 4603 /* ??? This only works with linear mappings. */ 4604 /* do_ipc() must return target values and target errnos. 
 */
/* Demultiplex the SysV sys_ipc syscall onto the individual do_* helpers. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* high 16 bits carry the interface version, low 16 the operation */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style msgrcv passes msgp/msgtyp via a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned via the 'third' pointer */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif

/* kernel structure types definitions */

/* First expansion: syscall_types.h generates the STRUCT_* enum tags. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: the same header now generates the thunk type
 * descriptor arrays (struct_<name>_def) for each structure. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Convert and forward the FS_IOC_FIEMAP ioctl (variable-length extent
 * array follows the fixed struct fiemap header). */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
4789 */ 4790 fm = g_try_malloc(outbufsz); 4791 if (!fm) { 4792 return -TARGET_ENOMEM; 4793 } 4794 memcpy(fm, buf_temp, sizeof(struct fiemap)); 4795 free_fm = 1; 4796 } 4797 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm)); 4798 if (!is_error(ret)) { 4799 target_size_out = target_size_in; 4800 /* An extent_count of 0 means we were only counting the extents 4801 * so there are no structs to copy 4802 */ 4803 if (fm->fm_extent_count != 0) { 4804 target_size_out += fm->fm_mapped_extents * extent_size; 4805 } 4806 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 4807 if (!argptr) { 4808 ret = -TARGET_EFAULT; 4809 } else { 4810 /* Convert the struct fiemap */ 4811 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 4812 if (fm->fm_extent_count != 0) { 4813 p = argptr + target_size_in; 4814 /* ...and then all the struct fiemap_extents */ 4815 for (i = 0; i < fm->fm_mapped_extents; i++) { 4816 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 4817 THUNK_TARGET); 4818 p += extent_size; 4819 } 4820 } 4821 unlock_user(argptr, arg, target_size_out); 4822 } 4823 } 4824 if (free_fm) { 4825 g_free(fm); 4826 } 4827 return ret; 4828 } 4829 #endif 4830 4831 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 4832 int fd, int cmd, abi_long arg) 4833 { 4834 const argtype *arg_type = ie->arg_type; 4835 int target_size; 4836 void *argptr; 4837 int ret; 4838 struct ifconf *host_ifconf; 4839 uint32_t outbufsz; 4840 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 4841 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) }; 4842 int target_ifreq_size; 4843 int nb_ifreq; 4844 int free_buf = 0; 4845 int i; 4846 int target_ifc_len; 4847 abi_long target_ifc_buf; 4848 int host_ifc_len; 4849 char *host_ifc_buf; 4850 4851 assert(arg_type[0] == TYPE_PTR); 4852 assert(ie->access == IOC_RW); 4853 4854 arg_type++; 4855 target_size = thunk_type_size(arg_type, 0); 4856 4857 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 4858 if 
(!argptr) 4859 return -TARGET_EFAULT; 4860 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4861 unlock_user(argptr, arg, 0); 4862 4863 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 4864 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 4865 target_ifreq_size = thunk_type_size(ifreq_max_type, 0); 4866 4867 if (target_ifc_buf != 0) { 4868 target_ifc_len = host_ifconf->ifc_len; 4869 nb_ifreq = target_ifc_len / target_ifreq_size; 4870 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 4871 4872 outbufsz = sizeof(*host_ifconf) + host_ifc_len; 4873 if (outbufsz > MAX_STRUCT_SIZE) { 4874 /* 4875 * We can't fit all the extents into the fixed size buffer. 4876 * Allocate one that is large enough and use it instead. 4877 */ 4878 host_ifconf = g_try_malloc(outbufsz); 4879 if (!host_ifconf) { 4880 return -TARGET_ENOMEM; 4881 } 4882 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 4883 free_buf = 1; 4884 } 4885 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf); 4886 4887 host_ifconf->ifc_len = host_ifc_len; 4888 } else { 4889 host_ifc_buf = NULL; 4890 } 4891 host_ifconf->ifc_buf = host_ifc_buf; 4892 4893 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf)); 4894 if (!is_error(ret)) { 4895 /* convert host ifc_len to target ifc_len */ 4896 4897 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 4898 target_ifc_len = nb_ifreq * target_ifreq_size; 4899 host_ifconf->ifc_len = target_ifc_len; 4900 4901 /* restore target ifc_buf */ 4902 4903 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 4904 4905 /* copy struct ifconf to target user */ 4906 4907 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 4908 if (!argptr) 4909 return -TARGET_EFAULT; 4910 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 4911 unlock_user(argptr, arg, target_size); 4912 4913 if (target_ifc_buf != 0) { 4914 /* copy ifreq[] to target user */ 4915 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 4916 for (i = 0; i < 
 nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}

#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* One in-flight USB request: guest URB/buffer addresses plus the
 * host-layout urb actually handed to the kernel. */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

/* Lazily-created table mapping guest URB addresses to live_urb entries. */
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

/* Track a submitted URB (keyed by its leading target_urb_adr field). */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

/* Find the live_urb for a guest URB address, or NULL. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

/* Drop a completed/cancelled URB from the tracking table. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

/*
 * USBDEVFS_REAPURB(NDELAY): reap a completed URB from the kernel,
 * copy results back into the guest urb/buffer, write the guest URB
 * handle to *arg, and free the host-side bookkeeping.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* the kernel returned the host_urb pointer; recover its live_urb */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

/* USBDEVFS_DISCARDURB: cancel a previously submitted URB (looked up by
 * its guest address); the entry is reaped later, not freed here. */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

/*
 * USBDEVFS_SUBMITURB: build a host-layout urb from the guest one,
 * pin the guest data buffer, submit it, and track the pair so reap
 * and discard can find it again.  (Body continues past this chunk.)
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ?
 VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* kept alive until reapurb returns it */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */

/*
 * Convert and forward device-mapper ioctls.  struct dm_ioctl carries a
 * variable-length payload (at data_start, data_size bytes total) whose
 * layout depends on the command, so each command's payload is converted
 * separately in both directions.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* convert the command-specific input payload, guest -> host */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* leading 64-bit sector number needs byte swapping */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        /* each target_spec is followed by its parameter string; walk the
         * guest chain via 'next' offsets while packing the host chain */
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* convert the command-specific output payload, host -> guest */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* payload: u32 count (at offset 0) + u64 dev array at offset 8 */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* finally write the (possibly updated) dm_ioctl header back */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}

/*
 * Convert and forward BLKPG: the outer blkpg_ioctl_arg contains a 'data'
 * pointer to a struct blkpg_partition, which must be fetched and converted
 * separately.  (Body continues past this chunk boundary.)
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr,
 arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}

/*
 * Convert and forward routing-table ioctls (struct rtentry).  The struct
 * is converted field by field so the rt_dev device-name string pointer can
 * be intercepted and mapped to a locked host string.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* the loop above must always have seen the rt_dev field */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

/* KDSIGACCEPT: translate the guest signal number before forwarding. */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

/* SIOCGSTAMP (old/new): fetch the last-packet timestamp and copy it out
 * in the guest timeval layout matching the command variant. */
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

/* SIOCGSTAMPNS (old/new): same as above, but nanosecond timespec. */
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else{
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

#ifdef TIOCGPTPEER
/* TIOCGPTPEER: translate guest open flags before forwarding. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H

/* Release the guest string buffers pinned for a DRM version query,
 * copying back 'len' bytes when copy is set.  (Body continues past
 * the end of this chunk.) */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ?
host_ver->desc_len : 0); 5533 } 5534 5535 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver, 5536 struct target_drm_version *target_ver) 5537 { 5538 memset(host_ver, 0, sizeof(*host_ver)); 5539 5540 __get_user(host_ver->name_len, &target_ver->name_len); 5541 if (host_ver->name_len) { 5542 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name, 5543 target_ver->name_len, 0); 5544 if (!host_ver->name) { 5545 return -EFAULT; 5546 } 5547 } 5548 5549 __get_user(host_ver->date_len, &target_ver->date_len); 5550 if (host_ver->date_len) { 5551 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date, 5552 target_ver->date_len, 0); 5553 if (!host_ver->date) { 5554 goto err; 5555 } 5556 } 5557 5558 __get_user(host_ver->desc_len, &target_ver->desc_len); 5559 if (host_ver->desc_len) { 5560 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc, 5561 target_ver->desc_len, 0); 5562 if (!host_ver->desc) { 5563 goto err; 5564 } 5565 } 5566 5567 return 0; 5568 err: 5569 unlock_drm_version(host_ver, target_ver, false); 5570 return -EFAULT; 5571 } 5572 5573 static inline void host_to_target_drmversion( 5574 struct target_drm_version *target_ver, 5575 struct drm_version *host_ver) 5576 { 5577 __put_user(host_ver->version_major, &target_ver->version_major); 5578 __put_user(host_ver->version_minor, &target_ver->version_minor); 5579 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel); 5580 __put_user(host_ver->name_len, &target_ver->name_len); 5581 __put_user(host_ver->date_len, &target_ver->date_len); 5582 __put_user(host_ver->desc_len, &target_ver->desc_len); 5583 unlock_drm_version(host_ver, target_ver, true); 5584 } 5585 5586 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp, 5587 int fd, int cmd, abi_long arg) 5588 { 5589 struct drm_version *ver; 5590 struct target_drm_version *target_ver; 5591 abi_long ret; 5592 5593 switch (ie->host_cmd) { 5594 case DRM_IOCTL_VERSION: 5595 if 
(!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) { 5596 return -TARGET_EFAULT; 5597 } 5598 ver = (struct drm_version *)buf_temp; 5599 ret = target_to_host_drmversion(ver, target_ver); 5600 if (!is_error(ret)) { 5601 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver)); 5602 if (is_error(ret)) { 5603 unlock_drm_version(ver, target_ver, false); 5604 } else { 5605 host_to_target_drmversion(target_ver, ver); 5606 } 5607 } 5608 unlock_user_struct(target_ver, arg, 0); 5609 return ret; 5610 } 5611 return -TARGET_ENOSYS; 5612 } 5613 5614 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie, 5615 struct drm_i915_getparam *gparam, 5616 int fd, abi_long arg) 5617 { 5618 abi_long ret; 5619 int value; 5620 struct target_drm_i915_getparam *target_gparam; 5621 5622 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) { 5623 return -TARGET_EFAULT; 5624 } 5625 5626 __get_user(gparam->param, &target_gparam->param); 5627 gparam->value = &value; 5628 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam)); 5629 put_user_s32(value, target_gparam->value); 5630 5631 unlock_user_struct(target_gparam, arg, 0); 5632 return ret; 5633 } 5634 5635 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp, 5636 int fd, int cmd, abi_long arg) 5637 { 5638 switch (ie->host_cmd) { 5639 case DRM_IOCTL_I915_GETPARAM: 5640 return do_ioctl_drm_i915_getparam(ie, 5641 (struct drm_i915_getparam *)buf_temp, 5642 fd, arg); 5643 default: 5644 return -TARGET_ENOSYS; 5645 } 5646 } 5647 5648 #endif 5649 5650 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp, 5651 int fd, int cmd, abi_long arg) 5652 { 5653 struct tun_filter *filter = (struct tun_filter *)buf_temp; 5654 struct tun_filter *target_filter; 5655 char *target_addr; 5656 5657 assert(ie->access == IOC_W); 5658 5659 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1); 5660 if (!target_filter) { 5661 return -TARGET_EFAULT; 5662 } 5663 filter->flags = 
tswap16(target_filter->flags); 5664 filter->count = tswap16(target_filter->count); 5665 unlock_user(target_filter, arg, 0); 5666 5667 if (filter->count) { 5668 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN > 5669 MAX_STRUCT_SIZE) { 5670 return -TARGET_EFAULT; 5671 } 5672 5673 target_addr = lock_user(VERIFY_READ, 5674 arg + offsetof(struct tun_filter, addr), 5675 filter->count * ETH_ALEN, 1); 5676 if (!target_addr) { 5677 return -TARGET_EFAULT; 5678 } 5679 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN); 5680 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0); 5681 } 5682 5683 return get_errno(safe_ioctl(fd, ie->host_cmd, filter)); 5684 } 5685 5686 IOCTLEntry ioctl_entries[] = { 5687 #define IOCTL(cmd, access, ...) \ 5688 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 5689 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 5690 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 5691 #define IOCTL_IGNORE(cmd) \ 5692 { TARGET_ ## cmd, 0, #cmd }, 5693 #include "ioctls.h" 5694 { 0, 0, }, 5695 }; 5696 5697 /* ??? Implement proper locking for ioctls. */ 5698 /* do_ioctl() Must return target values and target errnos. */ 5699 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 5700 { 5701 const IOCTLEntry *ie; 5702 const argtype *arg_type; 5703 abi_long ret; 5704 uint8_t buf_temp[MAX_STRUCT_SIZE]; 5705 int target_size; 5706 void *argptr; 5707 5708 ie = ioctl_entries; 5709 for(;;) { 5710 if (ie->target_cmd == 0) { 5711 qemu_log_mask( 5712 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 5713 return -TARGET_ENOSYS; 5714 } 5715 if (ie->target_cmd == cmd) 5716 break; 5717 ie++; 5718 } 5719 arg_type = ie->arg_type; 5720 if (ie->do_ioctl) { 5721 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 5722 } else if (!ie->host_cmd) { 5723 /* Some architectures define BSD ioctls in their headers 5724 that are not implemented in Linux. 
*/ 5725 return -TARGET_ENOSYS; 5726 } 5727 5728 switch(arg_type[0]) { 5729 case TYPE_NULL: 5730 /* no argument */ 5731 ret = get_errno(safe_ioctl(fd, ie->host_cmd)); 5732 break; 5733 case TYPE_PTRVOID: 5734 case TYPE_INT: 5735 case TYPE_LONG: 5736 case TYPE_ULONG: 5737 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg)); 5738 break; 5739 case TYPE_PTR: 5740 arg_type++; 5741 target_size = thunk_type_size(arg_type, 0); 5742 switch(ie->access) { 5743 case IOC_R: 5744 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5745 if (!is_error(ret)) { 5746 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5747 if (!argptr) 5748 return -TARGET_EFAULT; 5749 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5750 unlock_user(argptr, arg, target_size); 5751 } 5752 break; 5753 case IOC_W: 5754 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5755 if (!argptr) 5756 return -TARGET_EFAULT; 5757 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5758 unlock_user(argptr, arg, 0); 5759 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5760 break; 5761 default: 5762 case IOC_RW: 5763 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 5764 if (!argptr) 5765 return -TARGET_EFAULT; 5766 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 5767 unlock_user(argptr, arg, 0); 5768 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); 5769 if (!is_error(ret)) { 5770 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 5771 if (!argptr) 5772 return -TARGET_EFAULT; 5773 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 5774 unlock_user(argptr, arg, target_size); 5775 } 5776 break; 5777 } 5778 break; 5779 default: 5780 qemu_log_mask(LOG_UNIMP, 5781 "Unsupported ioctl type: cmd=0x%04lx type=%d\n", 5782 (long)cmd, arg_type[0]); 5783 ret = -TARGET_ENOSYS; 5784 break; 5785 } 5786 return ret; 5787 } 5788 5789 static const bitmask_transtbl iflag_tbl[] = { 5790 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 5791 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, 
BRKINT }, 5792 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 5793 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 5794 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 5795 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 5796 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 5797 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 5798 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 5799 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 5800 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 5801 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 5802 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 5803 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 5804 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8}, 5805 { 0, 0, 0, 0 } 5806 }; 5807 5808 static const bitmask_transtbl oflag_tbl[] = { 5809 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 5810 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 5811 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 5812 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 5813 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 5814 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET }, 5815 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 5816 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 5817 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 5818 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 5819 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 5820 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 5821 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 5822 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 5823 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 5824 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 5825 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 5826 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 5827 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 5828 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 5829 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 5830 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 5831 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 5832 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 5833 { 0, 0, 0, 0 } 5834 }; 5835 5836 static 
const bitmask_transtbl cflag_tbl[] = { 5837 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 5838 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 5839 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 5840 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 5841 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 5842 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 5843 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 5844 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 5845 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 5846 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 5847 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 5848 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 5849 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 5850 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 5851 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 5852 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 5853 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 5854 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 5855 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 5856 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 5857 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 5858 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 5859 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 5860 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 5861 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 5862 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 5863 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 5864 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 5865 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 5866 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 5867 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 5868 { 0, 0, 0, 0 } 5869 }; 5870 5871 static const bitmask_transtbl lflag_tbl[] = { 5872 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 5873 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 5874 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 5875 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 5876 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 5877 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, 
ECHOK }, 5878 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 5879 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 5880 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 5881 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 5882 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 5883 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 5884 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 5885 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 5886 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 5887 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC}, 5888 { 0, 0, 0, 0 } 5889 }; 5890 5891 static void target_to_host_termios (void *dst, const void *src) 5892 { 5893 struct host_termios *host = dst; 5894 const struct target_termios *target = src; 5895 5896 host->c_iflag = 5897 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 5898 host->c_oflag = 5899 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 5900 host->c_cflag = 5901 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 5902 host->c_lflag = 5903 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 5904 host->c_line = target->c_line; 5905 5906 memset(host->c_cc, 0, sizeof(host->c_cc)); 5907 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 5908 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 5909 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 5910 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 5911 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 5912 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 5913 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 5914 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 5915 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 5916 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 5917 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 5918 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 5919 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 5920 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 5921 host->c_cc[VWERASE] = 
target->c_cc[TARGET_VWERASE]; 5922 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 5923 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 5924 } 5925 5926 static void host_to_target_termios (void *dst, const void *src) 5927 { 5928 struct target_termios *target = dst; 5929 const struct host_termios *host = src; 5930 5931 target->c_iflag = 5932 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 5933 target->c_oflag = 5934 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 5935 target->c_cflag = 5936 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 5937 target->c_lflag = 5938 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 5939 target->c_line = host->c_line; 5940 5941 memset(target->c_cc, 0, sizeof(target->c_cc)); 5942 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 5943 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 5944 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 5945 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 5946 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 5947 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 5948 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 5949 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 5950 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 5951 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 5952 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 5953 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 5954 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 5955 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 5956 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 5957 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 5958 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 5959 } 5960 5961 static const StructEntry struct_termios_def = { 5962 .convert = { host_to_target_termios, target_to_host_termios }, 5963 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 5964 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 5965 .print = 
print_termios, 5966 }; 5967 5968 static const bitmask_transtbl mmap_flags_tbl[] = { 5969 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 5970 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 5971 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED }, 5972 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, 5973 MAP_ANONYMOUS, MAP_ANONYMOUS }, 5974 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, 5975 MAP_GROWSDOWN, MAP_GROWSDOWN }, 5976 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, 5977 MAP_DENYWRITE, MAP_DENYWRITE }, 5978 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, 5979 MAP_EXECUTABLE, MAP_EXECUTABLE }, 5980 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 5981 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, 5982 MAP_NORESERVE, MAP_NORESERVE }, 5983 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB }, 5984 /* MAP_STACK had been ignored by the kernel for quite some time. 5985 Recognize it for the target insofar as we do not want to pass 5986 it through to the host. */ 5987 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 }, 5988 { 0, 0, 0, 0 } 5989 }; 5990 5991 /* 5992 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64) 5993 * TARGET_I386 is defined if TARGET_X86_64 is defined 5994 */ 5995 #if defined(TARGET_I386) 5996 5997 /* NOTE: there is really one LDT for all the threads */ 5998 static uint8_t *ldt_table; 5999 6000 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 6001 { 6002 int size; 6003 void *p; 6004 6005 if (!ldt_table) 6006 return 0; 6007 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 6008 if (size > bytecount) 6009 size = bytecount; 6010 p = lock_user(VERIFY_WRITE, ptr, size, 0); 6011 if (!p) 6012 return -TARGET_EFAULT; 6013 /* ??? Should this by byteswapped? 
*/ 6014 memcpy(p, ldt_table, size); 6015 unlock_user(p, ptr, size); 6016 return size; 6017 } 6018 6019 /* XXX: add locking support */ 6020 static abi_long write_ldt(CPUX86State *env, 6021 abi_ulong ptr, unsigned long bytecount, int oldmode) 6022 { 6023 struct target_modify_ldt_ldt_s ldt_info; 6024 struct target_modify_ldt_ldt_s *target_ldt_info; 6025 int seg_32bit, contents, read_exec_only, limit_in_pages; 6026 int seg_not_present, useable, lm; 6027 uint32_t *lp, entry_1, entry_2; 6028 6029 if (bytecount != sizeof(ldt_info)) 6030 return -TARGET_EINVAL; 6031 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 6032 return -TARGET_EFAULT; 6033 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6034 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6035 ldt_info.limit = tswap32(target_ldt_info->limit); 6036 ldt_info.flags = tswap32(target_ldt_info->flags); 6037 unlock_user_struct(target_ldt_info, ptr, 0); 6038 6039 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 6040 return -TARGET_EINVAL; 6041 seg_32bit = ldt_info.flags & 1; 6042 contents = (ldt_info.flags >> 1) & 3; 6043 read_exec_only = (ldt_info.flags >> 3) & 1; 6044 limit_in_pages = (ldt_info.flags >> 4) & 1; 6045 seg_not_present = (ldt_info.flags >> 5) & 1; 6046 useable = (ldt_info.flags >> 6) & 1; 6047 #ifdef TARGET_ABI32 6048 lm = 0; 6049 #else 6050 lm = (ldt_info.flags >> 7) & 1; 6051 #endif 6052 if (contents == 3) { 6053 if (oldmode) 6054 return -TARGET_EINVAL; 6055 if (seg_not_present == 0) 6056 return -TARGET_EINVAL; 6057 } 6058 /* allocate the LDT */ 6059 if (!ldt_table) { 6060 env->ldt.base = target_mmap(0, 6061 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 6062 PROT_READ|PROT_WRITE, 6063 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 6064 if (env->ldt.base == -1) 6065 return -TARGET_ENOMEM; 6066 memset(g2h_untagged(env->ldt.base), 0, 6067 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 6068 env->ldt.limit = 0xffff; 6069 ldt_table = g2h_untagged(env->ldt.base); 6070 } 6071 6072 /* NOTE: same 
code as Linux kernel */ 6073 /* Allow LDTs to be cleared by the user. */ 6074 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6075 if (oldmode || 6076 (contents == 0 && 6077 read_exec_only == 1 && 6078 seg_32bit == 0 && 6079 limit_in_pages == 0 && 6080 seg_not_present == 1 && 6081 useable == 0 )) { 6082 entry_1 = 0; 6083 entry_2 = 0; 6084 goto install; 6085 } 6086 } 6087 6088 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6089 (ldt_info.limit & 0x0ffff); 6090 entry_2 = (ldt_info.base_addr & 0xff000000) | 6091 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6092 (ldt_info.limit & 0xf0000) | 6093 ((read_exec_only ^ 1) << 9) | 6094 (contents << 10) | 6095 ((seg_not_present ^ 1) << 15) | 6096 (seg_32bit << 22) | 6097 (limit_in_pages << 23) | 6098 (lm << 21) | 6099 0x7000; 6100 if (!oldmode) 6101 entry_2 |= (useable << 20); 6102 6103 /* Install the new entry ... */ 6104 install: 6105 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 6106 lp[0] = tswap32(entry_1); 6107 lp[1] = tswap32(entry_2); 6108 return 0; 6109 } 6110 6111 /* specific and weird i386 syscalls */ 6112 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 6113 unsigned long bytecount) 6114 { 6115 abi_long ret; 6116 6117 switch (func) { 6118 case 0: 6119 ret = read_ldt(ptr, bytecount); 6120 break; 6121 case 1: 6122 ret = write_ldt(env, ptr, bytecount, 1); 6123 break; 6124 case 0x11: 6125 ret = write_ldt(env, ptr, bytecount, 0); 6126 break; 6127 default: 6128 ret = -TARGET_ENOSYS; 6129 break; 6130 } 6131 return ret; 6132 } 6133 6134 #if defined(TARGET_ABI32) 6135 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 6136 { 6137 uint64_t *gdt_table = g2h_untagged(env->gdt.base); 6138 struct target_modify_ldt_ldt_s ldt_info; 6139 struct target_modify_ldt_ldt_s *target_ldt_info; 6140 int seg_32bit, contents, read_exec_only, limit_in_pages; 6141 int seg_not_present, useable, lm; 6142 uint32_t *lp, entry_1, entry_2; 6143 int i; 6144 6145 
lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6146 if (!target_ldt_info) 6147 return -TARGET_EFAULT; 6148 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 6149 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 6150 ldt_info.limit = tswap32(target_ldt_info->limit); 6151 ldt_info.flags = tswap32(target_ldt_info->flags); 6152 if (ldt_info.entry_number == -1) { 6153 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 6154 if (gdt_table[i] == 0) { 6155 ldt_info.entry_number = i; 6156 target_ldt_info->entry_number = tswap32(i); 6157 break; 6158 } 6159 } 6160 } 6161 unlock_user_struct(target_ldt_info, ptr, 1); 6162 6163 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 6164 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 6165 return -TARGET_EINVAL; 6166 seg_32bit = ldt_info.flags & 1; 6167 contents = (ldt_info.flags >> 1) & 3; 6168 read_exec_only = (ldt_info.flags >> 3) & 1; 6169 limit_in_pages = (ldt_info.flags >> 4) & 1; 6170 seg_not_present = (ldt_info.flags >> 5) & 1; 6171 useable = (ldt_info.flags >> 6) & 1; 6172 #ifdef TARGET_ABI32 6173 lm = 0; 6174 #else 6175 lm = (ldt_info.flags >> 7) & 1; 6176 #endif 6177 6178 if (contents == 3) { 6179 if (seg_not_present == 0) 6180 return -TARGET_EINVAL; 6181 } 6182 6183 /* NOTE: same code as Linux kernel */ 6184 /* Allow LDTs to be cleared by the user. 
*/ 6185 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 6186 if ((contents == 0 && 6187 read_exec_only == 1 && 6188 seg_32bit == 0 && 6189 limit_in_pages == 0 && 6190 seg_not_present == 1 && 6191 useable == 0 )) { 6192 entry_1 = 0; 6193 entry_2 = 0; 6194 goto install; 6195 } 6196 } 6197 6198 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 6199 (ldt_info.limit & 0x0ffff); 6200 entry_2 = (ldt_info.base_addr & 0xff000000) | 6201 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 6202 (ldt_info.limit & 0xf0000) | 6203 ((read_exec_only ^ 1) << 9) | 6204 (contents << 10) | 6205 ((seg_not_present ^ 1) << 15) | 6206 (seg_32bit << 22) | 6207 (limit_in_pages << 23) | 6208 (useable << 20) | 6209 (lm << 21) | 6210 0x7000; 6211 6212 /* Install the new entry ... */ 6213 install: 6214 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 6215 lp[0] = tswap32(entry_1); 6216 lp[1] = tswap32(entry_2); 6217 return 0; 6218 } 6219 6220 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 6221 { 6222 struct target_modify_ldt_ldt_s *target_ldt_info; 6223 uint64_t *gdt_table = g2h_untagged(env->gdt.base); 6224 uint32_t base_addr, limit, flags; 6225 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 6226 int seg_not_present, useable, lm; 6227 uint32_t *lp, entry_1, entry_2; 6228 6229 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 6230 if (!target_ldt_info) 6231 return -TARGET_EFAULT; 6232 idx = tswap32(target_ldt_info->entry_number); 6233 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 6234 idx > TARGET_GDT_ENTRY_TLS_MAX) { 6235 unlock_user_struct(target_ldt_info, ptr, 1); 6236 return -TARGET_EINVAL; 6237 } 6238 lp = (uint32_t *)(gdt_table + idx); 6239 entry_1 = tswap32(lp[0]); 6240 entry_2 = tswap32(lp[1]); 6241 6242 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 6243 contents = (entry_2 >> 10) & 3; 6244 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 6245 seg_32bit = (entry_2 >> 22) & 1; 6246 limit_in_pages = (entry_2 >> 23) & 1; 6247 useable = (entry_2 >> 20) & 
1; 6248 #ifdef TARGET_ABI32 6249 lm = 0; 6250 #else 6251 lm = (entry_2 >> 21) & 1; 6252 #endif 6253 flags = (seg_32bit << 0) | (contents << 1) | 6254 (read_exec_only << 3) | (limit_in_pages << 4) | 6255 (seg_not_present << 5) | (useable << 6) | (lm << 7); 6256 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 6257 base_addr = (entry_1 >> 16) | 6258 (entry_2 & 0xff000000) | 6259 ((entry_2 & 0xff) << 16); 6260 target_ldt_info->base_addr = tswapal(base_addr); 6261 target_ldt_info->limit = tswap32(limit); 6262 target_ldt_info->flags = tswap32(flags); 6263 unlock_user_struct(target_ldt_info, ptr, 1); 6264 return 0; 6265 } 6266 6267 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6268 { 6269 return -TARGET_ENOSYS; 6270 } 6271 #else 6272 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 6273 { 6274 abi_long ret = 0; 6275 abi_ulong val; 6276 int idx; 6277 6278 switch(code) { 6279 case TARGET_ARCH_SET_GS: 6280 case TARGET_ARCH_SET_FS: 6281 if (code == TARGET_ARCH_SET_GS) 6282 idx = R_GS; 6283 else 6284 idx = R_FS; 6285 cpu_x86_load_seg(env, idx, 0); 6286 env->segs[idx].base = addr; 6287 break; 6288 case TARGET_ARCH_GET_GS: 6289 case TARGET_ARCH_GET_FS: 6290 if (code == TARGET_ARCH_GET_GS) 6291 idx = R_GS; 6292 else 6293 idx = R_FS; 6294 val = env->segs[idx].base; 6295 if (put_user(val, addr, abi_ulong)) 6296 ret = -TARGET_EFAULT; 6297 break; 6298 default: 6299 ret = -TARGET_EINVAL; 6300 break; 6301 } 6302 return ret; 6303 } 6304 #endif /* defined(TARGET_ABI32 */ 6305 #endif /* defined(TARGET_I386) */ 6306 6307 /* 6308 * These constants are generic. Supply any that are missing from the host. 
6309 */ 6310 #ifndef PR_SET_NAME 6311 # define PR_SET_NAME 15 6312 # define PR_GET_NAME 16 6313 #endif 6314 #ifndef PR_SET_FP_MODE 6315 # define PR_SET_FP_MODE 45 6316 # define PR_GET_FP_MODE 46 6317 # define PR_FP_MODE_FR (1 << 0) 6318 # define PR_FP_MODE_FRE (1 << 1) 6319 #endif 6320 #ifndef PR_SVE_SET_VL 6321 # define PR_SVE_SET_VL 50 6322 # define PR_SVE_GET_VL 51 6323 # define PR_SVE_VL_LEN_MASK 0xffff 6324 # define PR_SVE_VL_INHERIT (1 << 17) 6325 #endif 6326 #ifndef PR_PAC_RESET_KEYS 6327 # define PR_PAC_RESET_KEYS 54 6328 # define PR_PAC_APIAKEY (1 << 0) 6329 # define PR_PAC_APIBKEY (1 << 1) 6330 # define PR_PAC_APDAKEY (1 << 2) 6331 # define PR_PAC_APDBKEY (1 << 3) 6332 # define PR_PAC_APGAKEY (1 << 4) 6333 #endif 6334 #ifndef PR_SET_TAGGED_ADDR_CTRL 6335 # define PR_SET_TAGGED_ADDR_CTRL 55 6336 # define PR_GET_TAGGED_ADDR_CTRL 56 6337 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) 6338 #endif 6339 #ifndef PR_MTE_TCF_SHIFT 6340 # define PR_MTE_TCF_SHIFT 1 6341 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) 6342 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) 6343 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT) 6344 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) 6345 # define PR_MTE_TAG_SHIFT 3 6346 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) 6347 #endif 6348 #ifndef PR_SET_IO_FLUSHER 6349 # define PR_SET_IO_FLUSHER 57 6350 # define PR_GET_IO_FLUSHER 58 6351 #endif 6352 #ifndef PR_SET_SYSCALL_USER_DISPATCH 6353 # define PR_SET_SYSCALL_USER_DISPATCH 59 6354 #endif 6355 #ifndef PR_SME_SET_VL 6356 # define PR_SME_SET_VL 63 6357 # define PR_SME_GET_VL 64 6358 # define PR_SME_VL_LEN_MASK 0xffff 6359 # define PR_SME_VL_INHERIT (1 << 17) 6360 #endif 6361 6362 #include "target_prctl.h" 6363 6364 static abi_long do_prctl_inval0(CPUArchState *env) 6365 { 6366 return -TARGET_EINVAL; 6367 } 6368 6369 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) 6370 { 6371 return -TARGET_EINVAL; 6372 } 6373 6374 #ifndef do_prctl_get_fp_mode 
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif

/*
 * Emulate the target's prctl() syscall.  Options with pointer or signal
 * arguments are converted explicitly; architecture-specific options are
 * dispatched to the do_prctl_* hooks (provided by target_prctl.h, or the
 * EINVAL stubs above when the target does not implement them).  A tail of
 * known-but-unsupported options is rejected rather than passed through,
 * so the guest cannot disable host features QEMU itself relies on.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* Host writes a host signal number; translate before copy-out. */
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* Task names are exactly 16 bytes (TASK_COMM_LEN) on Linux. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}

#define NEW_STACK_SIZE 0x40000


/* Serializes thread setup so a new guest thread appears atomically. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the child thread. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;               /* child fills in its host TID */
    abi_ulong child_tidptr;     /* guest addresses for CLONE_*_SETTID */
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* parent's mask, restored once ready */
} new_thread_info;

/*
 * Entry point of a host thread backing a guest CLONE_VM thread.
 * Registers with RCU/TCG, publishes its TID, signals the parent via
 * info->cond, then waits for the parent to drop clone_lock before
 * entering the CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}

/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM: create a host thread sharing this address space. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}

/* warning : doesn't handle linux specific flags...
*/ 6735 static int target_to_host_fcntl_cmd(int cmd) 6736 { 6737 int ret; 6738 6739 switch(cmd) { 6740 case TARGET_F_DUPFD: 6741 case TARGET_F_GETFD: 6742 case TARGET_F_SETFD: 6743 case TARGET_F_GETFL: 6744 case TARGET_F_SETFL: 6745 case TARGET_F_OFD_GETLK: 6746 case TARGET_F_OFD_SETLK: 6747 case TARGET_F_OFD_SETLKW: 6748 ret = cmd; 6749 break; 6750 case TARGET_F_GETLK: 6751 ret = F_GETLK64; 6752 break; 6753 case TARGET_F_SETLK: 6754 ret = F_SETLK64; 6755 break; 6756 case TARGET_F_SETLKW: 6757 ret = F_SETLKW64; 6758 break; 6759 case TARGET_F_GETOWN: 6760 ret = F_GETOWN; 6761 break; 6762 case TARGET_F_SETOWN: 6763 ret = F_SETOWN; 6764 break; 6765 case TARGET_F_GETSIG: 6766 ret = F_GETSIG; 6767 break; 6768 case TARGET_F_SETSIG: 6769 ret = F_SETSIG; 6770 break; 6771 #if TARGET_ABI_BITS == 32 6772 case TARGET_F_GETLK64: 6773 ret = F_GETLK64; 6774 break; 6775 case TARGET_F_SETLK64: 6776 ret = F_SETLK64; 6777 break; 6778 case TARGET_F_SETLKW64: 6779 ret = F_SETLKW64; 6780 break; 6781 #endif 6782 case TARGET_F_SETLEASE: 6783 ret = F_SETLEASE; 6784 break; 6785 case TARGET_F_GETLEASE: 6786 ret = F_GETLEASE; 6787 break; 6788 #ifdef F_DUPFD_CLOEXEC 6789 case TARGET_F_DUPFD_CLOEXEC: 6790 ret = F_DUPFD_CLOEXEC; 6791 break; 6792 #endif 6793 case TARGET_F_NOTIFY: 6794 ret = F_NOTIFY; 6795 break; 6796 #ifdef F_GETOWN_EX 6797 case TARGET_F_GETOWN_EX: 6798 ret = F_GETOWN_EX; 6799 break; 6800 #endif 6801 #ifdef F_SETOWN_EX 6802 case TARGET_F_SETOWN_EX: 6803 ret = F_SETOWN_EX; 6804 break; 6805 #endif 6806 #ifdef F_SETPIPE_SZ 6807 case TARGET_F_SETPIPE_SZ: 6808 ret = F_SETPIPE_SZ; 6809 break; 6810 case TARGET_F_GETPIPE_SZ: 6811 ret = F_GETPIPE_SZ; 6812 break; 6813 #endif 6814 #ifdef F_ADD_SEALS 6815 case TARGET_F_ADD_SEALS: 6816 ret = F_ADD_SEALS; 6817 break; 6818 case TARGET_F_GET_SEALS: 6819 ret = F_GET_SEALS; 6820 break; 6821 #endif 6822 default: 6823 ret = -TARGET_EINVAL; 6824 break; 6825 } 6826 6827 #if defined(__powerpc64__) 6828 /* On PPC64, glibc headers has the F_*LK* defined 
to 12, 13 and 14 and 6829 * is not supported by kernel. The glibc fcntl call actually adjusts 6830 * them to 5, 6 and 7 before making the syscall(). Since we make the 6831 * syscall directly, adjust to what is supported by the kernel. 6832 */ 6833 if (ret >= F_GETLK64 && ret <= F_SETLKW64) { 6834 ret -= F_GETLK64 - 5; 6835 } 6836 #endif 6837 6838 return ret; 6839 } 6840 6841 #define FLOCK_TRANSTBL \ 6842 switch (type) { \ 6843 TRANSTBL_CONVERT(F_RDLCK); \ 6844 TRANSTBL_CONVERT(F_WRLCK); \ 6845 TRANSTBL_CONVERT(F_UNLCK); \ 6846 } 6847 6848 static int target_to_host_flock(int type) 6849 { 6850 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a 6851 FLOCK_TRANSTBL 6852 #undef TRANSTBL_CONVERT 6853 return -TARGET_EINVAL; 6854 } 6855 6856 static int host_to_target_flock(int type) 6857 { 6858 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a 6859 FLOCK_TRANSTBL 6860 #undef TRANSTBL_CONVERT 6861 /* if we don't know how to convert the value coming 6862 * from the host we copy to the target field as-is 6863 */ 6864 return type; 6865 } 6866 6867 static inline abi_long copy_from_user_flock(struct flock64 *fl, 6868 abi_ulong target_flock_addr) 6869 { 6870 struct target_flock *target_fl; 6871 int l_type; 6872 6873 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6874 return -TARGET_EFAULT; 6875 } 6876 6877 __get_user(l_type, &target_fl->l_type); 6878 l_type = target_to_host_flock(l_type); 6879 if (l_type < 0) { 6880 return l_type; 6881 } 6882 fl->l_type = l_type; 6883 __get_user(fl->l_whence, &target_fl->l_whence); 6884 __get_user(fl->l_start, &target_fl->l_start); 6885 __get_user(fl->l_len, &target_fl->l_len); 6886 __get_user(fl->l_pid, &target_fl->l_pid); 6887 unlock_user_struct(target_fl, target_flock_addr, 0); 6888 return 0; 6889 } 6890 6891 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr, 6892 const struct flock64 *fl) 6893 { 6894 struct target_flock *target_fl; 6895 short l_type; 6896 6897 if 
(!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6898 return -TARGET_EFAULT; 6899 } 6900 6901 l_type = host_to_target_flock(fl->l_type); 6902 __put_user(l_type, &target_fl->l_type); 6903 __put_user(fl->l_whence, &target_fl->l_whence); 6904 __put_user(fl->l_start, &target_fl->l_start); 6905 __put_user(fl->l_len, &target_fl->l_len); 6906 __put_user(fl->l_pid, &target_fl->l_pid); 6907 unlock_user_struct(target_fl, target_flock_addr, 1); 6908 return 0; 6909 } 6910 6911 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr); 6912 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl); 6913 6914 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32 6915 struct target_oabi_flock64 { 6916 abi_short l_type; 6917 abi_short l_whence; 6918 abi_llong l_start; 6919 abi_llong l_len; 6920 abi_int l_pid; 6921 } QEMU_PACKED; 6922 6923 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl, 6924 abi_ulong target_flock_addr) 6925 { 6926 struct target_oabi_flock64 *target_fl; 6927 int l_type; 6928 6929 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6930 return -TARGET_EFAULT; 6931 } 6932 6933 __get_user(l_type, &target_fl->l_type); 6934 l_type = target_to_host_flock(l_type); 6935 if (l_type < 0) { 6936 return l_type; 6937 } 6938 fl->l_type = l_type; 6939 __get_user(fl->l_whence, &target_fl->l_whence); 6940 __get_user(fl->l_start, &target_fl->l_start); 6941 __get_user(fl->l_len, &target_fl->l_len); 6942 __get_user(fl->l_pid, &target_fl->l_pid); 6943 unlock_user_struct(target_fl, target_flock_addr, 0); 6944 return 0; 6945 } 6946 6947 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr, 6948 const struct flock64 *fl) 6949 { 6950 struct target_oabi_flock64 *target_fl; 6951 short l_type; 6952 6953 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6954 return -TARGET_EFAULT; 6955 } 6956 6957 l_type = host_to_target_flock(fl->l_type); 6958 
__put_user(l_type, &target_fl->l_type); 6959 __put_user(fl->l_whence, &target_fl->l_whence); 6960 __put_user(fl->l_start, &target_fl->l_start); 6961 __put_user(fl->l_len, &target_fl->l_len); 6962 __put_user(fl->l_pid, &target_fl->l_pid); 6963 unlock_user_struct(target_fl, target_flock_addr, 1); 6964 return 0; 6965 } 6966 #endif 6967 6968 static inline abi_long copy_from_user_flock64(struct flock64 *fl, 6969 abi_ulong target_flock_addr) 6970 { 6971 struct target_flock64 *target_fl; 6972 int l_type; 6973 6974 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) { 6975 return -TARGET_EFAULT; 6976 } 6977 6978 __get_user(l_type, &target_fl->l_type); 6979 l_type = target_to_host_flock(l_type); 6980 if (l_type < 0) { 6981 return l_type; 6982 } 6983 fl->l_type = l_type; 6984 __get_user(fl->l_whence, &target_fl->l_whence); 6985 __get_user(fl->l_start, &target_fl->l_start); 6986 __get_user(fl->l_len, &target_fl->l_len); 6987 __get_user(fl->l_pid, &target_fl->l_pid); 6988 unlock_user_struct(target_fl, target_flock_addr, 0); 6989 return 0; 6990 } 6991 6992 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr, 6993 const struct flock64 *fl) 6994 { 6995 struct target_flock64 *target_fl; 6996 short l_type; 6997 6998 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) { 6999 return -TARGET_EFAULT; 7000 } 7001 7002 l_type = host_to_target_flock(fl->l_type); 7003 __put_user(l_type, &target_fl->l_type); 7004 __put_user(fl->l_whence, &target_fl->l_whence); 7005 __put_user(fl->l_start, &target_fl->l_start); 7006 __put_user(fl->l_len, &target_fl->l_len); 7007 __put_user(fl->l_pid, &target_fl->l_pid); 7008 unlock_user_struct(target_fl, target_flock_addr, 1); 7009 return 0; 7010 } 7011 7012 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 7013 { 7014 struct flock64 fl64; 7015 #ifdef F_GETOWN_EX 7016 struct f_owner_ex fox; 7017 struct target_f_owner_ex *target_fox; 7018 #endif 7019 abi_long ret; 7020 int host_cmd = 
target_to_host_fcntl_cmd(cmd); 7021 7022 if (host_cmd == -TARGET_EINVAL) 7023 return host_cmd; 7024 7025 switch(cmd) { 7026 case TARGET_F_GETLK: 7027 ret = copy_from_user_flock(&fl64, arg); 7028 if (ret) { 7029 return ret; 7030 } 7031 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7032 if (ret == 0) { 7033 ret = copy_to_user_flock(arg, &fl64); 7034 } 7035 break; 7036 7037 case TARGET_F_SETLK: 7038 case TARGET_F_SETLKW: 7039 ret = copy_from_user_flock(&fl64, arg); 7040 if (ret) { 7041 return ret; 7042 } 7043 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7044 break; 7045 7046 case TARGET_F_GETLK64: 7047 case TARGET_F_OFD_GETLK: 7048 ret = copy_from_user_flock64(&fl64, arg); 7049 if (ret) { 7050 return ret; 7051 } 7052 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7053 if (ret == 0) { 7054 ret = copy_to_user_flock64(arg, &fl64); 7055 } 7056 break; 7057 case TARGET_F_SETLK64: 7058 case TARGET_F_SETLKW64: 7059 case TARGET_F_OFD_SETLK: 7060 case TARGET_F_OFD_SETLKW: 7061 ret = copy_from_user_flock64(&fl64, arg); 7062 if (ret) { 7063 return ret; 7064 } 7065 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64)); 7066 break; 7067 7068 case TARGET_F_GETFL: 7069 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7070 if (ret >= 0) { 7071 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 7072 } 7073 break; 7074 7075 case TARGET_F_SETFL: 7076 ret = get_errno(safe_fcntl(fd, host_cmd, 7077 target_to_host_bitmask(arg, 7078 fcntl_flags_tbl))); 7079 break; 7080 7081 #ifdef F_GETOWN_EX 7082 case TARGET_F_GETOWN_EX: 7083 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7084 if (ret >= 0) { 7085 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 7086 return -TARGET_EFAULT; 7087 target_fox->type = tswap32(fox.type); 7088 target_fox->pid = tswap32(fox.pid); 7089 unlock_user_struct(target_fox, arg, 1); 7090 } 7091 break; 7092 #endif 7093 7094 #ifdef F_SETOWN_EX 7095 case TARGET_F_SETOWN_EX: 7096 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 7097 return -TARGET_EFAULT; 
7098 fox.type = tswap32(target_fox->type); 7099 fox.pid = tswap32(target_fox->pid); 7100 unlock_user_struct(target_fox, arg, 0); 7101 ret = get_errno(safe_fcntl(fd, host_cmd, &fox)); 7102 break; 7103 #endif 7104 7105 case TARGET_F_SETSIG: 7106 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg))); 7107 break; 7108 7109 case TARGET_F_GETSIG: 7110 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg))); 7111 break; 7112 7113 case TARGET_F_SETOWN: 7114 case TARGET_F_GETOWN: 7115 case TARGET_F_SETLEASE: 7116 case TARGET_F_GETLEASE: 7117 case TARGET_F_SETPIPE_SZ: 7118 case TARGET_F_GETPIPE_SZ: 7119 case TARGET_F_ADD_SEALS: 7120 case TARGET_F_GET_SEALS: 7121 ret = get_errno(safe_fcntl(fd, host_cmd, arg)); 7122 break; 7123 7124 default: 7125 ret = get_errno(safe_fcntl(fd, cmd, arg)); 7126 break; 7127 } 7128 return ret; 7129 } 7130 7131 #ifdef USE_UID16 7132 7133 static inline int high2lowuid(int uid) 7134 { 7135 if (uid > 65535) 7136 return 65534; 7137 else 7138 return uid; 7139 } 7140 7141 static inline int high2lowgid(int gid) 7142 { 7143 if (gid > 65535) 7144 return 65534; 7145 else 7146 return gid; 7147 } 7148 7149 static inline int low2highuid(int uid) 7150 { 7151 if ((int16_t)uid == -1) 7152 return -1; 7153 else 7154 return uid; 7155 } 7156 7157 static inline int low2highgid(int gid) 7158 { 7159 if ((int16_t)gid == -1) 7160 return -1; 7161 else 7162 return gid; 7163 } 7164 static inline int tswapid(int id) 7165 { 7166 return tswap16(id); 7167 } 7168 7169 #define put_user_id(x, gaddr) put_user_u16(x, gaddr) 7170 7171 #else /* !USE_UID16 */ 7172 static inline int high2lowuid(int uid) 7173 { 7174 return uid; 7175 } 7176 static inline int high2lowgid(int gid) 7177 { 7178 return gid; 7179 } 7180 static inline int low2highuid(int uid) 7181 { 7182 return uid; 7183 } 7184 static inline int low2highgid(int gid) 7185 { 7186 return gid; 7187 } 7188 static inline int tswapid(int id) 7189 { 7190 return tswap32(id); 7191 } 7192 7193 #define 
put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

/*
 * One-time syscall-layer initialization: register all thunk struct
 * descriptions and fix up ioctl table entries whose size field was left
 * as the all-ones placeholder (it is replaced with the real size of the
 * pointed-to argument type).
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}

#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that align 64-bit register pairs, the offset
 * halves arrive shifted by one argument slot. */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Copy a guest target_itimerspec into a host struct itimerspec. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* 64-bit time_t variant of target_to_host_itimerspec. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Copy a host struct itimerspec out to a guest target_itimerspec. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* 64-bit time_t variant of host_to_target_itimerspec. */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif

#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/* Field-by-field copy of a guest target_timex into a host struct timex. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

/* Field-by-field copy of a host struct timex out to a guest target_timex. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif


#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* target__kernel_timex variant: the embedded timeval is converted
 * separately via copy_from_user_timeval64 before the field copy. */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

/* Reverse direction of target_to_host_timex64. */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif

#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

/* Copy a guest target_sigevent into a host struct sigevent,
 * translating the signal number and byte-swapping each field. */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
/*
 * Convert a guest struct sigevent at target_addr into host form.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* flag bits for mlockall() into host flag bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    /* MCL_ONFAULT only exists on newer hosts; drop the bit otherwise. */
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's stat64-family layout at
 * target_addr.  On 32-bit ARM an EABI process uses a different
 * (target_eabi_stat64) layout, selected at runtime via cpu_env->eabi.
 * Returns 0 on success, -TARGET_EFAULT on inaccessible guest memory.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result (already read into a target_statx-shaped host
 * buffer) out to the guest, byteswapping each member.
 * Returns 0 on success, -TARGET_EFAULT on inaccessible guest memory.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/*
 * Raw futex syscall dispatcher: picks the host syscall variant that
 * matches the host's time_t width.  On 64-bit hosts __NR_futex already
 * takes a 64-bit time_t; on 32-bit hosts prefer __NR_futex_time64 when
 * both it and a 64-bit tv_sec are available, else fall back to the
 * legacy 32-bit __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Only reached if no futex syscall exists on this host at all. */
    g_assert_not_reached();
}

/*
 * Same host-variant selection as do_sys_futex(), but through the
 * signal-safe "safe_" wrappers, with errno converted to a target errno.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
#if defined(TARGET_NR_futex)
/*
 * Emulate the futex syscall for guests with a 32-bit time_t.
 * uaddr/uaddr2 are guest addresses; val3 is byteswapped only where the
 * kernel compares it against guest memory (FUTEX_CMP_REQUEUE), and val
 * only where it is compared against *uaddr (FUTEX_WAIT*).
 * Returns 0/positive result or a negative target errno.
 */
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /*
             * Fix: previously the conversion result was ignored, so a
             * faulting guest timeout pointer waited on an uninitialized
             * timespec.  Report EFAULT instead, matching
             * do_futex_time64().
             */
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * Emulate the futex_time64 syscall (64-bit time_t timeout on 32-bit
 * guests).  Same conversion rules as do_futex(): val is byteswapped
 * only for the WAIT ops, val3 only for FUTEX_CMP_REQUEUE.
 * Returns 0/positive result or a negative target errno.
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest supplies handle_bytes in the
 * first field of the file_handle buffer; the handle payload itself is
 * opaque and copied back verbatim, with only handle_bytes/handle_type
 * byteswapped.  The resolved mount id is stored through mount_id.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first member of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): rebuild a host file_handle from the
 * guest buffer (handle payload copied verbatim, handle_bytes/type
 * byteswapped) and open it with host-converted open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first member of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif

#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Common implementation for signalfd/signalfd4: convert the guest
 * sigset and flags, create the host signalfd, and register the fd so
 * that reads of signalfd_siginfo get translated back to guest layout.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Translate signalfd_siginfo on every read from this fd. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits of the status. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8-15; low byte is the 0x7f marker. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}

/*
 * Backing generator for the faked /proc/self/cmdline: write each
 * argv[] element, including its NUL terminator, to fd.
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Backing generator for the faked /proc/self/maps: walk the host's own
 * mappings and emit, in guest addresses, only the ranges that are valid
 * guest memory, in the kernel's maps format.
 */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp ranges that extend past the top of guest memory. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the pathname starts in column 73, like the kernel. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}

/*
 * Backing generator for the faked /proc/self/stat: emit the 44
 * space-separated fields, faking pid, comm, ppid, starttime and the
 * stack bottom from the task state and writing 0 for everything else.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
'\n' : ' '); 8095 } 8096 8097 if (write(fd, buf->str, buf->len) != buf->len) { 8098 return -1; 8099 } 8100 } 8101 8102 return 0; 8103 } 8104 8105 static int open_self_auxv(CPUArchState *cpu_env, int fd) 8106 { 8107 CPUState *cpu = env_cpu(cpu_env); 8108 TaskState *ts = cpu->opaque; 8109 abi_ulong auxv = ts->info->saved_auxv; 8110 abi_ulong len = ts->info->auxv_len; 8111 char *ptr; 8112 8113 /* 8114 * Auxiliary vector is stored in target process stack. 8115 * read in whole auxv vector and copy it to file 8116 */ 8117 ptr = lock_user(VERIFY_READ, auxv, len, 0); 8118 if (ptr != NULL) { 8119 while (len > 0) { 8120 ssize_t r; 8121 r = write(fd, ptr, len); 8122 if (r <= 0) { 8123 break; 8124 } 8125 len -= r; 8126 ptr += r; 8127 } 8128 lseek(fd, 0, SEEK_SET); 8129 unlock_user(ptr, auxv, len); 8130 } 8131 8132 return 0; 8133 } 8134 8135 static int is_proc_myself(const char *filename, const char *entry) 8136 { 8137 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 8138 filename += strlen("/proc/"); 8139 if (!strncmp(filename, "self/", strlen("self/"))) { 8140 filename += strlen("self/"); 8141 } else if (*filename >= '1' && *filename <= '9') { 8142 char myself[80]; 8143 snprintf(myself, sizeof(myself), "%d/", getpid()); 8144 if (!strncmp(filename, myself, strlen(myself))) { 8145 filename += strlen(myself); 8146 } else { 8147 return 0; 8148 } 8149 } else { 8150 return 0; 8151 } 8152 if (!strcmp(filename, entry)) { 8153 return 1; 8154 } 8155 } 8156 return 0; 8157 } 8158 8159 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \ 8160 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) 8161 static int is_proc(const char *filename, const char *entry) 8162 { 8163 return strcmp(filename, entry) == 0; 8164 } 8165 #endif 8166 8167 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN 8168 static int open_net_route(CPUArchState *cpu_env, int fd) 8169 { 8170 FILE *fp; 8171 char *line = NULL; 8172 size_t len = 0; 8173 ssize_t read; 8174 8175 fp = fopen("/proc/net/route", "r"); 8176 if 
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Backing generator for a faked /proc/net/route on cross-endian setups:
 * copy the host file through, byteswapping the destination, gateway and
 * mask words so the guest sees them in its own byte order.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        /* Skip malformed lines rather than emitting garbage. */
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

#if defined(TARGET_SPARC)
/* Minimal faked /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
/* Minimal faked /proc/cpuinfo for HPPA guests. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
/* Minimal faked /proc/hardware for m68k guests. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
open_self_maps, is_proc_myself }, 8250 { "stat", open_self_stat, is_proc_myself }, 8251 { "auxv", open_self_auxv, is_proc_myself }, 8252 { "cmdline", open_self_cmdline, is_proc_myself }, 8253 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN 8254 { "/proc/net/route", open_net_route, is_proc }, 8255 #endif 8256 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) 8257 { "/proc/cpuinfo", open_cpuinfo, is_proc }, 8258 #endif 8259 #if defined(TARGET_M68K) 8260 { "/proc/hardware", open_hardware, is_proc }, 8261 #endif 8262 { NULL, NULL, NULL } 8263 }; 8264 8265 if (is_proc_myself(pathname, "exe")) { 8266 int execfd = qemu_getauxval(AT_EXECFD); 8267 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode); 8268 } 8269 8270 for (fake_open = fakes; fake_open->filename; fake_open++) { 8271 if (fake_open->cmp(pathname, fake_open->filename)) { 8272 break; 8273 } 8274 } 8275 8276 if (fake_open->filename) { 8277 const char *tmpdir; 8278 char filename[PATH_MAX]; 8279 int fd, r; 8280 8281 fd = memfd_create("qemu-open", 0); 8282 if (fd < 0) { 8283 if (errno != ENOSYS) { 8284 return fd; 8285 } 8286 /* create temporary file to map stat to */ 8287 tmpdir = getenv("TMPDIR"); 8288 if (!tmpdir) 8289 tmpdir = "/tmp"; 8290 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 8291 fd = mkstemp(filename); 8292 if (fd < 0) { 8293 return fd; 8294 } 8295 unlink(filename); 8296 } 8297 8298 if ((r = fake_open->fill(cpu_env, fd))) { 8299 int e = errno; 8300 close(fd); 8301 errno = e; 8302 return r; 8303 } 8304 lseek(fd, 0, SEEK_SET); 8305 8306 return fd; 8307 } 8308 8309 return safe_openat(dirfd, path(pathname), flags, mode); 8310 } 8311 8312 #define TIMER_MAGIC 0x0caf0000 8313 #define TIMER_MAGIC_MASK 0xffff0000 8314 8315 /* Convert QEMU provided timer ID back to internal 16bit index format */ 8316 static target_timer_t get_timer_id(abi_long arg) 8317 { 8318 target_timer_t timerid = arg; 8319 8320 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 8321 return -TARGET_EINVAL; 8322 
/*
 * Copy a guest CPU affinity mask (array of abi_ulong words) into a host
 * mask (array of unsigned long words), re-packing bit by bit since the
 * word widths may differ between guest and host ABIs.
 * Returns 0 on success, -TARGET_EFAULT on inaccessible guest memory.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Caller guarantees the host buffer can hold every guest bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

/*
 * Inverse of target_to_host_cpu_mask(): re-pack a host CPU affinity
 * mask into guest abi_ulong words at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on inaccessible guest memory.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
#ifdef TARGET_NR_getdents
/*
 * Implement getdents: read directory entries from dirfd into the guest
 * buffer at arg2 (count bytes), converting each host record to the
 * target's struct layout and byte order.
 * Returns the number of bytes stored in the guest buffer, or a
 * negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    /* Host-side bounce buffer for the raw kernel records. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: terminating NUL plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */

#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement getdents64: same host-to-target record conversion as
 * do_getdents, but producing the struct target_dirent64 layout.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    /* Host-side bounce buffer for the raw kernel records. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* Unlike do_getdents, namelen here includes the terminating NUL. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */

#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Raw syscall wrapper for pivot_root. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
8584 However in threaded applications it is used for thread termination, 8585 and _exit_group is used for application termination. 8586 Do thread termination if we have more then one thread. */ 8587 8588 if (block_signals()) { 8589 return -QEMU_ERESTARTSYS; 8590 } 8591 8592 pthread_mutex_lock(&clone_lock); 8593 8594 if (CPU_NEXT(first_cpu)) { 8595 TaskState *ts = cpu->opaque; 8596 8597 object_property_set_bool(OBJECT(cpu), "realized", false, NULL); 8598 object_unref(OBJECT(cpu)); 8599 /* 8600 * At this point the CPU should be unrealized and removed 8601 * from cpu lists. We can clean-up the rest of the thread 8602 * data without the lock held. 8603 */ 8604 8605 pthread_mutex_unlock(&clone_lock); 8606 8607 if (ts->child_tidptr) { 8608 put_user_u32(0, ts->child_tidptr); 8609 do_sys_futex(g2h(cpu, ts->child_tidptr), 8610 FUTEX_WAKE, INT_MAX, NULL, NULL, 0); 8611 } 8612 thread_cpu = NULL; 8613 g_free(ts); 8614 rcu_unregister_thread(); 8615 pthread_exit(NULL); 8616 } 8617 8618 pthread_mutex_unlock(&clone_lock); 8619 preexit_cleanup(cpu_env, arg1); 8620 _exit(arg1); 8621 return 0; /* avoid warning */ 8622 case TARGET_NR_read: 8623 if (arg2 == 0 && arg3 == 0) { 8624 return get_errno(safe_read(arg1, 0, 0)); 8625 } else { 8626 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8627 return -TARGET_EFAULT; 8628 ret = get_errno(safe_read(arg1, p, arg3)); 8629 if (ret >= 0 && 8630 fd_trans_host_to_target_data(arg1)) { 8631 ret = fd_trans_host_to_target_data(arg1)(p, ret); 8632 } 8633 unlock_user(p, arg2, ret); 8634 } 8635 return ret; 8636 case TARGET_NR_write: 8637 if (arg2 == 0 && arg3 == 0) { 8638 return get_errno(safe_write(arg1, 0, 0)); 8639 } 8640 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8641 return -TARGET_EFAULT; 8642 if (fd_trans_target_to_host_data(arg1)) { 8643 void *copy = g_malloc(arg3); 8644 memcpy(copy, p, arg3); 8645 ret = fd_trans_target_to_host_data(arg1)(copy, arg3); 8646 if (ret >= 0) { 8647 ret = get_errno(safe_write(arg1, copy, ret)); 8648 } 8649 
g_free(copy); 8650 } else { 8651 ret = get_errno(safe_write(arg1, p, arg3)); 8652 } 8653 unlock_user(p, arg2, 0); 8654 return ret; 8655 8656 #ifdef TARGET_NR_open 8657 case TARGET_NR_open: 8658 if (!(p = lock_user_string(arg1))) 8659 return -TARGET_EFAULT; 8660 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 8661 target_to_host_bitmask(arg2, fcntl_flags_tbl), 8662 arg3)); 8663 fd_trans_unregister(ret); 8664 unlock_user(p, arg1, 0); 8665 return ret; 8666 #endif 8667 case TARGET_NR_openat: 8668 if (!(p = lock_user_string(arg2))) 8669 return -TARGET_EFAULT; 8670 ret = get_errno(do_openat(cpu_env, arg1, p, 8671 target_to_host_bitmask(arg3, fcntl_flags_tbl), 8672 arg4)); 8673 fd_trans_unregister(ret); 8674 unlock_user(p, arg2, 0); 8675 return ret; 8676 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8677 case TARGET_NR_name_to_handle_at: 8678 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 8679 return ret; 8680 #endif 8681 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 8682 case TARGET_NR_open_by_handle_at: 8683 ret = do_open_by_handle_at(arg1, arg2, arg3); 8684 fd_trans_unregister(ret); 8685 return ret; 8686 #endif 8687 case TARGET_NR_close: 8688 fd_trans_unregister(arg1); 8689 return get_errno(close(arg1)); 8690 8691 case TARGET_NR_brk: 8692 return do_brk(arg1); 8693 #ifdef TARGET_NR_fork 8694 case TARGET_NR_fork: 8695 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0)); 8696 #endif 8697 #ifdef TARGET_NR_waitpid 8698 case TARGET_NR_waitpid: 8699 { 8700 int status; 8701 ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); 8702 if (!is_error(ret) && arg2 && ret 8703 && put_user_s32(host_to_target_waitstatus(status), arg2)) 8704 return -TARGET_EFAULT; 8705 } 8706 return ret; 8707 #endif 8708 #ifdef TARGET_NR_waitid 8709 case TARGET_NR_waitid: 8710 { 8711 siginfo_t info; 8712 info.si_pid = 0; 8713 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); 8714 if (!is_error(ret) && arg3 && 
info.si_pid != 0) { 8715 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 8716 return -TARGET_EFAULT; 8717 host_to_target_siginfo(p, &info); 8718 unlock_user(p, arg3, sizeof(target_siginfo_t)); 8719 } 8720 } 8721 return ret; 8722 #endif 8723 #ifdef TARGET_NR_creat /* not on alpha */ 8724 case TARGET_NR_creat: 8725 if (!(p = lock_user_string(arg1))) 8726 return -TARGET_EFAULT; 8727 ret = get_errno(creat(p, arg2)); 8728 fd_trans_unregister(ret); 8729 unlock_user(p, arg1, 0); 8730 return ret; 8731 #endif 8732 #ifdef TARGET_NR_link 8733 case TARGET_NR_link: 8734 { 8735 void * p2; 8736 p = lock_user_string(arg1); 8737 p2 = lock_user_string(arg2); 8738 if (!p || !p2) 8739 ret = -TARGET_EFAULT; 8740 else 8741 ret = get_errno(link(p, p2)); 8742 unlock_user(p2, arg2, 0); 8743 unlock_user(p, arg1, 0); 8744 } 8745 return ret; 8746 #endif 8747 #if defined(TARGET_NR_linkat) 8748 case TARGET_NR_linkat: 8749 { 8750 void * p2 = NULL; 8751 if (!arg2 || !arg4) 8752 return -TARGET_EFAULT; 8753 p = lock_user_string(arg2); 8754 p2 = lock_user_string(arg4); 8755 if (!p || !p2) 8756 ret = -TARGET_EFAULT; 8757 else 8758 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 8759 unlock_user(p, arg2, 0); 8760 unlock_user(p2, arg4, 0); 8761 } 8762 return ret; 8763 #endif 8764 #ifdef TARGET_NR_unlink 8765 case TARGET_NR_unlink: 8766 if (!(p = lock_user_string(arg1))) 8767 return -TARGET_EFAULT; 8768 ret = get_errno(unlink(p)); 8769 unlock_user(p, arg1, 0); 8770 return ret; 8771 #endif 8772 #if defined(TARGET_NR_unlinkat) 8773 case TARGET_NR_unlinkat: 8774 if (!(p = lock_user_string(arg2))) 8775 return -TARGET_EFAULT; 8776 ret = get_errno(unlinkat(arg1, p, arg3)); 8777 unlock_user(p, arg2, 0); 8778 return ret; 8779 #endif 8780 case TARGET_NR_execve: 8781 { 8782 char **argp, **envp; 8783 int argc, envc; 8784 abi_ulong gp; 8785 abi_ulong guest_argp; 8786 abi_ulong guest_envp; 8787 abi_ulong addr; 8788 char **q; 8789 8790 argc = 0; 8791 guest_argp = arg2; 8792 for (gp = 
guest_argp; gp; gp += sizeof(abi_ulong)) { 8793 if (get_user_ual(addr, gp)) 8794 return -TARGET_EFAULT; 8795 if (!addr) 8796 break; 8797 argc++; 8798 } 8799 envc = 0; 8800 guest_envp = arg3; 8801 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 8802 if (get_user_ual(addr, gp)) 8803 return -TARGET_EFAULT; 8804 if (!addr) 8805 break; 8806 envc++; 8807 } 8808 8809 argp = g_new0(char *, argc + 1); 8810 envp = g_new0(char *, envc + 1); 8811 8812 for (gp = guest_argp, q = argp; gp; 8813 gp += sizeof(abi_ulong), q++) { 8814 if (get_user_ual(addr, gp)) 8815 goto execve_efault; 8816 if (!addr) 8817 break; 8818 if (!(*q = lock_user_string(addr))) 8819 goto execve_efault; 8820 } 8821 *q = NULL; 8822 8823 for (gp = guest_envp, q = envp; gp; 8824 gp += sizeof(abi_ulong), q++) { 8825 if (get_user_ual(addr, gp)) 8826 goto execve_efault; 8827 if (!addr) 8828 break; 8829 if (!(*q = lock_user_string(addr))) 8830 goto execve_efault; 8831 } 8832 *q = NULL; 8833 8834 if (!(p = lock_user_string(arg1))) 8835 goto execve_efault; 8836 /* Although execve() is not an interruptible syscall it is 8837 * a special case where we must use the safe_syscall wrapper: 8838 * if we allow a signal to happen before we make the host 8839 * syscall then we will 'lose' it, because at the point of 8840 * execve the process leaves QEMU's control. So we use the 8841 * safe syscall wrapper to ensure that we either take the 8842 * signal as a guest signal, or else it does not happen 8843 * before the execve completes and makes it the other 8844 * program's problem. 
8845 */ 8846 ret = get_errno(safe_execve(p, argp, envp)); 8847 unlock_user(p, arg1, 0); 8848 8849 goto execve_end; 8850 8851 execve_efault: 8852 ret = -TARGET_EFAULT; 8853 8854 execve_end: 8855 for (gp = guest_argp, q = argp; *q; 8856 gp += sizeof(abi_ulong), q++) { 8857 if (get_user_ual(addr, gp) 8858 || !addr) 8859 break; 8860 unlock_user(*q, addr, 0); 8861 } 8862 for (gp = guest_envp, q = envp; *q; 8863 gp += sizeof(abi_ulong), q++) { 8864 if (get_user_ual(addr, gp) 8865 || !addr) 8866 break; 8867 unlock_user(*q, addr, 0); 8868 } 8869 8870 g_free(argp); 8871 g_free(envp); 8872 } 8873 return ret; 8874 case TARGET_NR_chdir: 8875 if (!(p = lock_user_string(arg1))) 8876 return -TARGET_EFAULT; 8877 ret = get_errno(chdir(p)); 8878 unlock_user(p, arg1, 0); 8879 return ret; 8880 #ifdef TARGET_NR_time 8881 case TARGET_NR_time: 8882 { 8883 time_t host_time; 8884 ret = get_errno(time(&host_time)); 8885 if (!is_error(ret) 8886 && arg1 8887 && put_user_sal(host_time, arg1)) 8888 return -TARGET_EFAULT; 8889 } 8890 return ret; 8891 #endif 8892 #ifdef TARGET_NR_mknod 8893 case TARGET_NR_mknod: 8894 if (!(p = lock_user_string(arg1))) 8895 return -TARGET_EFAULT; 8896 ret = get_errno(mknod(p, arg2, arg3)); 8897 unlock_user(p, arg1, 0); 8898 return ret; 8899 #endif 8900 #if defined(TARGET_NR_mknodat) 8901 case TARGET_NR_mknodat: 8902 if (!(p = lock_user_string(arg2))) 8903 return -TARGET_EFAULT; 8904 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 8905 unlock_user(p, arg2, 0); 8906 return ret; 8907 #endif 8908 #ifdef TARGET_NR_chmod 8909 case TARGET_NR_chmod: 8910 if (!(p = lock_user_string(arg1))) 8911 return -TARGET_EFAULT; 8912 ret = get_errno(chmod(p, arg2)); 8913 unlock_user(p, arg1, 0); 8914 return ret; 8915 #endif 8916 #ifdef TARGET_NR_lseek 8917 case TARGET_NR_lseek: 8918 return get_errno(lseek(arg1, arg2, arg3)); 8919 #endif 8920 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 8921 /* Alpha specific */ 8922 case TARGET_NR_getxpid: 8923 cpu_env->ir[IR_A4] = 
getppid(); 8924 return get_errno(getpid()); 8925 #endif 8926 #ifdef TARGET_NR_getpid 8927 case TARGET_NR_getpid: 8928 return get_errno(getpid()); 8929 #endif 8930 case TARGET_NR_mount: 8931 { 8932 /* need to look at the data field */ 8933 void *p2, *p3; 8934 8935 if (arg1) { 8936 p = lock_user_string(arg1); 8937 if (!p) { 8938 return -TARGET_EFAULT; 8939 } 8940 } else { 8941 p = NULL; 8942 } 8943 8944 p2 = lock_user_string(arg2); 8945 if (!p2) { 8946 if (arg1) { 8947 unlock_user(p, arg1, 0); 8948 } 8949 return -TARGET_EFAULT; 8950 } 8951 8952 if (arg3) { 8953 p3 = lock_user_string(arg3); 8954 if (!p3) { 8955 if (arg1) { 8956 unlock_user(p, arg1, 0); 8957 } 8958 unlock_user(p2, arg2, 0); 8959 return -TARGET_EFAULT; 8960 } 8961 } else { 8962 p3 = NULL; 8963 } 8964 8965 /* FIXME - arg5 should be locked, but it isn't clear how to 8966 * do that since it's not guaranteed to be a NULL-terminated 8967 * string. 8968 */ 8969 if (!arg5) { 8970 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 8971 } else { 8972 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5)); 8973 } 8974 ret = get_errno(ret); 8975 8976 if (arg1) { 8977 unlock_user(p, arg1, 0); 8978 } 8979 unlock_user(p2, arg2, 0); 8980 if (arg3) { 8981 unlock_user(p3, arg3, 0); 8982 } 8983 } 8984 return ret; 8985 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount) 8986 #if defined(TARGET_NR_umount) 8987 case TARGET_NR_umount: 8988 #endif 8989 #if defined(TARGET_NR_oldumount) 8990 case TARGET_NR_oldumount: 8991 #endif 8992 if (!(p = lock_user_string(arg1))) 8993 return -TARGET_EFAULT; 8994 ret = get_errno(umount(p)); 8995 unlock_user(p, arg1, 0); 8996 return ret; 8997 #endif 8998 #ifdef TARGET_NR_stime /* not on alpha */ 8999 case TARGET_NR_stime: 9000 { 9001 struct timespec ts; 9002 ts.tv_nsec = 0; 9003 if (get_user_sal(ts.tv_sec, arg1)) { 9004 return -TARGET_EFAULT; 9005 } 9006 return get_errno(clock_settime(CLOCK_REALTIME, &ts)); 9007 } 9008 #endif 9009 #ifdef TARGET_NR_alarm /* not on alpha */ 9010 
case TARGET_NR_alarm: 9011 return alarm(arg1); 9012 #endif 9013 #ifdef TARGET_NR_pause /* not on alpha */ 9014 case TARGET_NR_pause: 9015 if (!block_signals()) { 9016 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); 9017 } 9018 return -TARGET_EINTR; 9019 #endif 9020 #ifdef TARGET_NR_utime 9021 case TARGET_NR_utime: 9022 { 9023 struct utimbuf tbuf, *host_tbuf; 9024 struct target_utimbuf *target_tbuf; 9025 if (arg2) { 9026 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 9027 return -TARGET_EFAULT; 9028 tbuf.actime = tswapal(target_tbuf->actime); 9029 tbuf.modtime = tswapal(target_tbuf->modtime); 9030 unlock_user_struct(target_tbuf, arg2, 0); 9031 host_tbuf = &tbuf; 9032 } else { 9033 host_tbuf = NULL; 9034 } 9035 if (!(p = lock_user_string(arg1))) 9036 return -TARGET_EFAULT; 9037 ret = get_errno(utime(p, host_tbuf)); 9038 unlock_user(p, arg1, 0); 9039 } 9040 return ret; 9041 #endif 9042 #ifdef TARGET_NR_utimes 9043 case TARGET_NR_utimes: 9044 { 9045 struct timeval *tvp, tv[2]; 9046 if (arg2) { 9047 if (copy_from_user_timeval(&tv[0], arg2) 9048 || copy_from_user_timeval(&tv[1], 9049 arg2 + sizeof(struct target_timeval))) 9050 return -TARGET_EFAULT; 9051 tvp = tv; 9052 } else { 9053 tvp = NULL; 9054 } 9055 if (!(p = lock_user_string(arg1))) 9056 return -TARGET_EFAULT; 9057 ret = get_errno(utimes(p, tvp)); 9058 unlock_user(p, arg1, 0); 9059 } 9060 return ret; 9061 #endif 9062 #if defined(TARGET_NR_futimesat) 9063 case TARGET_NR_futimesat: 9064 { 9065 struct timeval *tvp, tv[2]; 9066 if (arg3) { 9067 if (copy_from_user_timeval(&tv[0], arg3) 9068 || copy_from_user_timeval(&tv[1], 9069 arg3 + sizeof(struct target_timeval))) 9070 return -TARGET_EFAULT; 9071 tvp = tv; 9072 } else { 9073 tvp = NULL; 9074 } 9075 if (!(p = lock_user_string(arg2))) { 9076 return -TARGET_EFAULT; 9077 } 9078 ret = get_errno(futimesat(arg1, path(p), tvp)); 9079 unlock_user(p, arg2, 0); 9080 } 9081 return ret; 9082 #endif 9083 #ifdef TARGET_NR_access 9084 case TARGET_NR_access: 9085 
if (!(p = lock_user_string(arg1))) { 9086 return -TARGET_EFAULT; 9087 } 9088 ret = get_errno(access(path(p), arg2)); 9089 unlock_user(p, arg1, 0); 9090 return ret; 9091 #endif 9092 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 9093 case TARGET_NR_faccessat: 9094 if (!(p = lock_user_string(arg2))) { 9095 return -TARGET_EFAULT; 9096 } 9097 ret = get_errno(faccessat(arg1, p, arg3, 0)); 9098 unlock_user(p, arg2, 0); 9099 return ret; 9100 #endif 9101 #ifdef TARGET_NR_nice /* not on alpha */ 9102 case TARGET_NR_nice: 9103 return get_errno(nice(arg1)); 9104 #endif 9105 case TARGET_NR_sync: 9106 sync(); 9107 return 0; 9108 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS) 9109 case TARGET_NR_syncfs: 9110 return get_errno(syncfs(arg1)); 9111 #endif 9112 case TARGET_NR_kill: 9113 return get_errno(safe_kill(arg1, target_to_host_signal(arg2))); 9114 #ifdef TARGET_NR_rename 9115 case TARGET_NR_rename: 9116 { 9117 void *p2; 9118 p = lock_user_string(arg1); 9119 p2 = lock_user_string(arg2); 9120 if (!p || !p2) 9121 ret = -TARGET_EFAULT; 9122 else 9123 ret = get_errno(rename(p, p2)); 9124 unlock_user(p2, arg2, 0); 9125 unlock_user(p, arg1, 0); 9126 } 9127 return ret; 9128 #endif 9129 #if defined(TARGET_NR_renameat) 9130 case TARGET_NR_renameat: 9131 { 9132 void *p2; 9133 p = lock_user_string(arg2); 9134 p2 = lock_user_string(arg4); 9135 if (!p || !p2) 9136 ret = -TARGET_EFAULT; 9137 else 9138 ret = get_errno(renameat(arg1, p, arg3, p2)); 9139 unlock_user(p2, arg4, 0); 9140 unlock_user(p, arg2, 0); 9141 } 9142 return ret; 9143 #endif 9144 #if defined(TARGET_NR_renameat2) 9145 case TARGET_NR_renameat2: 9146 { 9147 void *p2; 9148 p = lock_user_string(arg2); 9149 p2 = lock_user_string(arg4); 9150 if (!p || !p2) { 9151 ret = -TARGET_EFAULT; 9152 } else { 9153 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5)); 9154 } 9155 unlock_user(p2, arg4, 0); 9156 unlock_user(p, arg2, 0); 9157 } 9158 return ret; 9159 #endif 9160 #ifdef TARGET_NR_mkdir 9161 case 
TARGET_NR_mkdir: 9162 if (!(p = lock_user_string(arg1))) 9163 return -TARGET_EFAULT; 9164 ret = get_errno(mkdir(p, arg2)); 9165 unlock_user(p, arg1, 0); 9166 return ret; 9167 #endif 9168 #if defined(TARGET_NR_mkdirat) 9169 case TARGET_NR_mkdirat: 9170 if (!(p = lock_user_string(arg2))) 9171 return -TARGET_EFAULT; 9172 ret = get_errno(mkdirat(arg1, p, arg3)); 9173 unlock_user(p, arg2, 0); 9174 return ret; 9175 #endif 9176 #ifdef TARGET_NR_rmdir 9177 case TARGET_NR_rmdir: 9178 if (!(p = lock_user_string(arg1))) 9179 return -TARGET_EFAULT; 9180 ret = get_errno(rmdir(p)); 9181 unlock_user(p, arg1, 0); 9182 return ret; 9183 #endif 9184 case TARGET_NR_dup: 9185 ret = get_errno(dup(arg1)); 9186 if (ret >= 0) { 9187 fd_trans_dup(arg1, ret); 9188 } 9189 return ret; 9190 #ifdef TARGET_NR_pipe 9191 case TARGET_NR_pipe: 9192 return do_pipe(cpu_env, arg1, 0, 0); 9193 #endif 9194 #ifdef TARGET_NR_pipe2 9195 case TARGET_NR_pipe2: 9196 return do_pipe(cpu_env, arg1, 9197 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 9198 #endif 9199 case TARGET_NR_times: 9200 { 9201 struct target_tms *tmsp; 9202 struct tms tms; 9203 ret = get_errno(times(&tms)); 9204 if (arg1) { 9205 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 9206 if (!tmsp) 9207 return -TARGET_EFAULT; 9208 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 9209 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 9210 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 9211 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 9212 } 9213 if (!is_error(ret)) 9214 ret = host_to_target_clock_t(ret); 9215 } 9216 return ret; 9217 case TARGET_NR_acct: 9218 if (arg1 == 0) { 9219 ret = get_errno(acct(NULL)); 9220 } else { 9221 if (!(p = lock_user_string(arg1))) { 9222 return -TARGET_EFAULT; 9223 } 9224 ret = get_errno(acct(path(p))); 9225 unlock_user(p, arg1, 0); 9226 } 9227 return ret; 9228 #ifdef TARGET_NR_umount2 9229 case TARGET_NR_umount2: 9230 
if (!(p = lock_user_string(arg1))) 9231 return -TARGET_EFAULT; 9232 ret = get_errno(umount2(p, arg2)); 9233 unlock_user(p, arg1, 0); 9234 return ret; 9235 #endif 9236 case TARGET_NR_ioctl: 9237 return do_ioctl(arg1, arg2, arg3); 9238 #ifdef TARGET_NR_fcntl 9239 case TARGET_NR_fcntl: 9240 return do_fcntl(arg1, arg2, arg3); 9241 #endif 9242 case TARGET_NR_setpgid: 9243 return get_errno(setpgid(arg1, arg2)); 9244 case TARGET_NR_umask: 9245 return get_errno(umask(arg1)); 9246 case TARGET_NR_chroot: 9247 if (!(p = lock_user_string(arg1))) 9248 return -TARGET_EFAULT; 9249 ret = get_errno(chroot(p)); 9250 unlock_user(p, arg1, 0); 9251 return ret; 9252 #ifdef TARGET_NR_dup2 9253 case TARGET_NR_dup2: 9254 ret = get_errno(dup2(arg1, arg2)); 9255 if (ret >= 0) { 9256 fd_trans_dup(arg1, arg2); 9257 } 9258 return ret; 9259 #endif 9260 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 9261 case TARGET_NR_dup3: 9262 { 9263 int host_flags; 9264 9265 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) { 9266 return -EINVAL; 9267 } 9268 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl); 9269 ret = get_errno(dup3(arg1, arg2, host_flags)); 9270 if (ret >= 0) { 9271 fd_trans_dup(arg1, arg2); 9272 } 9273 return ret; 9274 } 9275 #endif 9276 #ifdef TARGET_NR_getppid /* not on alpha */ 9277 case TARGET_NR_getppid: 9278 return get_errno(getppid()); 9279 #endif 9280 #ifdef TARGET_NR_getpgrp 9281 case TARGET_NR_getpgrp: 9282 return get_errno(getpgrp()); 9283 #endif 9284 case TARGET_NR_setsid: 9285 return get_errno(setsid()); 9286 #ifdef TARGET_NR_sigaction 9287 case TARGET_NR_sigaction: 9288 { 9289 #if defined(TARGET_MIPS) 9290 struct target_sigaction act, oact, *pact, *old_act; 9291 9292 if (arg2) { 9293 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9294 return -TARGET_EFAULT; 9295 act._sa_handler = old_act->_sa_handler; 9296 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 9297 act.sa_flags = old_act->sa_flags; 9298 unlock_user_struct(old_act, arg2, 0); 9299 pact = &act; 9300 } 
else { 9301 pact = NULL; 9302 } 9303 9304 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9305 9306 if (!is_error(ret) && arg3) { 9307 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9308 return -TARGET_EFAULT; 9309 old_act->_sa_handler = oact._sa_handler; 9310 old_act->sa_flags = oact.sa_flags; 9311 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 9312 old_act->sa_mask.sig[1] = 0; 9313 old_act->sa_mask.sig[2] = 0; 9314 old_act->sa_mask.sig[3] = 0; 9315 unlock_user_struct(old_act, arg3, 1); 9316 } 9317 #else 9318 struct target_old_sigaction *old_act; 9319 struct target_sigaction act, oact, *pact; 9320 if (arg2) { 9321 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 9322 return -TARGET_EFAULT; 9323 act._sa_handler = old_act->_sa_handler; 9324 target_siginitset(&act.sa_mask, old_act->sa_mask); 9325 act.sa_flags = old_act->sa_flags; 9326 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9327 act.sa_restorer = old_act->sa_restorer; 9328 #endif 9329 unlock_user_struct(old_act, arg2, 0); 9330 pact = &act; 9331 } else { 9332 pact = NULL; 9333 } 9334 ret = get_errno(do_sigaction(arg1, pact, &oact, 0)); 9335 if (!is_error(ret) && arg3) { 9336 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 9337 return -TARGET_EFAULT; 9338 old_act->_sa_handler = oact._sa_handler; 9339 old_act->sa_mask = oact.sa_mask.sig[0]; 9340 old_act->sa_flags = oact.sa_flags; 9341 #ifdef TARGET_ARCH_HAS_SA_RESTORER 9342 old_act->sa_restorer = oact.sa_restorer; 9343 #endif 9344 unlock_user_struct(old_act, arg3, 1); 9345 } 9346 #endif 9347 } 9348 return ret; 9349 #endif 9350 case TARGET_NR_rt_sigaction: 9351 { 9352 /* 9353 * For Alpha and SPARC this is a 5 argument syscall, with 9354 * a 'restorer' parameter which must be copied into the 9355 * sa_restorer field of the sigaction struct. 9356 * For Alpha that 'restorer' is arg5; for SPARC it is arg4, 9357 * and arg5 is the sigsetsize. 
9358 */ 9359 #if defined(TARGET_ALPHA) 9360 target_ulong sigsetsize = arg4; 9361 target_ulong restorer = arg5; 9362 #elif defined(TARGET_SPARC) 9363 target_ulong restorer = arg4; 9364 target_ulong sigsetsize = arg5; 9365 #else 9366 target_ulong sigsetsize = arg4; 9367 target_ulong restorer = 0; 9368 #endif 9369 struct target_sigaction *act = NULL; 9370 struct target_sigaction *oact = NULL; 9371 9372 if (sigsetsize != sizeof(target_sigset_t)) { 9373 return -TARGET_EINVAL; 9374 } 9375 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) { 9376 return -TARGET_EFAULT; 9377 } 9378 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 9379 ret = -TARGET_EFAULT; 9380 } else { 9381 ret = get_errno(do_sigaction(arg1, act, oact, restorer)); 9382 if (oact) { 9383 unlock_user_struct(oact, arg3, 1); 9384 } 9385 } 9386 if (act) { 9387 unlock_user_struct(act, arg2, 0); 9388 } 9389 } 9390 return ret; 9391 #ifdef TARGET_NR_sgetmask /* not on alpha */ 9392 case TARGET_NR_sgetmask: 9393 { 9394 sigset_t cur_set; 9395 abi_ulong target_set; 9396 ret = do_sigprocmask(0, NULL, &cur_set); 9397 if (!ret) { 9398 host_to_target_old_sigset(&target_set, &cur_set); 9399 ret = target_set; 9400 } 9401 } 9402 return ret; 9403 #endif 9404 #ifdef TARGET_NR_ssetmask /* not on alpha */ 9405 case TARGET_NR_ssetmask: 9406 { 9407 sigset_t set, oset; 9408 abi_ulong target_set = arg1; 9409 target_to_host_old_sigset(&set, &target_set); 9410 ret = do_sigprocmask(SIG_SETMASK, &set, &oset); 9411 if (!ret) { 9412 host_to_target_old_sigset(&target_set, &oset); 9413 ret = target_set; 9414 } 9415 } 9416 return ret; 9417 #endif 9418 #ifdef TARGET_NR_sigprocmask 9419 case TARGET_NR_sigprocmask: 9420 { 9421 #if defined(TARGET_ALPHA) 9422 sigset_t set, oldset; 9423 abi_ulong mask; 9424 int how; 9425 9426 switch (arg1) { 9427 case TARGET_SIG_BLOCK: 9428 how = SIG_BLOCK; 9429 break; 9430 case TARGET_SIG_UNBLOCK: 9431 how = SIG_UNBLOCK; 9432 break; 9433 case TARGET_SIG_SETMASK: 9434 how = SIG_SETMASK; 9435 
break; 9436 default: 9437 return -TARGET_EINVAL; 9438 } 9439 mask = arg2; 9440 target_to_host_old_sigset(&set, &mask); 9441 9442 ret = do_sigprocmask(how, &set, &oldset); 9443 if (!is_error(ret)) { 9444 host_to_target_old_sigset(&mask, &oldset); 9445 ret = mask; 9446 cpu_env->ir[IR_V0] = 0; /* force no error */ 9447 } 9448 #else 9449 sigset_t set, oldset, *set_ptr; 9450 int how; 9451 9452 if (arg2) { 9453 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9454 if (!p) { 9455 return -TARGET_EFAULT; 9456 } 9457 target_to_host_old_sigset(&set, p); 9458 unlock_user(p, arg2, 0); 9459 set_ptr = &set; 9460 switch (arg1) { 9461 case TARGET_SIG_BLOCK: 9462 how = SIG_BLOCK; 9463 break; 9464 case TARGET_SIG_UNBLOCK: 9465 how = SIG_UNBLOCK; 9466 break; 9467 case TARGET_SIG_SETMASK: 9468 how = SIG_SETMASK; 9469 break; 9470 default: 9471 return -TARGET_EINVAL; 9472 } 9473 } else { 9474 how = 0; 9475 set_ptr = NULL; 9476 } 9477 ret = do_sigprocmask(how, set_ptr, &oldset); 9478 if (!is_error(ret) && arg3) { 9479 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9480 return -TARGET_EFAULT; 9481 host_to_target_old_sigset(p, &oldset); 9482 unlock_user(p, arg3, sizeof(target_sigset_t)); 9483 } 9484 #endif 9485 } 9486 return ret; 9487 #endif 9488 case TARGET_NR_rt_sigprocmask: 9489 { 9490 int how = arg1; 9491 sigset_t set, oldset, *set_ptr; 9492 9493 if (arg4 != sizeof(target_sigset_t)) { 9494 return -TARGET_EINVAL; 9495 } 9496 9497 if (arg2) { 9498 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1); 9499 if (!p) { 9500 return -TARGET_EFAULT; 9501 } 9502 target_to_host_sigset(&set, p); 9503 unlock_user(p, arg2, 0); 9504 set_ptr = &set; 9505 switch(how) { 9506 case TARGET_SIG_BLOCK: 9507 how = SIG_BLOCK; 9508 break; 9509 case TARGET_SIG_UNBLOCK: 9510 how = SIG_UNBLOCK; 9511 break; 9512 case TARGET_SIG_SETMASK: 9513 how = SIG_SETMASK; 9514 break; 9515 default: 9516 return -TARGET_EINVAL; 9517 } 9518 } else { 9519 how = 0; 9520 set_ptr = NULL; 9521 
} 9522 ret = do_sigprocmask(how, set_ptr, &oldset); 9523 if (!is_error(ret) && arg3) { 9524 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 9525 return -TARGET_EFAULT; 9526 host_to_target_sigset(p, &oldset); 9527 unlock_user(p, arg3, sizeof(target_sigset_t)); 9528 } 9529 } 9530 return ret; 9531 #ifdef TARGET_NR_sigpending 9532 case TARGET_NR_sigpending: 9533 { 9534 sigset_t set; 9535 ret = get_errno(sigpending(&set)); 9536 if (!is_error(ret)) { 9537 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9538 return -TARGET_EFAULT; 9539 host_to_target_old_sigset(p, &set); 9540 unlock_user(p, arg1, sizeof(target_sigset_t)); 9541 } 9542 } 9543 return ret; 9544 #endif 9545 case TARGET_NR_rt_sigpending: 9546 { 9547 sigset_t set; 9548 9549 /* Yes, this check is >, not != like most. We follow the kernel's 9550 * logic and it does it like this because it implements 9551 * NR_sigpending through the same code path, and in that case 9552 * the old_sigset_t is smaller in size. 
9553 */ 9554 if (arg2 > sizeof(target_sigset_t)) { 9555 return -TARGET_EINVAL; 9556 } 9557 9558 ret = get_errno(sigpending(&set)); 9559 if (!is_error(ret)) { 9560 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 9561 return -TARGET_EFAULT; 9562 host_to_target_sigset(p, &set); 9563 unlock_user(p, arg1, sizeof(target_sigset_t)); 9564 } 9565 } 9566 return ret; 9567 #ifdef TARGET_NR_sigsuspend 9568 case TARGET_NR_sigsuspend: 9569 { 9570 sigset_t *set; 9571 9572 #if defined(TARGET_ALPHA) 9573 TaskState *ts = cpu->opaque; 9574 /* target_to_host_old_sigset will bswap back */ 9575 abi_ulong mask = tswapal(arg1); 9576 set = &ts->sigsuspend_mask; 9577 target_to_host_old_sigset(set, &mask); 9578 #else 9579 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t)); 9580 if (ret != 0) { 9581 return ret; 9582 } 9583 #endif 9584 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9585 finish_sigsuspend_mask(ret); 9586 } 9587 return ret; 9588 #endif 9589 case TARGET_NR_rt_sigsuspend: 9590 { 9591 sigset_t *set; 9592 9593 ret = process_sigsuspend_mask(&set, arg1, arg2); 9594 if (ret != 0) { 9595 return ret; 9596 } 9597 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE)); 9598 finish_sigsuspend_mask(ret); 9599 } 9600 return ret; 9601 #ifdef TARGET_NR_rt_sigtimedwait 9602 case TARGET_NR_rt_sigtimedwait: 9603 { 9604 sigset_t set; 9605 struct timespec uts, *puts; 9606 siginfo_t uinfo; 9607 9608 if (arg4 != sizeof(target_sigset_t)) { 9609 return -TARGET_EINVAL; 9610 } 9611 9612 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 9613 return -TARGET_EFAULT; 9614 target_to_host_sigset(&set, p); 9615 unlock_user(p, arg1, 0); 9616 if (arg3) { 9617 puts = &uts; 9618 if (target_to_host_timespec(puts, arg3)) { 9619 return -TARGET_EFAULT; 9620 } 9621 } else { 9622 puts = NULL; 9623 } 9624 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9625 SIGSET_T_SIZE)); 9626 if (!is_error(ret)) { 9627 if (arg2) { 9628 p = 
lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 9629 0); 9630 if (!p) { 9631 return -TARGET_EFAULT; 9632 } 9633 host_to_target_siginfo(p, &uinfo); 9634 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9635 } 9636 ret = host_to_target_signal(ret); 9637 } 9638 } 9639 return ret; 9640 #endif 9641 #ifdef TARGET_NR_rt_sigtimedwait_time64 9642 case TARGET_NR_rt_sigtimedwait_time64: 9643 { 9644 sigset_t set; 9645 struct timespec uts, *puts; 9646 siginfo_t uinfo; 9647 9648 if (arg4 != sizeof(target_sigset_t)) { 9649 return -TARGET_EINVAL; 9650 } 9651 9652 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1); 9653 if (!p) { 9654 return -TARGET_EFAULT; 9655 } 9656 target_to_host_sigset(&set, p); 9657 unlock_user(p, arg1, 0); 9658 if (arg3) { 9659 puts = &uts; 9660 if (target_to_host_timespec64(puts, arg3)) { 9661 return -TARGET_EFAULT; 9662 } 9663 } else { 9664 puts = NULL; 9665 } 9666 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, 9667 SIGSET_T_SIZE)); 9668 if (!is_error(ret)) { 9669 if (arg2) { 9670 p = lock_user(VERIFY_WRITE, arg2, 9671 sizeof(target_siginfo_t), 0); 9672 if (!p) { 9673 return -TARGET_EFAULT; 9674 } 9675 host_to_target_siginfo(p, &uinfo); 9676 unlock_user(p, arg2, sizeof(target_siginfo_t)); 9677 } 9678 ret = host_to_target_signal(ret); 9679 } 9680 } 9681 return ret; 9682 #endif 9683 case TARGET_NR_rt_sigqueueinfo: 9684 { 9685 siginfo_t uinfo; 9686 9687 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); 9688 if (!p) { 9689 return -TARGET_EFAULT; 9690 } 9691 target_to_host_siginfo(&uinfo, p); 9692 unlock_user(p, arg3, 0); 9693 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); 9694 } 9695 return ret; 9696 case TARGET_NR_rt_tgsigqueueinfo: 9697 { 9698 siginfo_t uinfo; 9699 9700 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1); 9701 if (!p) { 9702 return -TARGET_EFAULT; 9703 } 9704 target_to_host_siginfo(&uinfo, p); 9705 unlock_user(p, arg4, 0); 9706 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, 
arg3, &uinfo)); 9707 } 9708 return ret; 9709 #ifdef TARGET_NR_sigreturn 9710 case TARGET_NR_sigreturn: 9711 if (block_signals()) { 9712 return -QEMU_ERESTARTSYS; 9713 } 9714 return do_sigreturn(cpu_env); 9715 #endif 9716 case TARGET_NR_rt_sigreturn: 9717 if (block_signals()) { 9718 return -QEMU_ERESTARTSYS; 9719 } 9720 return do_rt_sigreturn(cpu_env); 9721 case TARGET_NR_sethostname: 9722 if (!(p = lock_user_string(arg1))) 9723 return -TARGET_EFAULT; 9724 ret = get_errno(sethostname(p, arg2)); 9725 unlock_user(p, arg1, 0); 9726 return ret; 9727 #ifdef TARGET_NR_setrlimit 9728 case TARGET_NR_setrlimit: 9729 { 9730 int resource = target_to_host_resource(arg1); 9731 struct target_rlimit *target_rlim; 9732 struct rlimit rlim; 9733 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 9734 return -TARGET_EFAULT; 9735 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 9736 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 9737 unlock_user_struct(target_rlim, arg2, 0); 9738 /* 9739 * If we just passed through resource limit settings for memory then 9740 * they would also apply to QEMU's own allocations, and QEMU will 9741 * crash or hang or die if its allocations fail. Ideally we would 9742 * track the guest allocations in QEMU and apply the limits ourselves. 9743 * For now, just tell the guest the call succeeded but don't actually 9744 * limit anything. 
9745 */ 9746 if (resource != RLIMIT_AS && 9747 resource != RLIMIT_DATA && 9748 resource != RLIMIT_STACK) { 9749 return get_errno(setrlimit(resource, &rlim)); 9750 } else { 9751 return 0; 9752 } 9753 } 9754 #endif 9755 #ifdef TARGET_NR_getrlimit 9756 case TARGET_NR_getrlimit: 9757 { 9758 int resource = target_to_host_resource(arg1); 9759 struct target_rlimit *target_rlim; 9760 struct rlimit rlim; 9761 9762 ret = get_errno(getrlimit(resource, &rlim)); 9763 if (!is_error(ret)) { 9764 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 9765 return -TARGET_EFAULT; 9766 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 9767 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 9768 unlock_user_struct(target_rlim, arg2, 1); 9769 } 9770 } 9771 return ret; 9772 #endif 9773 case TARGET_NR_getrusage: 9774 { 9775 struct rusage rusage; 9776 ret = get_errno(getrusage(arg1, &rusage)); 9777 if (!is_error(ret)) { 9778 ret = host_to_target_rusage(arg2, &rusage); 9779 } 9780 } 9781 return ret; 9782 #if defined(TARGET_NR_gettimeofday) 9783 case TARGET_NR_gettimeofday: 9784 { 9785 struct timeval tv; 9786 struct timezone tz; 9787 9788 ret = get_errno(gettimeofday(&tv, &tz)); 9789 if (!is_error(ret)) { 9790 if (arg1 && copy_to_user_timeval(arg1, &tv)) { 9791 return -TARGET_EFAULT; 9792 } 9793 if (arg2 && copy_to_user_timezone(arg2, &tz)) { 9794 return -TARGET_EFAULT; 9795 } 9796 } 9797 } 9798 return ret; 9799 #endif 9800 #if defined(TARGET_NR_settimeofday) 9801 case TARGET_NR_settimeofday: 9802 { 9803 struct timeval tv, *ptv = NULL; 9804 struct timezone tz, *ptz = NULL; 9805 9806 if (arg1) { 9807 if (copy_from_user_timeval(&tv, arg1)) { 9808 return -TARGET_EFAULT; 9809 } 9810 ptv = &tv; 9811 } 9812 9813 if (arg2) { 9814 if (copy_from_user_timezone(&tz, arg2)) { 9815 return -TARGET_EFAULT; 9816 } 9817 ptz = &tz; 9818 } 9819 9820 return get_errno(settimeofday(ptv, ptz)); 9821 } 9822 #endif 9823 #if defined(TARGET_NR_select) 9824 case TARGET_NR_select: 9825 #if 
defined(TARGET_WANT_NI_OLD_SELECT) 9826 /* some architectures used to have old_select here 9827 * but now ENOSYS it. 9828 */ 9829 ret = -TARGET_ENOSYS; 9830 #elif defined(TARGET_WANT_OLD_SYS_SELECT) 9831 ret = do_old_select(arg1); 9832 #else 9833 ret = do_select(arg1, arg2, arg3, arg4, arg5); 9834 #endif 9835 return ret; 9836 #endif 9837 #ifdef TARGET_NR_pselect6 9838 case TARGET_NR_pselect6: 9839 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false); 9840 #endif 9841 #ifdef TARGET_NR_pselect6_time64 9842 case TARGET_NR_pselect6_time64: 9843 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true); 9844 #endif 9845 #ifdef TARGET_NR_symlink 9846 case TARGET_NR_symlink: 9847 { 9848 void *p2; 9849 p = lock_user_string(arg1); 9850 p2 = lock_user_string(arg2); 9851 if (!p || !p2) 9852 ret = -TARGET_EFAULT; 9853 else 9854 ret = get_errno(symlink(p, p2)); 9855 unlock_user(p2, arg2, 0); 9856 unlock_user(p, arg1, 0); 9857 } 9858 return ret; 9859 #endif 9860 #if defined(TARGET_NR_symlinkat) 9861 case TARGET_NR_symlinkat: 9862 { 9863 void *p2; 9864 p = lock_user_string(arg1); 9865 p2 = lock_user_string(arg3); 9866 if (!p || !p2) 9867 ret = -TARGET_EFAULT; 9868 else 9869 ret = get_errno(symlinkat(p, arg2, p2)); 9870 unlock_user(p2, arg3, 0); 9871 unlock_user(p, arg1, 0); 9872 } 9873 return ret; 9874 #endif 9875 #ifdef TARGET_NR_readlink 9876 case TARGET_NR_readlink: 9877 { 9878 void *p2; 9879 p = lock_user_string(arg1); 9880 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9881 if (!p || !p2) { 9882 ret = -TARGET_EFAULT; 9883 } else if (!arg3) { 9884 /* Short circuit this for the magic exe check. */ 9885 ret = -TARGET_EINVAL; 9886 } else if (is_proc_myself((const char *)p, "exe")) { 9887 char real[PATH_MAX], *temp; 9888 temp = realpath(exec_path, real); 9889 /* Return value is # of bytes that we wrote to the buffer. 
*/ 9890 if (temp == NULL) { 9891 ret = get_errno(-1); 9892 } else { 9893 /* Don't worry about sign mismatch as earlier mapping 9894 * logic would have thrown a bad address error. */ 9895 ret = MIN(strlen(real), arg3); 9896 /* We cannot NUL terminate the string. */ 9897 memcpy(p2, real, ret); 9898 } 9899 } else { 9900 ret = get_errno(readlink(path(p), p2, arg3)); 9901 } 9902 unlock_user(p2, arg2, ret); 9903 unlock_user(p, arg1, 0); 9904 } 9905 return ret; 9906 #endif 9907 #if defined(TARGET_NR_readlinkat) 9908 case TARGET_NR_readlinkat: 9909 { 9910 void *p2; 9911 p = lock_user_string(arg2); 9912 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9913 if (!p || !p2) { 9914 ret = -TARGET_EFAULT; 9915 } else if (!arg4) { 9916 /* Short circuit this for the magic exe check. */ 9917 ret = -TARGET_EINVAL; 9918 } else if (is_proc_myself((const char *)p, "exe")) { 9919 char real[PATH_MAX], *temp; 9920 temp = realpath(exec_path, real); 9921 /* Return value is # of bytes that we wrote to the buffer. */ 9922 if (temp == NULL) { 9923 ret = get_errno(-1); 9924 } else { 9925 /* Don't worry about sign mismatch as earlier mapping 9926 * logic would have thrown a bad address error. */ 9927 ret = MIN(strlen(real), arg4); 9928 /* We cannot NUL terminate the string. 
*/ 9929 memcpy(p2, real, ret); 9930 } 9931 } else { 9932 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 9933 } 9934 unlock_user(p2, arg3, ret); 9935 unlock_user(p, arg2, 0); 9936 } 9937 return ret; 9938 #endif 9939 #ifdef TARGET_NR_swapon 9940 case TARGET_NR_swapon: 9941 if (!(p = lock_user_string(arg1))) 9942 return -TARGET_EFAULT; 9943 ret = get_errno(swapon(p, arg2)); 9944 unlock_user(p, arg1, 0); 9945 return ret; 9946 #endif 9947 case TARGET_NR_reboot: 9948 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 9949 /* arg4 must be ignored in all other cases */ 9950 p = lock_user_string(arg4); 9951 if (!p) { 9952 return -TARGET_EFAULT; 9953 } 9954 ret = get_errno(reboot(arg1, arg2, arg3, p)); 9955 unlock_user(p, arg4, 0); 9956 } else { 9957 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 9958 } 9959 return ret; 9960 #ifdef TARGET_NR_mmap 9961 case TARGET_NR_mmap: 9962 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 9963 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 9964 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 9965 || defined(TARGET_S390X) 9966 { 9967 abi_ulong *v; 9968 abi_ulong v1, v2, v3, v4, v5, v6; 9969 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 9970 return -TARGET_EFAULT; 9971 v1 = tswapal(v[0]); 9972 v2 = tswapal(v[1]); 9973 v3 = tswapal(v[2]); 9974 v4 = tswapal(v[3]); 9975 v5 = tswapal(v[4]); 9976 v6 = tswapal(v[5]); 9977 unlock_user(v, arg1, 0); 9978 ret = get_errno(target_mmap(v1, v2, v3, 9979 target_to_host_bitmask(v4, mmap_flags_tbl), 9980 v5, v6)); 9981 } 9982 #else 9983 /* mmap pointers are always untagged */ 9984 ret = get_errno(target_mmap(arg1, arg2, arg3, 9985 target_to_host_bitmask(arg4, mmap_flags_tbl), 9986 arg5, 9987 arg6)); 9988 #endif 9989 return ret; 9990 #endif 9991 #ifdef TARGET_NR_mmap2 9992 case TARGET_NR_mmap2: 9993 #ifndef MMAP_SHIFT 9994 #define MMAP_SHIFT 12 9995 #endif 9996 ret = target_mmap(arg1, arg2, arg3, 9997 target_to_host_bitmask(arg4, mmap_flags_tbl), 
9998 arg5, arg6 << MMAP_SHIFT); 9999 return get_errno(ret); 10000 #endif 10001 case TARGET_NR_munmap: 10002 arg1 = cpu_untagged_addr(cpu, arg1); 10003 return get_errno(target_munmap(arg1, arg2)); 10004 case TARGET_NR_mprotect: 10005 arg1 = cpu_untagged_addr(cpu, arg1); 10006 { 10007 TaskState *ts = cpu->opaque; 10008 /* Special hack to detect libc making the stack executable. */ 10009 if ((arg3 & PROT_GROWSDOWN) 10010 && arg1 >= ts->info->stack_limit 10011 && arg1 <= ts->info->start_stack) { 10012 arg3 &= ~PROT_GROWSDOWN; 10013 arg2 = arg2 + arg1 - ts->info->stack_limit; 10014 arg1 = ts->info->stack_limit; 10015 } 10016 } 10017 return get_errno(target_mprotect(arg1, arg2, arg3)); 10018 #ifdef TARGET_NR_mremap 10019 case TARGET_NR_mremap: 10020 arg1 = cpu_untagged_addr(cpu, arg1); 10021 /* mremap new_addr (arg5) is always untagged */ 10022 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 10023 #endif 10024 /* ??? msync/mlock/munlock are broken for softmmu. */ 10025 #ifdef TARGET_NR_msync 10026 case TARGET_NR_msync: 10027 return get_errno(msync(g2h(cpu, arg1), arg2, arg3)); 10028 #endif 10029 #ifdef TARGET_NR_mlock 10030 case TARGET_NR_mlock: 10031 return get_errno(mlock(g2h(cpu, arg1), arg2)); 10032 #endif 10033 #ifdef TARGET_NR_munlock 10034 case TARGET_NR_munlock: 10035 return get_errno(munlock(g2h(cpu, arg1), arg2)); 10036 #endif 10037 #ifdef TARGET_NR_mlockall 10038 case TARGET_NR_mlockall: 10039 return get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 10040 #endif 10041 #ifdef TARGET_NR_munlockall 10042 case TARGET_NR_munlockall: 10043 return get_errno(munlockall()); 10044 #endif 10045 #ifdef TARGET_NR_truncate 10046 case TARGET_NR_truncate: 10047 if (!(p = lock_user_string(arg1))) 10048 return -TARGET_EFAULT; 10049 ret = get_errno(truncate(p, arg2)); 10050 unlock_user(p, arg1, 0); 10051 return ret; 10052 #endif 10053 #ifdef TARGET_NR_ftruncate 10054 case TARGET_NR_ftruncate: 10055 return get_errno(ftruncate(arg1, arg2)); 10056 #endif 10057 
case TARGET_NR_fchmod: 10058 return get_errno(fchmod(arg1, arg2)); 10059 #if defined(TARGET_NR_fchmodat) 10060 case TARGET_NR_fchmodat: 10061 if (!(p = lock_user_string(arg2))) 10062 return -TARGET_EFAULT; 10063 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 10064 unlock_user(p, arg2, 0); 10065 return ret; 10066 #endif 10067 case TARGET_NR_getpriority: 10068 /* Note that negative values are valid for getpriority, so we must 10069 differentiate based on errno settings. */ 10070 errno = 0; 10071 ret = getpriority(arg1, arg2); 10072 if (ret == -1 && errno != 0) { 10073 return -host_to_target_errno(errno); 10074 } 10075 #ifdef TARGET_ALPHA 10076 /* Return value is the unbiased priority. Signal no error. */ 10077 cpu_env->ir[IR_V0] = 0; 10078 #else 10079 /* Return value is a biased priority to avoid negative numbers. */ 10080 ret = 20 - ret; 10081 #endif 10082 return ret; 10083 case TARGET_NR_setpriority: 10084 return get_errno(setpriority(arg1, arg2, arg3)); 10085 #ifdef TARGET_NR_statfs 10086 case TARGET_NR_statfs: 10087 if (!(p = lock_user_string(arg1))) { 10088 return -TARGET_EFAULT; 10089 } 10090 ret = get_errno(statfs(path(p), &stfs)); 10091 unlock_user(p, arg1, 0); 10092 convert_statfs: 10093 if (!is_error(ret)) { 10094 struct target_statfs *target_stfs; 10095 10096 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 10097 return -TARGET_EFAULT; 10098 __put_user(stfs.f_type, &target_stfs->f_type); 10099 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10100 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10101 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10102 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10103 __put_user(stfs.f_files, &target_stfs->f_files); 10104 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10105 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10106 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10107 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10108 __put_user(stfs.f_frsize, 
&target_stfs->f_frsize); 10109 #ifdef _STATFS_F_FLAGS 10110 __put_user(stfs.f_flags, &target_stfs->f_flags); 10111 #else 10112 __put_user(0, &target_stfs->f_flags); 10113 #endif 10114 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10115 unlock_user_struct(target_stfs, arg2, 1); 10116 } 10117 return ret; 10118 #endif 10119 #ifdef TARGET_NR_fstatfs 10120 case TARGET_NR_fstatfs: 10121 ret = get_errno(fstatfs(arg1, &stfs)); 10122 goto convert_statfs; 10123 #endif 10124 #ifdef TARGET_NR_statfs64 10125 case TARGET_NR_statfs64: 10126 if (!(p = lock_user_string(arg1))) { 10127 return -TARGET_EFAULT; 10128 } 10129 ret = get_errno(statfs(path(p), &stfs)); 10130 unlock_user(p, arg1, 0); 10131 convert_statfs64: 10132 if (!is_error(ret)) { 10133 struct target_statfs64 *target_stfs; 10134 10135 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 10136 return -TARGET_EFAULT; 10137 __put_user(stfs.f_type, &target_stfs->f_type); 10138 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 10139 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 10140 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 10141 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 10142 __put_user(stfs.f_files, &target_stfs->f_files); 10143 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 10144 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 10145 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 10146 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 10147 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 10148 #ifdef _STATFS_F_FLAGS 10149 __put_user(stfs.f_flags, &target_stfs->f_flags); 10150 #else 10151 __put_user(0, &target_stfs->f_flags); 10152 #endif 10153 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 10154 unlock_user_struct(target_stfs, arg3, 1); 10155 } 10156 return ret; 10157 case TARGET_NR_fstatfs64: 10158 ret = get_errno(fstatfs(arg1, &stfs)); 10159 goto convert_statfs64; 10160 #endif 10161 #ifdef TARGET_NR_socketcall 10162 case 
TARGET_NR_socketcall: 10163 return do_socketcall(arg1, arg2); 10164 #endif 10165 #ifdef TARGET_NR_accept 10166 case TARGET_NR_accept: 10167 return do_accept4(arg1, arg2, arg3, 0); 10168 #endif 10169 #ifdef TARGET_NR_accept4 10170 case TARGET_NR_accept4: 10171 return do_accept4(arg1, arg2, arg3, arg4); 10172 #endif 10173 #ifdef TARGET_NR_bind 10174 case TARGET_NR_bind: 10175 return do_bind(arg1, arg2, arg3); 10176 #endif 10177 #ifdef TARGET_NR_connect 10178 case TARGET_NR_connect: 10179 return do_connect(arg1, arg2, arg3); 10180 #endif 10181 #ifdef TARGET_NR_getpeername 10182 case TARGET_NR_getpeername: 10183 return do_getpeername(arg1, arg2, arg3); 10184 #endif 10185 #ifdef TARGET_NR_getsockname 10186 case TARGET_NR_getsockname: 10187 return do_getsockname(arg1, arg2, arg3); 10188 #endif 10189 #ifdef TARGET_NR_getsockopt 10190 case TARGET_NR_getsockopt: 10191 return do_getsockopt(arg1, arg2, arg3, arg4, arg5); 10192 #endif 10193 #ifdef TARGET_NR_listen 10194 case TARGET_NR_listen: 10195 return get_errno(listen(arg1, arg2)); 10196 #endif 10197 #ifdef TARGET_NR_recv 10198 case TARGET_NR_recv: 10199 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 10200 #endif 10201 #ifdef TARGET_NR_recvfrom 10202 case TARGET_NR_recvfrom: 10203 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 10204 #endif 10205 #ifdef TARGET_NR_recvmsg 10206 case TARGET_NR_recvmsg: 10207 return do_sendrecvmsg(arg1, arg2, arg3, 0); 10208 #endif 10209 #ifdef TARGET_NR_send 10210 case TARGET_NR_send: 10211 return do_sendto(arg1, arg2, arg3, arg4, 0, 0); 10212 #endif 10213 #ifdef TARGET_NR_sendmsg 10214 case TARGET_NR_sendmsg: 10215 return do_sendrecvmsg(arg1, arg2, arg3, 1); 10216 #endif 10217 #ifdef TARGET_NR_sendmmsg 10218 case TARGET_NR_sendmmsg: 10219 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 10220 #endif 10221 #ifdef TARGET_NR_recvmmsg 10222 case TARGET_NR_recvmmsg: 10223 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 10224 #endif 10225 #ifdef TARGET_NR_sendto 10226 case 
TARGET_NR_sendto: 10227 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 10228 #endif 10229 #ifdef TARGET_NR_shutdown 10230 case TARGET_NR_shutdown: 10231 return get_errno(shutdown(arg1, arg2)); 10232 #endif 10233 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 10234 case TARGET_NR_getrandom: 10235 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 10236 if (!p) { 10237 return -TARGET_EFAULT; 10238 } 10239 ret = get_errno(getrandom(p, arg2, arg3)); 10240 unlock_user(p, arg1, ret); 10241 return ret; 10242 #endif 10243 #ifdef TARGET_NR_socket 10244 case TARGET_NR_socket: 10245 return do_socket(arg1, arg2, arg3); 10246 #endif 10247 #ifdef TARGET_NR_socketpair 10248 case TARGET_NR_socketpair: 10249 return do_socketpair(arg1, arg2, arg3, arg4); 10250 #endif 10251 #ifdef TARGET_NR_setsockopt 10252 case TARGET_NR_setsockopt: 10253 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 10254 #endif 10255 #if defined(TARGET_NR_syslog) 10256 case TARGET_NR_syslog: 10257 { 10258 int len = arg2; 10259 10260 switch (arg1) { 10261 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */ 10262 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */ 10263 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */ 10264 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */ 10265 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */ 10266 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */ 10267 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */ 10268 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */ 10269 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3)); 10270 case TARGET_SYSLOG_ACTION_READ: /* Read from log */ 10271 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */ 10272 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */ 10273 { 10274 if (len < 0) { 10275 return -TARGET_EINVAL; 10276 } 10277 if (len == 0) { 10278 return 0; 10279 } 10280 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10281 if (!p) { 10282 
return -TARGET_EFAULT; 10283 } 10284 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 10285 unlock_user(p, arg2, arg3); 10286 } 10287 return ret; 10288 default: 10289 return -TARGET_EINVAL; 10290 } 10291 } 10292 break; 10293 #endif 10294 case TARGET_NR_setitimer: 10295 { 10296 struct itimerval value, ovalue, *pvalue; 10297 10298 if (arg2) { 10299 pvalue = &value; 10300 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 10301 || copy_from_user_timeval(&pvalue->it_value, 10302 arg2 + sizeof(struct target_timeval))) 10303 return -TARGET_EFAULT; 10304 } else { 10305 pvalue = NULL; 10306 } 10307 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 10308 if (!is_error(ret) && arg3) { 10309 if (copy_to_user_timeval(arg3, 10310 &ovalue.it_interval) 10311 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 10312 &ovalue.it_value)) 10313 return -TARGET_EFAULT; 10314 } 10315 } 10316 return ret; 10317 case TARGET_NR_getitimer: 10318 { 10319 struct itimerval value; 10320 10321 ret = get_errno(getitimer(arg1, &value)); 10322 if (!is_error(ret) && arg2) { 10323 if (copy_to_user_timeval(arg2, 10324 &value.it_interval) 10325 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 10326 &value.it_value)) 10327 return -TARGET_EFAULT; 10328 } 10329 } 10330 return ret; 10331 #ifdef TARGET_NR_stat 10332 case TARGET_NR_stat: 10333 if (!(p = lock_user_string(arg1))) { 10334 return -TARGET_EFAULT; 10335 } 10336 ret = get_errno(stat(path(p), &st)); 10337 unlock_user(p, arg1, 0); 10338 goto do_stat; 10339 #endif 10340 #ifdef TARGET_NR_lstat 10341 case TARGET_NR_lstat: 10342 if (!(p = lock_user_string(arg1))) { 10343 return -TARGET_EFAULT; 10344 } 10345 ret = get_errno(lstat(path(p), &st)); 10346 unlock_user(p, arg1, 0); 10347 goto do_stat; 10348 #endif 10349 #ifdef TARGET_NR_fstat 10350 case TARGET_NR_fstat: 10351 { 10352 ret = get_errno(fstat(arg1, &st)); 10353 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 10354 do_stat: 10355 #endif 10356 if 
(!is_error(ret)) { 10357 struct target_stat *target_st; 10358 10359 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 10360 return -TARGET_EFAULT; 10361 memset(target_st, 0, sizeof(*target_st)); 10362 __put_user(st.st_dev, &target_st->st_dev); 10363 __put_user(st.st_ino, &target_st->st_ino); 10364 __put_user(st.st_mode, &target_st->st_mode); 10365 __put_user(st.st_uid, &target_st->st_uid); 10366 __put_user(st.st_gid, &target_st->st_gid); 10367 __put_user(st.st_nlink, &target_st->st_nlink); 10368 __put_user(st.st_rdev, &target_st->st_rdev); 10369 __put_user(st.st_size, &target_st->st_size); 10370 __put_user(st.st_blksize, &target_st->st_blksize); 10371 __put_user(st.st_blocks, &target_st->st_blocks); 10372 __put_user(st.st_atime, &target_st->target_st_atime); 10373 __put_user(st.st_mtime, &target_st->target_st_mtime); 10374 __put_user(st.st_ctime, &target_st->target_st_ctime); 10375 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC) 10376 __put_user(st.st_atim.tv_nsec, 10377 &target_st->target_st_atime_nsec); 10378 __put_user(st.st_mtim.tv_nsec, 10379 &target_st->target_st_mtime_nsec); 10380 __put_user(st.st_ctim.tv_nsec, 10381 &target_st->target_st_ctime_nsec); 10382 #endif 10383 unlock_user_struct(target_st, arg2, 1); 10384 } 10385 } 10386 return ret; 10387 #endif 10388 case TARGET_NR_vhangup: 10389 return get_errno(vhangup()); 10390 #ifdef TARGET_NR_syscall 10391 case TARGET_NR_syscall: 10392 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 10393 arg6, arg7, arg8, 0); 10394 #endif 10395 #if defined(TARGET_NR_wait4) 10396 case TARGET_NR_wait4: 10397 { 10398 int status; 10399 abi_long status_ptr = arg2; 10400 struct rusage rusage, *rusage_ptr; 10401 abi_ulong target_rusage = arg4; 10402 abi_long rusage_err; 10403 if (target_rusage) 10404 rusage_ptr = &rusage; 10405 else 10406 rusage_ptr = NULL; 10407 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); 10408 if (!is_error(ret)) { 10409 if (status_ptr && ret) { 
10410 status = host_to_target_waitstatus(status); 10411 if (put_user_s32(status, status_ptr)) 10412 return -TARGET_EFAULT; 10413 } 10414 if (target_rusage) { 10415 rusage_err = host_to_target_rusage(target_rusage, &rusage); 10416 if (rusage_err) { 10417 ret = rusage_err; 10418 } 10419 } 10420 } 10421 } 10422 return ret; 10423 #endif 10424 #ifdef TARGET_NR_swapoff 10425 case TARGET_NR_swapoff: 10426 if (!(p = lock_user_string(arg1))) 10427 return -TARGET_EFAULT; 10428 ret = get_errno(swapoff(p)); 10429 unlock_user(p, arg1, 0); 10430 return ret; 10431 #endif 10432 case TARGET_NR_sysinfo: 10433 { 10434 struct target_sysinfo *target_value; 10435 struct sysinfo value; 10436 ret = get_errno(sysinfo(&value)); 10437 if (!is_error(ret) && arg1) 10438 { 10439 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 10440 return -TARGET_EFAULT; 10441 __put_user(value.uptime, &target_value->uptime); 10442 __put_user(value.loads[0], &target_value->loads[0]); 10443 __put_user(value.loads[1], &target_value->loads[1]); 10444 __put_user(value.loads[2], &target_value->loads[2]); 10445 __put_user(value.totalram, &target_value->totalram); 10446 __put_user(value.freeram, &target_value->freeram); 10447 __put_user(value.sharedram, &target_value->sharedram); 10448 __put_user(value.bufferram, &target_value->bufferram); 10449 __put_user(value.totalswap, &target_value->totalswap); 10450 __put_user(value.freeswap, &target_value->freeswap); 10451 __put_user(value.procs, &target_value->procs); 10452 __put_user(value.totalhigh, &target_value->totalhigh); 10453 __put_user(value.freehigh, &target_value->freehigh); 10454 __put_user(value.mem_unit, &target_value->mem_unit); 10455 unlock_user_struct(target_value, arg1, 1); 10456 } 10457 } 10458 return ret; 10459 #ifdef TARGET_NR_ipc 10460 case TARGET_NR_ipc: 10461 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6); 10462 #endif 10463 #ifdef TARGET_NR_semget 10464 case TARGET_NR_semget: 10465 return get_errno(semget(arg1, arg2, arg3)); 
10466 #endif 10467 #ifdef TARGET_NR_semop 10468 case TARGET_NR_semop: 10469 return do_semtimedop(arg1, arg2, arg3, 0, false); 10470 #endif 10471 #ifdef TARGET_NR_semtimedop 10472 case TARGET_NR_semtimedop: 10473 return do_semtimedop(arg1, arg2, arg3, arg4, false); 10474 #endif 10475 #ifdef TARGET_NR_semtimedop_time64 10476 case TARGET_NR_semtimedop_time64: 10477 return do_semtimedop(arg1, arg2, arg3, arg4, true); 10478 #endif 10479 #ifdef TARGET_NR_semctl 10480 case TARGET_NR_semctl: 10481 return do_semctl(arg1, arg2, arg3, arg4); 10482 #endif 10483 #ifdef TARGET_NR_msgctl 10484 case TARGET_NR_msgctl: 10485 return do_msgctl(arg1, arg2, arg3); 10486 #endif 10487 #ifdef TARGET_NR_msgget 10488 case TARGET_NR_msgget: 10489 return get_errno(msgget(arg1, arg2)); 10490 #endif 10491 #ifdef TARGET_NR_msgrcv 10492 case TARGET_NR_msgrcv: 10493 return do_msgrcv(arg1, arg2, arg3, arg4, arg5); 10494 #endif 10495 #ifdef TARGET_NR_msgsnd 10496 case TARGET_NR_msgsnd: 10497 return do_msgsnd(arg1, arg2, arg3, arg4); 10498 #endif 10499 #ifdef TARGET_NR_shmget 10500 case TARGET_NR_shmget: 10501 return get_errno(shmget(arg1, arg2, arg3)); 10502 #endif 10503 #ifdef TARGET_NR_shmctl 10504 case TARGET_NR_shmctl: 10505 return do_shmctl(arg1, arg2, arg3); 10506 #endif 10507 #ifdef TARGET_NR_shmat 10508 case TARGET_NR_shmat: 10509 return do_shmat(cpu_env, arg1, arg2, arg3); 10510 #endif 10511 #ifdef TARGET_NR_shmdt 10512 case TARGET_NR_shmdt: 10513 return do_shmdt(arg1); 10514 #endif 10515 case TARGET_NR_fsync: 10516 return get_errno(fsync(arg1)); 10517 case TARGET_NR_clone: 10518 /* Linux manages to have three different orderings for its 10519 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 10520 * match the kernel's CONFIG_CLONE_* settings. 10521 * Microblaze is further special in that it uses a sixth 10522 * implicit argument to clone for the TLS pointer. 
10523 */ 10524 #if defined(TARGET_MICROBLAZE) 10525 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 10526 #elif defined(TARGET_CLONE_BACKWARDS) 10527 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 10528 #elif defined(TARGET_CLONE_BACKWARDS2) 10529 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 10530 #else 10531 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 10532 #endif 10533 return ret; 10534 #ifdef __NR_exit_group 10535 /* new thread calls */ 10536 case TARGET_NR_exit_group: 10537 preexit_cleanup(cpu_env, arg1); 10538 return get_errno(exit_group(arg1)); 10539 #endif 10540 case TARGET_NR_setdomainname: 10541 if (!(p = lock_user_string(arg1))) 10542 return -TARGET_EFAULT; 10543 ret = get_errno(setdomainname(p, arg2)); 10544 unlock_user(p, arg1, 0); 10545 return ret; 10546 case TARGET_NR_uname: 10547 /* no need to transcode because we use the linux syscall */ 10548 { 10549 struct new_utsname * buf; 10550 10551 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 10552 return -TARGET_EFAULT; 10553 ret = get_errno(sys_uname(buf)); 10554 if (!is_error(ret)) { 10555 /* Overwrite the native machine name with whatever is being 10556 emulated. */ 10557 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env), 10558 sizeof(buf->machine)); 10559 /* Allow the user to override the reported release. 
*/ 10560 if (qemu_uname_release && *qemu_uname_release) { 10561 g_strlcpy(buf->release, qemu_uname_release, 10562 sizeof(buf->release)); 10563 } 10564 } 10565 unlock_user_struct(buf, arg1, 1); 10566 } 10567 return ret; 10568 #ifdef TARGET_I386 10569 case TARGET_NR_modify_ldt: 10570 return do_modify_ldt(cpu_env, arg1, arg2, arg3); 10571 #if !defined(TARGET_X86_64) 10572 case TARGET_NR_vm86: 10573 return do_vm86(cpu_env, arg1, arg2); 10574 #endif 10575 #endif 10576 #if defined(TARGET_NR_adjtimex) 10577 case TARGET_NR_adjtimex: 10578 { 10579 struct timex host_buf; 10580 10581 if (target_to_host_timex(&host_buf, arg1) != 0) { 10582 return -TARGET_EFAULT; 10583 } 10584 ret = get_errno(adjtimex(&host_buf)); 10585 if (!is_error(ret)) { 10586 if (host_to_target_timex(arg1, &host_buf) != 0) { 10587 return -TARGET_EFAULT; 10588 } 10589 } 10590 } 10591 return ret; 10592 #endif 10593 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) 10594 case TARGET_NR_clock_adjtime: 10595 { 10596 struct timex htx, *phtx = &htx; 10597 10598 if (target_to_host_timex(phtx, arg2) != 0) { 10599 return -TARGET_EFAULT; 10600 } 10601 ret = get_errno(clock_adjtime(arg1, phtx)); 10602 if (!is_error(ret) && phtx) { 10603 if (host_to_target_timex(arg2, phtx) != 0) { 10604 return -TARGET_EFAULT; 10605 } 10606 } 10607 } 10608 return ret; 10609 #endif 10610 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME) 10611 case TARGET_NR_clock_adjtime64: 10612 { 10613 struct timex htx; 10614 10615 if (target_to_host_timex64(&htx, arg2) != 0) { 10616 return -TARGET_EFAULT; 10617 } 10618 ret = get_errno(clock_adjtime(arg1, &htx)); 10619 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) { 10620 return -TARGET_EFAULT; 10621 } 10622 } 10623 return ret; 10624 #endif 10625 case TARGET_NR_getpgid: 10626 return get_errno(getpgid(arg1)); 10627 case TARGET_NR_fchdir: 10628 return get_errno(fchdir(arg1)); 10629 case TARGET_NR_personality: 10630 return 
get_errno(personality(arg1)); 10631 #ifdef TARGET_NR__llseek /* Not on alpha */ 10632 case TARGET_NR__llseek: 10633 { 10634 int64_t res; 10635 #if !defined(__NR_llseek) 10636 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); 10637 if (res == -1) { 10638 ret = get_errno(res); 10639 } else { 10640 ret = 0; 10641 } 10642 #else 10643 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 10644 #endif 10645 if ((ret == 0) && put_user_s64(res, arg4)) { 10646 return -TARGET_EFAULT; 10647 } 10648 } 10649 return ret; 10650 #endif 10651 #ifdef TARGET_NR_getdents 10652 case TARGET_NR_getdents: 10653 return do_getdents(arg1, arg2, arg3); 10654 #endif /* TARGET_NR_getdents */ 10655 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 10656 case TARGET_NR_getdents64: 10657 return do_getdents64(arg1, arg2, arg3); 10658 #endif /* TARGET_NR_getdents64 */ 10659 #if defined(TARGET_NR__newselect) 10660 case TARGET_NR__newselect: 10661 return do_select(arg1, arg2, arg3, arg4, arg5); 10662 #endif 10663 #ifdef TARGET_NR_poll 10664 case TARGET_NR_poll: 10665 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false); 10666 #endif 10667 #ifdef TARGET_NR_ppoll 10668 case TARGET_NR_ppoll: 10669 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false); 10670 #endif 10671 #ifdef TARGET_NR_ppoll_time64 10672 case TARGET_NR_ppoll_time64: 10673 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true); 10674 #endif 10675 case TARGET_NR_flock: 10676 /* NOTE: the flock constant seems to be the same for every 10677 Linux platform */ 10678 return get_errno(safe_flock(arg1, arg2)); 10679 case TARGET_NR_readv: 10680 { 10681 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10682 if (vec != NULL) { 10683 ret = get_errno(safe_readv(arg1, vec, arg3)); 10684 unlock_iovec(vec, arg2, arg3, 1); 10685 } else { 10686 ret = -host_to_target_errno(errno); 10687 } 10688 } 10689 return ret; 10690 case TARGET_NR_writev: 10691 { 10692 struct iovec *vec = lock_iovec(VERIFY_READ, 
arg2, arg3, 1); 10693 if (vec != NULL) { 10694 ret = get_errno(safe_writev(arg1, vec, arg3)); 10695 unlock_iovec(vec, arg2, arg3, 0); 10696 } else { 10697 ret = -host_to_target_errno(errno); 10698 } 10699 } 10700 return ret; 10701 #if defined(TARGET_NR_preadv) 10702 case TARGET_NR_preadv: 10703 { 10704 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 10705 if (vec != NULL) { 10706 unsigned long low, high; 10707 10708 target_to_host_low_high(arg4, arg5, &low, &high); 10709 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high)); 10710 unlock_iovec(vec, arg2, arg3, 1); 10711 } else { 10712 ret = -host_to_target_errno(errno); 10713 } 10714 } 10715 return ret; 10716 #endif 10717 #if defined(TARGET_NR_pwritev) 10718 case TARGET_NR_pwritev: 10719 { 10720 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 10721 if (vec != NULL) { 10722 unsigned long low, high; 10723 10724 target_to_host_low_high(arg4, arg5, &low, &high); 10725 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high)); 10726 unlock_iovec(vec, arg2, arg3, 0); 10727 } else { 10728 ret = -host_to_target_errno(errno); 10729 } 10730 } 10731 return ret; 10732 #endif 10733 case TARGET_NR_getsid: 10734 return get_errno(getsid(arg1)); 10735 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 10736 case TARGET_NR_fdatasync: 10737 return get_errno(fdatasync(arg1)); 10738 #endif 10739 case TARGET_NR_sched_getaffinity: 10740 { 10741 unsigned int mask_size; 10742 unsigned long *mask; 10743 10744 /* 10745 * sched_getaffinity needs multiples of ulong, so need to take 10746 * care of mismatches between target ulong and host ulong sizes. 
10747 */ 10748 if (arg2 & (sizeof(abi_ulong) - 1)) { 10749 return -TARGET_EINVAL; 10750 } 10751 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10752 10753 mask = alloca(mask_size); 10754 memset(mask, 0, mask_size); 10755 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 10756 10757 if (!is_error(ret)) { 10758 if (ret > arg2) { 10759 /* More data returned than the caller's buffer will fit. 10760 * This only happens if sizeof(abi_long) < sizeof(long) 10761 * and the caller passed us a buffer holding an odd number 10762 * of abi_longs. If the host kernel is actually using the 10763 * extra 4 bytes then fail EINVAL; otherwise we can just 10764 * ignore them and only copy the interesting part. 10765 */ 10766 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 10767 if (numcpus > arg2 * 8) { 10768 return -TARGET_EINVAL; 10769 } 10770 ret = arg2; 10771 } 10772 10773 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) { 10774 return -TARGET_EFAULT; 10775 } 10776 } 10777 } 10778 return ret; 10779 case TARGET_NR_sched_setaffinity: 10780 { 10781 unsigned int mask_size; 10782 unsigned long *mask; 10783 10784 /* 10785 * sched_setaffinity needs multiples of ulong, so need to take 10786 * care of mismatches between target ulong and host ulong sizes. 10787 */ 10788 if (arg2 & (sizeof(abi_ulong) - 1)) { 10789 return -TARGET_EINVAL; 10790 } 10791 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 10792 mask = alloca(mask_size); 10793 10794 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2); 10795 if (ret) { 10796 return ret; 10797 } 10798 10799 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 10800 } 10801 case TARGET_NR_getcpu: 10802 { 10803 unsigned cpu, node; 10804 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL, 10805 arg2 ? 
&node : NULL, 10806 NULL)); 10807 if (is_error(ret)) { 10808 return ret; 10809 } 10810 if (arg1 && put_user_u32(cpu, arg1)) { 10811 return -TARGET_EFAULT; 10812 } 10813 if (arg2 && put_user_u32(node, arg2)) { 10814 return -TARGET_EFAULT; 10815 } 10816 } 10817 return ret; 10818 case TARGET_NR_sched_setparam: 10819 { 10820 struct target_sched_param *target_schp; 10821 struct sched_param schp; 10822 10823 if (arg2 == 0) { 10824 return -TARGET_EINVAL; 10825 } 10826 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) { 10827 return -TARGET_EFAULT; 10828 } 10829 schp.sched_priority = tswap32(target_schp->sched_priority); 10830 unlock_user_struct(target_schp, arg2, 0); 10831 return get_errno(sys_sched_setparam(arg1, &schp)); 10832 } 10833 case TARGET_NR_sched_getparam: 10834 { 10835 struct target_sched_param *target_schp; 10836 struct sched_param schp; 10837 10838 if (arg2 == 0) { 10839 return -TARGET_EINVAL; 10840 } 10841 ret = get_errno(sys_sched_getparam(arg1, &schp)); 10842 if (!is_error(ret)) { 10843 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) { 10844 return -TARGET_EFAULT; 10845 } 10846 target_schp->sched_priority = tswap32(schp.sched_priority); 10847 unlock_user_struct(target_schp, arg2, 1); 10848 } 10849 } 10850 return ret; 10851 case TARGET_NR_sched_setscheduler: 10852 { 10853 struct target_sched_param *target_schp; 10854 struct sched_param schp; 10855 if (arg3 == 0) { 10856 return -TARGET_EINVAL; 10857 } 10858 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) { 10859 return -TARGET_EFAULT; 10860 } 10861 schp.sched_priority = tswap32(target_schp->sched_priority); 10862 unlock_user_struct(target_schp, arg3, 0); 10863 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp)); 10864 } 10865 case TARGET_NR_sched_getscheduler: 10866 return get_errno(sys_sched_getscheduler(arg1)); 10867 case TARGET_NR_sched_getattr: 10868 { 10869 struct target_sched_attr *target_scha; 10870 struct sched_attr scha; 10871 if (arg2 == 0) { 10872 return 
-TARGET_EINVAL; 10873 } 10874 if (arg3 > sizeof(scha)) { 10875 arg3 = sizeof(scha); 10876 } 10877 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4)); 10878 if (!is_error(ret)) { 10879 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0); 10880 if (!target_scha) { 10881 return -TARGET_EFAULT; 10882 } 10883 target_scha->size = tswap32(scha.size); 10884 target_scha->sched_policy = tswap32(scha.sched_policy); 10885 target_scha->sched_flags = tswap64(scha.sched_flags); 10886 target_scha->sched_nice = tswap32(scha.sched_nice); 10887 target_scha->sched_priority = tswap32(scha.sched_priority); 10888 target_scha->sched_runtime = tswap64(scha.sched_runtime); 10889 target_scha->sched_deadline = tswap64(scha.sched_deadline); 10890 target_scha->sched_period = tswap64(scha.sched_period); 10891 if (scha.size > offsetof(struct sched_attr, sched_util_min)) { 10892 target_scha->sched_util_min = tswap32(scha.sched_util_min); 10893 target_scha->sched_util_max = tswap32(scha.sched_util_max); 10894 } 10895 unlock_user(target_scha, arg2, arg3); 10896 } 10897 return ret; 10898 } 10899 case TARGET_NR_sched_setattr: 10900 { 10901 struct target_sched_attr *target_scha; 10902 struct sched_attr scha; 10903 uint32_t size; 10904 int zeroed; 10905 if (arg2 == 0) { 10906 return -TARGET_EINVAL; 10907 } 10908 if (get_user_u32(size, arg2)) { 10909 return -TARGET_EFAULT; 10910 } 10911 if (!size) { 10912 size = offsetof(struct target_sched_attr, sched_util_min); 10913 } 10914 if (size < offsetof(struct target_sched_attr, sched_util_min)) { 10915 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 10916 return -TARGET_EFAULT; 10917 } 10918 return -TARGET_E2BIG; 10919 } 10920 10921 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size); 10922 if (zeroed < 0) { 10923 return zeroed; 10924 } else if (zeroed == 0) { 10925 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) { 10926 return -TARGET_EFAULT; 10927 } 10928 return -TARGET_E2BIG; 10929 } 10930 if (size > 
sizeof(struct target_sched_attr)) { 10931 size = sizeof(struct target_sched_attr); 10932 } 10933 10934 target_scha = lock_user(VERIFY_READ, arg2, size, 1); 10935 if (!target_scha) { 10936 return -TARGET_EFAULT; 10937 } 10938 scha.size = size; 10939 scha.sched_policy = tswap32(target_scha->sched_policy); 10940 scha.sched_flags = tswap64(target_scha->sched_flags); 10941 scha.sched_nice = tswap32(target_scha->sched_nice); 10942 scha.sched_priority = tswap32(target_scha->sched_priority); 10943 scha.sched_runtime = tswap64(target_scha->sched_runtime); 10944 scha.sched_deadline = tswap64(target_scha->sched_deadline); 10945 scha.sched_period = tswap64(target_scha->sched_period); 10946 if (size > offsetof(struct target_sched_attr, sched_util_min)) { 10947 scha.sched_util_min = tswap32(target_scha->sched_util_min); 10948 scha.sched_util_max = tswap32(target_scha->sched_util_max); 10949 } 10950 unlock_user(target_scha, arg2, 0); 10951 return get_errno(sys_sched_setattr(arg1, &scha, arg3)); 10952 } 10953 case TARGET_NR_sched_yield: 10954 return get_errno(sched_yield()); 10955 case TARGET_NR_sched_get_priority_max: 10956 return get_errno(sched_get_priority_max(arg1)); 10957 case TARGET_NR_sched_get_priority_min: 10958 return get_errno(sched_get_priority_min(arg1)); 10959 #ifdef TARGET_NR_sched_rr_get_interval 10960 case TARGET_NR_sched_rr_get_interval: 10961 { 10962 struct timespec ts; 10963 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10964 if (!is_error(ret)) { 10965 ret = host_to_target_timespec(arg2, &ts); 10966 } 10967 } 10968 return ret; 10969 #endif 10970 #ifdef TARGET_NR_sched_rr_get_interval_time64 10971 case TARGET_NR_sched_rr_get_interval_time64: 10972 { 10973 struct timespec ts; 10974 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 10975 if (!is_error(ret)) { 10976 ret = host_to_target_timespec64(arg2, &ts); 10977 } 10978 } 10979 return ret; 10980 #endif 10981 #if defined(TARGET_NR_nanosleep) 10982 case TARGET_NR_nanosleep: 10983 { 10984 struct timespec 
req, rem; 10985 target_to_host_timespec(&req, arg1); 10986 ret = get_errno(safe_nanosleep(&req, &rem)); 10987 if (is_error(ret) && arg2) { 10988 host_to_target_timespec(arg2, &rem); 10989 } 10990 } 10991 return ret; 10992 #endif 10993 case TARGET_NR_prctl: 10994 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5); 10995 break; 10996 #ifdef TARGET_NR_arch_prctl 10997 case TARGET_NR_arch_prctl: 10998 return do_arch_prctl(cpu_env, arg1, arg2); 10999 #endif 11000 #ifdef TARGET_NR_pread64 11001 case TARGET_NR_pread64: 11002 if (regpairs_aligned(cpu_env, num)) { 11003 arg4 = arg5; 11004 arg5 = arg6; 11005 } 11006 if (arg2 == 0 && arg3 == 0) { 11007 /* Special-case NULL buffer and zero length, which should succeed */ 11008 p = 0; 11009 } else { 11010 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11011 if (!p) { 11012 return -TARGET_EFAULT; 11013 } 11014 } 11015 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 11016 unlock_user(p, arg2, ret); 11017 return ret; 11018 case TARGET_NR_pwrite64: 11019 if (regpairs_aligned(cpu_env, num)) { 11020 arg4 = arg5; 11021 arg5 = arg6; 11022 } 11023 if (arg2 == 0 && arg3 == 0) { 11024 /* Special-case NULL buffer and zero length, which should succeed */ 11025 p = 0; 11026 } else { 11027 p = lock_user(VERIFY_READ, arg2, arg3, 1); 11028 if (!p) { 11029 return -TARGET_EFAULT; 11030 } 11031 } 11032 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 11033 unlock_user(p, arg2, 0); 11034 return ret; 11035 #endif 11036 case TARGET_NR_getcwd: 11037 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 11038 return -TARGET_EFAULT; 11039 ret = get_errno(sys_getcwd1(p, arg2)); 11040 unlock_user(p, arg1, ret); 11041 return ret; 11042 case TARGET_NR_capget: 11043 case TARGET_NR_capset: 11044 { 11045 struct target_user_cap_header *target_header; 11046 struct target_user_cap_data *target_data = NULL; 11047 struct __user_cap_header_struct header; 11048 struct __user_cap_data_struct data[2]; 11049 struct 
__user_cap_data_struct *dataptr = NULL; 11050 int i, target_datalen; 11051 int data_items = 1; 11052 11053 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 11054 return -TARGET_EFAULT; 11055 } 11056 header.version = tswap32(target_header->version); 11057 header.pid = tswap32(target_header->pid); 11058 11059 if (header.version != _LINUX_CAPABILITY_VERSION) { 11060 /* Version 2 and up takes pointer to two user_data structs */ 11061 data_items = 2; 11062 } 11063 11064 target_datalen = sizeof(*target_data) * data_items; 11065 11066 if (arg2) { 11067 if (num == TARGET_NR_capget) { 11068 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 11069 } else { 11070 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 11071 } 11072 if (!target_data) { 11073 unlock_user_struct(target_header, arg1, 0); 11074 return -TARGET_EFAULT; 11075 } 11076 11077 if (num == TARGET_NR_capset) { 11078 for (i = 0; i < data_items; i++) { 11079 data[i].effective = tswap32(target_data[i].effective); 11080 data[i].permitted = tswap32(target_data[i].permitted); 11081 data[i].inheritable = tswap32(target_data[i].inheritable); 11082 } 11083 } 11084 11085 dataptr = data; 11086 } 11087 11088 if (num == TARGET_NR_capget) { 11089 ret = get_errno(capget(&header, dataptr)); 11090 } else { 11091 ret = get_errno(capset(&header, dataptr)); 11092 } 11093 11094 /* The kernel always updates version for both capget and capset */ 11095 target_header->version = tswap32(header.version); 11096 unlock_user_struct(target_header, arg1, 1); 11097 11098 if (arg2) { 11099 if (num == TARGET_NR_capget) { 11100 for (i = 0; i < data_items; i++) { 11101 target_data[i].effective = tswap32(data[i].effective); 11102 target_data[i].permitted = tswap32(data[i].permitted); 11103 target_data[i].inheritable = tswap32(data[i].inheritable); 11104 } 11105 unlock_user(target_data, arg2, target_datalen); 11106 } else { 11107 unlock_user(target_data, arg2, 0); 11108 } 11109 } 11110 return ret; 11111 } 11112 
case TARGET_NR_sigaltstack: 11113 return do_sigaltstack(arg1, arg2, cpu_env); 11114 11115 #ifdef CONFIG_SENDFILE 11116 #ifdef TARGET_NR_sendfile 11117 case TARGET_NR_sendfile: 11118 { 11119 off_t *offp = NULL; 11120 off_t off; 11121 if (arg3) { 11122 ret = get_user_sal(off, arg3); 11123 if (is_error(ret)) { 11124 return ret; 11125 } 11126 offp = &off; 11127 } 11128 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11129 if (!is_error(ret) && arg3) { 11130 abi_long ret2 = put_user_sal(off, arg3); 11131 if (is_error(ret2)) { 11132 ret = ret2; 11133 } 11134 } 11135 return ret; 11136 } 11137 #endif 11138 #ifdef TARGET_NR_sendfile64 11139 case TARGET_NR_sendfile64: 11140 { 11141 off_t *offp = NULL; 11142 off_t off; 11143 if (arg3) { 11144 ret = get_user_s64(off, arg3); 11145 if (is_error(ret)) { 11146 return ret; 11147 } 11148 offp = &off; 11149 } 11150 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 11151 if (!is_error(ret) && arg3) { 11152 abi_long ret2 = put_user_s64(off, arg3); 11153 if (is_error(ret2)) { 11154 ret = ret2; 11155 } 11156 } 11157 return ret; 11158 } 11159 #endif 11160 #endif 11161 #ifdef TARGET_NR_vfork 11162 case TARGET_NR_vfork: 11163 return get_errno(do_fork(cpu_env, 11164 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD, 11165 0, 0, 0, 0)); 11166 #endif 11167 #ifdef TARGET_NR_ugetrlimit 11168 case TARGET_NR_ugetrlimit: 11169 { 11170 struct rlimit rlim; 11171 int resource = target_to_host_resource(arg1); 11172 ret = get_errno(getrlimit(resource, &rlim)); 11173 if (!is_error(ret)) { 11174 struct target_rlimit *target_rlim; 11175 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 11176 return -TARGET_EFAULT; 11177 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 11178 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 11179 unlock_user_struct(target_rlim, arg2, 1); 11180 } 11181 return ret; 11182 } 11183 #endif 11184 #ifdef TARGET_NR_truncate64 11185 case TARGET_NR_truncate64: 11186 if (!(p = lock_user_string(arg1))) 11187 
return -TARGET_EFAULT; 11188 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 11189 unlock_user(p, arg1, 0); 11190 return ret; 11191 #endif 11192 #ifdef TARGET_NR_ftruncate64 11193 case TARGET_NR_ftruncate64: 11194 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 11195 #endif 11196 #ifdef TARGET_NR_stat64 11197 case TARGET_NR_stat64: 11198 if (!(p = lock_user_string(arg1))) { 11199 return -TARGET_EFAULT; 11200 } 11201 ret = get_errno(stat(path(p), &st)); 11202 unlock_user(p, arg1, 0); 11203 if (!is_error(ret)) 11204 ret = host_to_target_stat64(cpu_env, arg2, &st); 11205 return ret; 11206 #endif 11207 #ifdef TARGET_NR_lstat64 11208 case TARGET_NR_lstat64: 11209 if (!(p = lock_user_string(arg1))) { 11210 return -TARGET_EFAULT; 11211 } 11212 ret = get_errno(lstat(path(p), &st)); 11213 unlock_user(p, arg1, 0); 11214 if (!is_error(ret)) 11215 ret = host_to_target_stat64(cpu_env, arg2, &st); 11216 return ret; 11217 #endif 11218 #ifdef TARGET_NR_fstat64 11219 case TARGET_NR_fstat64: 11220 ret = get_errno(fstat(arg1, &st)); 11221 if (!is_error(ret)) 11222 ret = host_to_target_stat64(cpu_env, arg2, &st); 11223 return ret; 11224 #endif 11225 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 11226 #ifdef TARGET_NR_fstatat64 11227 case TARGET_NR_fstatat64: 11228 #endif 11229 #ifdef TARGET_NR_newfstatat 11230 case TARGET_NR_newfstatat: 11231 #endif 11232 if (!(p = lock_user_string(arg2))) { 11233 return -TARGET_EFAULT; 11234 } 11235 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 11236 unlock_user(p, arg2, 0); 11237 if (!is_error(ret)) 11238 ret = host_to_target_stat64(cpu_env, arg3, &st); 11239 return ret; 11240 #endif 11241 #if defined(TARGET_NR_statx) 11242 case TARGET_NR_statx: 11243 { 11244 struct target_statx *target_stx; 11245 int dirfd = arg1; 11246 int flags = arg3; 11247 11248 p = lock_user_string(arg2); 11249 if (p == NULL) { 11250 return -TARGET_EFAULT; 11251 } 11252 #if defined(__NR_statx) 11253 { 11254 /* 11255 * It is 
assumed that struct statx is architecture independent. 11256 */ 11257 struct target_statx host_stx; 11258 int mask = arg4; 11259 11260 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx)); 11261 if (!is_error(ret)) { 11262 if (host_to_target_statx(&host_stx, arg5) != 0) { 11263 unlock_user(p, arg2, 0); 11264 return -TARGET_EFAULT; 11265 } 11266 } 11267 11268 if (ret != -TARGET_ENOSYS) { 11269 unlock_user(p, arg2, 0); 11270 return ret; 11271 } 11272 } 11273 #endif 11274 ret = get_errno(fstatat(dirfd, path(p), &st, flags)); 11275 unlock_user(p, arg2, 0); 11276 11277 if (!is_error(ret)) { 11278 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) { 11279 return -TARGET_EFAULT; 11280 } 11281 memset(target_stx, 0, sizeof(*target_stx)); 11282 __put_user(major(st.st_dev), &target_stx->stx_dev_major); 11283 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor); 11284 __put_user(st.st_ino, &target_stx->stx_ino); 11285 __put_user(st.st_mode, &target_stx->stx_mode); 11286 __put_user(st.st_uid, &target_stx->stx_uid); 11287 __put_user(st.st_gid, &target_stx->stx_gid); 11288 __put_user(st.st_nlink, &target_stx->stx_nlink); 11289 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major); 11290 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor); 11291 __put_user(st.st_size, &target_stx->stx_size); 11292 __put_user(st.st_blksize, &target_stx->stx_blksize); 11293 __put_user(st.st_blocks, &target_stx->stx_blocks); 11294 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec); 11295 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec); 11296 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec); 11297 unlock_user_struct(target_stx, arg5, 1); 11298 } 11299 } 11300 return ret; 11301 #endif 11302 #ifdef TARGET_NR_lchown 11303 case TARGET_NR_lchown: 11304 if (!(p = lock_user_string(arg1))) 11305 return -TARGET_EFAULT; 11306 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 11307 unlock_user(p, arg1, 0); 11308 return ret; 11309 #endif 11310 #ifdef 
TARGET_NR_getuid 11311 case TARGET_NR_getuid: 11312 return get_errno(high2lowuid(getuid())); 11313 #endif 11314 #ifdef TARGET_NR_getgid 11315 case TARGET_NR_getgid: 11316 return get_errno(high2lowgid(getgid())); 11317 #endif 11318 #ifdef TARGET_NR_geteuid 11319 case TARGET_NR_geteuid: 11320 return get_errno(high2lowuid(geteuid())); 11321 #endif 11322 #ifdef TARGET_NR_getegid 11323 case TARGET_NR_getegid: 11324 return get_errno(high2lowgid(getegid())); 11325 #endif 11326 case TARGET_NR_setreuid: 11327 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 11328 case TARGET_NR_setregid: 11329 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 11330 case TARGET_NR_getgroups: 11331 { 11332 int gidsetsize = arg1; 11333 target_id *target_grouplist; 11334 gid_t *grouplist; 11335 int i; 11336 11337 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11338 ret = get_errno(getgroups(gidsetsize, grouplist)); 11339 if (gidsetsize == 0) 11340 return ret; 11341 if (!is_error(ret)) { 11342 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 11343 if (!target_grouplist) 11344 return -TARGET_EFAULT; 11345 for(i = 0;i < ret; i++) 11346 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 11347 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 11348 } 11349 } 11350 return ret; 11351 case TARGET_NR_setgroups: 11352 { 11353 int gidsetsize = arg1; 11354 target_id *target_grouplist; 11355 gid_t *grouplist = NULL; 11356 int i; 11357 if (gidsetsize) { 11358 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11359 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 11360 if (!target_grouplist) { 11361 return -TARGET_EFAULT; 11362 } 11363 for (i = 0; i < gidsetsize; i++) { 11364 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 11365 } 11366 unlock_user(target_grouplist, arg2, 0); 11367 } 11368 return get_errno(setgroups(gidsetsize, grouplist)); 11369 } 11370 case 
TARGET_NR_fchown: 11371 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 11372 #if defined(TARGET_NR_fchownat) 11373 case TARGET_NR_fchownat: 11374 if (!(p = lock_user_string(arg2))) 11375 return -TARGET_EFAULT; 11376 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 11377 low2highgid(arg4), arg5)); 11378 unlock_user(p, arg2, 0); 11379 return ret; 11380 #endif 11381 #ifdef TARGET_NR_setresuid 11382 case TARGET_NR_setresuid: 11383 return get_errno(sys_setresuid(low2highuid(arg1), 11384 low2highuid(arg2), 11385 low2highuid(arg3))); 11386 #endif 11387 #ifdef TARGET_NR_getresuid 11388 case TARGET_NR_getresuid: 11389 { 11390 uid_t ruid, euid, suid; 11391 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11392 if (!is_error(ret)) { 11393 if (put_user_id(high2lowuid(ruid), arg1) 11394 || put_user_id(high2lowuid(euid), arg2) 11395 || put_user_id(high2lowuid(suid), arg3)) 11396 return -TARGET_EFAULT; 11397 } 11398 } 11399 return ret; 11400 #endif 11401 #ifdef TARGET_NR_getresgid 11402 case TARGET_NR_setresgid: 11403 return get_errno(sys_setresgid(low2highgid(arg1), 11404 low2highgid(arg2), 11405 low2highgid(arg3))); 11406 #endif 11407 #ifdef TARGET_NR_getresgid 11408 case TARGET_NR_getresgid: 11409 { 11410 gid_t rgid, egid, sgid; 11411 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11412 if (!is_error(ret)) { 11413 if (put_user_id(high2lowgid(rgid), arg1) 11414 || put_user_id(high2lowgid(egid), arg2) 11415 || put_user_id(high2lowgid(sgid), arg3)) 11416 return -TARGET_EFAULT; 11417 } 11418 } 11419 return ret; 11420 #endif 11421 #ifdef TARGET_NR_chown 11422 case TARGET_NR_chown: 11423 if (!(p = lock_user_string(arg1))) 11424 return -TARGET_EFAULT; 11425 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 11426 unlock_user(p, arg1, 0); 11427 return ret; 11428 #endif 11429 case TARGET_NR_setuid: 11430 return get_errno(sys_setuid(low2highuid(arg1))); 11431 case TARGET_NR_setgid: 11432 return get_errno(sys_setgid(low2highgid(arg1))); 11433 
case TARGET_NR_setfsuid: 11434 return get_errno(setfsuid(arg1)); 11435 case TARGET_NR_setfsgid: 11436 return get_errno(setfsgid(arg1)); 11437 11438 #ifdef TARGET_NR_lchown32 11439 case TARGET_NR_lchown32: 11440 if (!(p = lock_user_string(arg1))) 11441 return -TARGET_EFAULT; 11442 ret = get_errno(lchown(p, arg2, arg3)); 11443 unlock_user(p, arg1, 0); 11444 return ret; 11445 #endif 11446 #ifdef TARGET_NR_getuid32 11447 case TARGET_NR_getuid32: 11448 return get_errno(getuid()); 11449 #endif 11450 11451 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 11452 /* Alpha specific */ 11453 case TARGET_NR_getxuid: 11454 { 11455 uid_t euid; 11456 euid=geteuid(); 11457 cpu_env->ir[IR_A4]=euid; 11458 } 11459 return get_errno(getuid()); 11460 #endif 11461 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 11462 /* Alpha specific */ 11463 case TARGET_NR_getxgid: 11464 { 11465 uid_t egid; 11466 egid=getegid(); 11467 cpu_env->ir[IR_A4]=egid; 11468 } 11469 return get_errno(getgid()); 11470 #endif 11471 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 11472 /* Alpha specific */ 11473 case TARGET_NR_osf_getsysinfo: 11474 ret = -TARGET_EOPNOTSUPP; 11475 switch (arg1) { 11476 case TARGET_GSI_IEEE_FP_CONTROL: 11477 { 11478 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env); 11479 uint64_t swcr = cpu_env->swcr; 11480 11481 swcr &= ~SWCR_STATUS_MASK; 11482 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK; 11483 11484 if (put_user_u64 (swcr, arg2)) 11485 return -TARGET_EFAULT; 11486 ret = 0; 11487 } 11488 break; 11489 11490 /* case GSI_IEEE_STATE_AT_SIGNAL: 11491 -- Not implemented in linux kernel. 11492 case GSI_UACPROC: 11493 -- Retrieves current unaligned access state; not much used. 11494 case GSI_PROC_TYPE: 11495 -- Retrieves implver information; surely not used. 11496 case GSI_GET_HWRPB: 11497 -- Grabs a copy of the HWRPB; surely not used. 
11498 */ 11499 } 11500 return ret; 11501 #endif 11502 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 11503 /* Alpha specific */ 11504 case TARGET_NR_osf_setsysinfo: 11505 ret = -TARGET_EOPNOTSUPP; 11506 switch (arg1) { 11507 case TARGET_SSI_IEEE_FP_CONTROL: 11508 { 11509 uint64_t swcr, fpcr; 11510 11511 if (get_user_u64 (swcr, arg2)) { 11512 return -TARGET_EFAULT; 11513 } 11514 11515 /* 11516 * The kernel calls swcr_update_status to update the 11517 * status bits from the fpcr at every point that it 11518 * could be queried. Therefore, we store the status 11519 * bits only in FPCR. 11520 */ 11521 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK); 11522 11523 fpcr = cpu_alpha_load_fpcr(cpu_env); 11524 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32); 11525 fpcr |= alpha_ieee_swcr_to_fpcr(swcr); 11526 cpu_alpha_store_fpcr(cpu_env, fpcr); 11527 ret = 0; 11528 } 11529 break; 11530 11531 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 11532 { 11533 uint64_t exc, fpcr, fex; 11534 11535 if (get_user_u64(exc, arg2)) { 11536 return -TARGET_EFAULT; 11537 } 11538 exc &= SWCR_STATUS_MASK; 11539 fpcr = cpu_alpha_load_fpcr(cpu_env); 11540 11541 /* Old exceptions are not signaled. */ 11542 fex = alpha_ieee_fpcr_to_swcr(fpcr); 11543 fex = exc & ~fex; 11544 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT; 11545 fex &= (cpu_env)->swcr; 11546 11547 /* Update the hardware fpcr. 
*/ 11548 fpcr |= alpha_ieee_swcr_to_fpcr(exc); 11549 cpu_alpha_store_fpcr(cpu_env, fpcr); 11550 11551 if (fex) { 11552 int si_code = TARGET_FPE_FLTUNK; 11553 target_siginfo_t info; 11554 11555 if (fex & SWCR_TRAP_ENABLE_DNO) { 11556 si_code = TARGET_FPE_FLTUND; 11557 } 11558 if (fex & SWCR_TRAP_ENABLE_INE) { 11559 si_code = TARGET_FPE_FLTRES; 11560 } 11561 if (fex & SWCR_TRAP_ENABLE_UNF) { 11562 si_code = TARGET_FPE_FLTUND; 11563 } 11564 if (fex & SWCR_TRAP_ENABLE_OVF) { 11565 si_code = TARGET_FPE_FLTOVF; 11566 } 11567 if (fex & SWCR_TRAP_ENABLE_DZE) { 11568 si_code = TARGET_FPE_FLTDIV; 11569 } 11570 if (fex & SWCR_TRAP_ENABLE_INV) { 11571 si_code = TARGET_FPE_FLTINV; 11572 } 11573 11574 info.si_signo = SIGFPE; 11575 info.si_errno = 0; 11576 info.si_code = si_code; 11577 info._sifields._sigfault._addr = (cpu_env)->pc; 11578 queue_signal(cpu_env, info.si_signo, 11579 QEMU_SI_FAULT, &info); 11580 } 11581 ret = 0; 11582 } 11583 break; 11584 11585 /* case SSI_NVPAIRS: 11586 -- Used with SSIN_UACPROC to enable unaligned accesses. 11587 case SSI_IEEE_STATE_AT_SIGNAL: 11588 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 11589 -- Not implemented in linux kernel 11590 */ 11591 } 11592 return ret; 11593 #endif 11594 #ifdef TARGET_NR_osf_sigprocmask 11595 /* Alpha specific. 
*/ 11596 case TARGET_NR_osf_sigprocmask: 11597 { 11598 abi_ulong mask; 11599 int how; 11600 sigset_t set, oldset; 11601 11602 switch(arg1) { 11603 case TARGET_SIG_BLOCK: 11604 how = SIG_BLOCK; 11605 break; 11606 case TARGET_SIG_UNBLOCK: 11607 how = SIG_UNBLOCK; 11608 break; 11609 case TARGET_SIG_SETMASK: 11610 how = SIG_SETMASK; 11611 break; 11612 default: 11613 return -TARGET_EINVAL; 11614 } 11615 mask = arg2; 11616 target_to_host_old_sigset(&set, &mask); 11617 ret = do_sigprocmask(how, &set, &oldset); 11618 if (!ret) { 11619 host_to_target_old_sigset(&mask, &oldset); 11620 ret = mask; 11621 } 11622 } 11623 return ret; 11624 #endif 11625 11626 #ifdef TARGET_NR_getgid32 11627 case TARGET_NR_getgid32: 11628 return get_errno(getgid()); 11629 #endif 11630 #ifdef TARGET_NR_geteuid32 11631 case TARGET_NR_geteuid32: 11632 return get_errno(geteuid()); 11633 #endif 11634 #ifdef TARGET_NR_getegid32 11635 case TARGET_NR_getegid32: 11636 return get_errno(getegid()); 11637 #endif 11638 #ifdef TARGET_NR_setreuid32 11639 case TARGET_NR_setreuid32: 11640 return get_errno(setreuid(arg1, arg2)); 11641 #endif 11642 #ifdef TARGET_NR_setregid32 11643 case TARGET_NR_setregid32: 11644 return get_errno(setregid(arg1, arg2)); 11645 #endif 11646 #ifdef TARGET_NR_getgroups32 11647 case TARGET_NR_getgroups32: 11648 { 11649 int gidsetsize = arg1; 11650 uint32_t *target_grouplist; 11651 gid_t *grouplist; 11652 int i; 11653 11654 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11655 ret = get_errno(getgroups(gidsetsize, grouplist)); 11656 if (gidsetsize == 0) 11657 return ret; 11658 if (!is_error(ret)) { 11659 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 11660 if (!target_grouplist) { 11661 return -TARGET_EFAULT; 11662 } 11663 for(i = 0;i < ret; i++) 11664 target_grouplist[i] = tswap32(grouplist[i]); 11665 unlock_user(target_grouplist, arg2, gidsetsize * 4); 11666 } 11667 } 11668 return ret; 11669 #endif 11670 #ifdef TARGET_NR_setgroups32 11671 case 
TARGET_NR_setgroups32: 11672 { 11673 int gidsetsize = arg1; 11674 uint32_t *target_grouplist; 11675 gid_t *grouplist; 11676 int i; 11677 11678 grouplist = alloca(gidsetsize * sizeof(gid_t)); 11679 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 11680 if (!target_grouplist) { 11681 return -TARGET_EFAULT; 11682 } 11683 for(i = 0;i < gidsetsize; i++) 11684 grouplist[i] = tswap32(target_grouplist[i]); 11685 unlock_user(target_grouplist, arg2, 0); 11686 return get_errno(setgroups(gidsetsize, grouplist)); 11687 } 11688 #endif 11689 #ifdef TARGET_NR_fchown32 11690 case TARGET_NR_fchown32: 11691 return get_errno(fchown(arg1, arg2, arg3)); 11692 #endif 11693 #ifdef TARGET_NR_setresuid32 11694 case TARGET_NR_setresuid32: 11695 return get_errno(sys_setresuid(arg1, arg2, arg3)); 11696 #endif 11697 #ifdef TARGET_NR_getresuid32 11698 case TARGET_NR_getresuid32: 11699 { 11700 uid_t ruid, euid, suid; 11701 ret = get_errno(getresuid(&ruid, &euid, &suid)); 11702 if (!is_error(ret)) { 11703 if (put_user_u32(ruid, arg1) 11704 || put_user_u32(euid, arg2) 11705 || put_user_u32(suid, arg3)) 11706 return -TARGET_EFAULT; 11707 } 11708 } 11709 return ret; 11710 #endif 11711 #ifdef TARGET_NR_setresgid32 11712 case TARGET_NR_setresgid32: 11713 return get_errno(sys_setresgid(arg1, arg2, arg3)); 11714 #endif 11715 #ifdef TARGET_NR_getresgid32 11716 case TARGET_NR_getresgid32: 11717 { 11718 gid_t rgid, egid, sgid; 11719 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 11720 if (!is_error(ret)) { 11721 if (put_user_u32(rgid, arg1) 11722 || put_user_u32(egid, arg2) 11723 || put_user_u32(sgid, arg3)) 11724 return -TARGET_EFAULT; 11725 } 11726 } 11727 return ret; 11728 #endif 11729 #ifdef TARGET_NR_chown32 11730 case TARGET_NR_chown32: 11731 if (!(p = lock_user_string(arg1))) 11732 return -TARGET_EFAULT; 11733 ret = get_errno(chown(p, arg2, arg3)); 11734 unlock_user(p, arg1, 0); 11735 return ret; 11736 #endif 11737 #ifdef TARGET_NR_setuid32 11738 case TARGET_NR_setuid32: 11739 
return get_errno(sys_setuid(arg1)); 11740 #endif 11741 #ifdef TARGET_NR_setgid32 11742 case TARGET_NR_setgid32: 11743 return get_errno(sys_setgid(arg1)); 11744 #endif 11745 #ifdef TARGET_NR_setfsuid32 11746 case TARGET_NR_setfsuid32: 11747 return get_errno(setfsuid(arg1)); 11748 #endif 11749 #ifdef TARGET_NR_setfsgid32 11750 case TARGET_NR_setfsgid32: 11751 return get_errno(setfsgid(arg1)); 11752 #endif 11753 #ifdef TARGET_NR_mincore 11754 case TARGET_NR_mincore: 11755 { 11756 void *a = lock_user(VERIFY_READ, arg1, arg2, 0); 11757 if (!a) { 11758 return -TARGET_ENOMEM; 11759 } 11760 p = lock_user_string(arg3); 11761 if (!p) { 11762 ret = -TARGET_EFAULT; 11763 } else { 11764 ret = get_errno(mincore(a, arg2, p)); 11765 unlock_user(p, arg3, ret); 11766 } 11767 unlock_user(a, arg1, 0); 11768 } 11769 return ret; 11770 #endif 11771 #ifdef TARGET_NR_arm_fadvise64_64 11772 case TARGET_NR_arm_fadvise64_64: 11773 /* arm_fadvise64_64 looks like fadvise64_64 but 11774 * with different argument order: fd, advice, offset, len 11775 * rather than the usual fd, offset, len, advice. 11776 * Note that offset and len are both 64-bit so appear as 11777 * pairs of 32-bit registers. 
11778 */ 11779 ret = posix_fadvise(arg1, target_offset64(arg3, arg4), 11780 target_offset64(arg5, arg6), arg2); 11781 return -host_to_target_errno(ret); 11782 #endif 11783 11784 #if TARGET_ABI_BITS == 32 11785 11786 #ifdef TARGET_NR_fadvise64_64 11787 case TARGET_NR_fadvise64_64: 11788 #if defined(TARGET_PPC) || defined(TARGET_XTENSA) 11789 /* 6 args: fd, advice, offset (high, low), len (high, low) */ 11790 ret = arg2; 11791 arg2 = arg3; 11792 arg3 = arg4; 11793 arg4 = arg5; 11794 arg5 = arg6; 11795 arg6 = ret; 11796 #else 11797 /* 6 args: fd, offset (high, low), len (high, low), advice */ 11798 if (regpairs_aligned(cpu_env, num)) { 11799 /* offset is in (3,4), len in (5,6) and advice in 7 */ 11800 arg2 = arg3; 11801 arg3 = arg4; 11802 arg4 = arg5; 11803 arg5 = arg6; 11804 arg6 = arg7; 11805 } 11806 #endif 11807 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), 11808 target_offset64(arg4, arg5), arg6); 11809 return -host_to_target_errno(ret); 11810 #endif 11811 11812 #ifdef TARGET_NR_fadvise64 11813 case TARGET_NR_fadvise64: 11814 /* 5 args: fd, offset (high, low), len, advice */ 11815 if (regpairs_aligned(cpu_env, num)) { 11816 /* offset is in (3,4), len in 5 and advice in 6 */ 11817 arg2 = arg3; 11818 arg3 = arg4; 11819 arg4 = arg5; 11820 arg5 = arg6; 11821 } 11822 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5); 11823 return -host_to_target_errno(ret); 11824 #endif 11825 11826 #else /* not a 32-bit ABI */ 11827 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) 11828 #ifdef TARGET_NR_fadvise64_64 11829 case TARGET_NR_fadvise64_64: 11830 #endif 11831 #ifdef TARGET_NR_fadvise64 11832 case TARGET_NR_fadvise64: 11833 #endif 11834 #ifdef TARGET_S390X 11835 switch (arg4) { 11836 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 11837 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 11838 case 6: arg4 = POSIX_FADV_DONTNEED; break; 11839 case 7: arg4 = POSIX_FADV_NOREUSE; break; 11840 
default: break; 11841 } 11842 #endif 11843 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); 11844 #endif 11845 #endif /* end of 64-bit ABI fadvise handling */ 11846 11847 #ifdef TARGET_NR_madvise 11848 case TARGET_NR_madvise: 11849 return target_madvise(arg1, arg2, arg3); 11850 #endif 11851 #ifdef TARGET_NR_fcntl64 11852 case TARGET_NR_fcntl64: 11853 { 11854 int cmd; 11855 struct flock64 fl; 11856 from_flock64_fn *copyfrom = copy_from_user_flock64; 11857 to_flock64_fn *copyto = copy_to_user_flock64; 11858 11859 #ifdef TARGET_ARM 11860 if (!cpu_env->eabi) { 11861 copyfrom = copy_from_user_oabi_flock64; 11862 copyto = copy_to_user_oabi_flock64; 11863 } 11864 #endif 11865 11866 cmd = target_to_host_fcntl_cmd(arg2); 11867 if (cmd == -TARGET_EINVAL) { 11868 return cmd; 11869 } 11870 11871 switch(arg2) { 11872 case TARGET_F_GETLK64: 11873 ret = copyfrom(&fl, arg3); 11874 if (ret) { 11875 break; 11876 } 11877 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11878 if (ret == 0) { 11879 ret = copyto(arg3, &fl); 11880 } 11881 break; 11882 11883 case TARGET_F_SETLK64: 11884 case TARGET_F_SETLKW64: 11885 ret = copyfrom(&fl, arg3); 11886 if (ret) { 11887 break; 11888 } 11889 ret = get_errno(safe_fcntl(arg1, cmd, &fl)); 11890 break; 11891 default: 11892 ret = do_fcntl(arg1, arg2, arg3); 11893 break; 11894 } 11895 return ret; 11896 } 11897 #endif 11898 #ifdef TARGET_NR_cacheflush 11899 case TARGET_NR_cacheflush: 11900 /* self-modifying code is handled automatically, so nothing needed */ 11901 return 0; 11902 #endif 11903 #ifdef TARGET_NR_getpagesize 11904 case TARGET_NR_getpagesize: 11905 return TARGET_PAGE_SIZE; 11906 #endif 11907 case TARGET_NR_gettid: 11908 return get_errno(sys_gettid()); 11909 #ifdef TARGET_NR_readahead 11910 case TARGET_NR_readahead: 11911 #if TARGET_ABI_BITS == 32 11912 if (regpairs_aligned(cpu_env, num)) { 11913 arg2 = arg3; 11914 arg3 = arg4; 11915 arg4 = arg5; 11916 } 11917 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , 
arg4)); 11918 #else 11919 ret = get_errno(readahead(arg1, arg2, arg3)); 11920 #endif 11921 return ret; 11922 #endif 11923 #ifdef CONFIG_ATTR 11924 #ifdef TARGET_NR_setxattr 11925 case TARGET_NR_listxattr: 11926 case TARGET_NR_llistxattr: 11927 { 11928 void *p, *b = 0; 11929 if (arg2) { 11930 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11931 if (!b) { 11932 return -TARGET_EFAULT; 11933 } 11934 } 11935 p = lock_user_string(arg1); 11936 if (p) { 11937 if (num == TARGET_NR_listxattr) { 11938 ret = get_errno(listxattr(p, b, arg3)); 11939 } else { 11940 ret = get_errno(llistxattr(p, b, arg3)); 11941 } 11942 } else { 11943 ret = -TARGET_EFAULT; 11944 } 11945 unlock_user(p, arg1, 0); 11946 unlock_user(b, arg2, arg3); 11947 return ret; 11948 } 11949 case TARGET_NR_flistxattr: 11950 { 11951 void *b = 0; 11952 if (arg2) { 11953 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 11954 if (!b) { 11955 return -TARGET_EFAULT; 11956 } 11957 } 11958 ret = get_errno(flistxattr(arg1, b, arg3)); 11959 unlock_user(b, arg2, arg3); 11960 return ret; 11961 } 11962 case TARGET_NR_setxattr: 11963 case TARGET_NR_lsetxattr: 11964 { 11965 void *p, *n, *v = 0; 11966 if (arg3) { 11967 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11968 if (!v) { 11969 return -TARGET_EFAULT; 11970 } 11971 } 11972 p = lock_user_string(arg1); 11973 n = lock_user_string(arg2); 11974 if (p && n) { 11975 if (num == TARGET_NR_setxattr) { 11976 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 11977 } else { 11978 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 11979 } 11980 } else { 11981 ret = -TARGET_EFAULT; 11982 } 11983 unlock_user(p, arg1, 0); 11984 unlock_user(n, arg2, 0); 11985 unlock_user(v, arg3, 0); 11986 } 11987 return ret; 11988 case TARGET_NR_fsetxattr: 11989 { 11990 void *n, *v = 0; 11991 if (arg3) { 11992 v = lock_user(VERIFY_READ, arg3, arg4, 1); 11993 if (!v) { 11994 return -TARGET_EFAULT; 11995 } 11996 } 11997 n = lock_user_string(arg2); 11998 if (n) { 11999 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 
12000 } else { 12001 ret = -TARGET_EFAULT; 12002 } 12003 unlock_user(n, arg2, 0); 12004 unlock_user(v, arg3, 0); 12005 } 12006 return ret; 12007 case TARGET_NR_getxattr: 12008 case TARGET_NR_lgetxattr: 12009 { 12010 void *p, *n, *v = 0; 12011 if (arg3) { 12012 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 12013 if (!v) { 12014 return -TARGET_EFAULT; 12015 } 12016 } 12017 p = lock_user_string(arg1); 12018 n = lock_user_string(arg2); 12019 if (p && n) { 12020 if (num == TARGET_NR_getxattr) { 12021 ret = get_errno(getxattr(p, n, v, arg4)); 12022 } else { 12023 ret = get_errno(lgetxattr(p, n, v, arg4)); 12024 } 12025 } else { 12026 ret = -TARGET_EFAULT; 12027 } 12028 unlock_user(p, arg1, 0); 12029 unlock_user(n, arg2, 0); 12030 unlock_user(v, arg3, arg4); 12031 } 12032 return ret; 12033 case TARGET_NR_fgetxattr: 12034 { 12035 void *n, *v = 0; 12036 if (arg3) { 12037 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 12038 if (!v) { 12039 return -TARGET_EFAULT; 12040 } 12041 } 12042 n = lock_user_string(arg2); 12043 if (n) { 12044 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 12045 } else { 12046 ret = -TARGET_EFAULT; 12047 } 12048 unlock_user(n, arg2, 0); 12049 unlock_user(v, arg3, arg4); 12050 } 12051 return ret; 12052 case TARGET_NR_removexattr: 12053 case TARGET_NR_lremovexattr: 12054 { 12055 void *p, *n; 12056 p = lock_user_string(arg1); 12057 n = lock_user_string(arg2); 12058 if (p && n) { 12059 if (num == TARGET_NR_removexattr) { 12060 ret = get_errno(removexattr(p, n)); 12061 } else { 12062 ret = get_errno(lremovexattr(p, n)); 12063 } 12064 } else { 12065 ret = -TARGET_EFAULT; 12066 } 12067 unlock_user(p, arg1, 0); 12068 unlock_user(n, arg2, 0); 12069 } 12070 return ret; 12071 case TARGET_NR_fremovexattr: 12072 { 12073 void *n; 12074 n = lock_user_string(arg2); 12075 if (n) { 12076 ret = get_errno(fremovexattr(arg1, n)); 12077 } else { 12078 ret = -TARGET_EFAULT; 12079 } 12080 unlock_user(n, arg2, 0); 12081 } 12082 return ret; 12083 #endif 12084 #endif /* CONFIG_ATTR */ 
12085 #ifdef TARGET_NR_set_thread_area 12086 case TARGET_NR_set_thread_area: 12087 #if defined(TARGET_MIPS) 12088 cpu_env->active_tc.CP0_UserLocal = arg1; 12089 return 0; 12090 #elif defined(TARGET_CRIS) 12091 if (arg1 & 0xff) 12092 ret = -TARGET_EINVAL; 12093 else { 12094 cpu_env->pregs[PR_PID] = arg1; 12095 ret = 0; 12096 } 12097 return ret; 12098 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 12099 return do_set_thread_area(cpu_env, arg1); 12100 #elif defined(TARGET_M68K) 12101 { 12102 TaskState *ts = cpu->opaque; 12103 ts->tp_value = arg1; 12104 return 0; 12105 } 12106 #else 12107 return -TARGET_ENOSYS; 12108 #endif 12109 #endif 12110 #ifdef TARGET_NR_get_thread_area 12111 case TARGET_NR_get_thread_area: 12112 #if defined(TARGET_I386) && defined(TARGET_ABI32) 12113 return do_get_thread_area(cpu_env, arg1); 12114 #elif defined(TARGET_M68K) 12115 { 12116 TaskState *ts = cpu->opaque; 12117 return ts->tp_value; 12118 } 12119 #else 12120 return -TARGET_ENOSYS; 12121 #endif 12122 #endif 12123 #ifdef TARGET_NR_getdomainname 12124 case TARGET_NR_getdomainname: 12125 return -TARGET_ENOSYS; 12126 #endif 12127 12128 #ifdef TARGET_NR_clock_settime 12129 case TARGET_NR_clock_settime: 12130 { 12131 struct timespec ts; 12132 12133 ret = target_to_host_timespec(&ts, arg2); 12134 if (!is_error(ret)) { 12135 ret = get_errno(clock_settime(arg1, &ts)); 12136 } 12137 return ret; 12138 } 12139 #endif 12140 #ifdef TARGET_NR_clock_settime64 12141 case TARGET_NR_clock_settime64: 12142 { 12143 struct timespec ts; 12144 12145 ret = target_to_host_timespec64(&ts, arg2); 12146 if (!is_error(ret)) { 12147 ret = get_errno(clock_settime(arg1, &ts)); 12148 } 12149 return ret; 12150 } 12151 #endif 12152 #ifdef TARGET_NR_clock_gettime 12153 case TARGET_NR_clock_gettime: 12154 { 12155 struct timespec ts; 12156 ret = get_errno(clock_gettime(arg1, &ts)); 12157 if (!is_error(ret)) { 12158 ret = host_to_target_timespec(arg2, &ts); 12159 } 12160 return ret; 12161 } 12162 #endif 12163 #ifdef 
TARGET_NR_clock_gettime64 12164 case TARGET_NR_clock_gettime64: 12165 { 12166 struct timespec ts; 12167 ret = get_errno(clock_gettime(arg1, &ts)); 12168 if (!is_error(ret)) { 12169 ret = host_to_target_timespec64(arg2, &ts); 12170 } 12171 return ret; 12172 } 12173 #endif 12174 #ifdef TARGET_NR_clock_getres 12175 case TARGET_NR_clock_getres: 12176 { 12177 struct timespec ts; 12178 ret = get_errno(clock_getres(arg1, &ts)); 12179 if (!is_error(ret)) { 12180 host_to_target_timespec(arg2, &ts); 12181 } 12182 return ret; 12183 } 12184 #endif 12185 #ifdef TARGET_NR_clock_getres_time64 12186 case TARGET_NR_clock_getres_time64: 12187 { 12188 struct timespec ts; 12189 ret = get_errno(clock_getres(arg1, &ts)); 12190 if (!is_error(ret)) { 12191 host_to_target_timespec64(arg2, &ts); 12192 } 12193 return ret; 12194 } 12195 #endif 12196 #ifdef TARGET_NR_clock_nanosleep 12197 case TARGET_NR_clock_nanosleep: 12198 { 12199 struct timespec ts; 12200 if (target_to_host_timespec(&ts, arg3)) { 12201 return -TARGET_EFAULT; 12202 } 12203 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12204 &ts, arg4 ? &ts : NULL)); 12205 /* 12206 * if the call is interrupted by a signal handler, it fails 12207 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not 12208 * TIMER_ABSTIME, it returns the remaining unslept time in arg4. 12209 */ 12210 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12211 host_to_target_timespec(arg4, &ts)) { 12212 return -TARGET_EFAULT; 12213 } 12214 12215 return ret; 12216 } 12217 #endif 12218 #ifdef TARGET_NR_clock_nanosleep_time64 12219 case TARGET_NR_clock_nanosleep_time64: 12220 { 12221 struct timespec ts; 12222 12223 if (target_to_host_timespec64(&ts, arg3)) { 12224 return -TARGET_EFAULT; 12225 } 12226 12227 ret = get_errno(safe_clock_nanosleep(arg1, arg2, 12228 &ts, arg4 ? 
&ts : NULL)); 12229 12230 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME && 12231 host_to_target_timespec64(arg4, &ts)) { 12232 return -TARGET_EFAULT; 12233 } 12234 return ret; 12235 } 12236 #endif 12237 12238 #if defined(TARGET_NR_set_tid_address) 12239 case TARGET_NR_set_tid_address: 12240 { 12241 TaskState *ts = cpu->opaque; 12242 ts->child_tidptr = arg1; 12243 /* do not call host set_tid_address() syscall, instead return tid() */ 12244 return get_errno(sys_gettid()); 12245 } 12246 #endif 12247 12248 case TARGET_NR_tkill: 12249 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); 12250 12251 case TARGET_NR_tgkill: 12252 return get_errno(safe_tgkill((int)arg1, (int)arg2, 12253 target_to_host_signal(arg3))); 12254 12255 #ifdef TARGET_NR_set_robust_list 12256 case TARGET_NR_set_robust_list: 12257 case TARGET_NR_get_robust_list: 12258 /* The ABI for supporting robust futexes has userspace pass 12259 * the kernel a pointer to a linked list which is updated by 12260 * userspace after the syscall; the list is walked by the kernel 12261 * when the thread exits. Since the linked list in QEMU guest 12262 * memory isn't a valid linked list for the host and we have 12263 * no way to reliably intercept the thread-death event, we can't 12264 * support these. Silently return ENOSYS so that guest userspace 12265 * falls back to a non-robust futex implementation (which should 12266 * be OK except in the corner case of the guest crashing while 12267 * holding a mutex that is shared with another process via 12268 * shared memory). 
12269 */ 12270 return -TARGET_ENOSYS; 12271 #endif 12272 12273 #if defined(TARGET_NR_utimensat) 12274 case TARGET_NR_utimensat: 12275 { 12276 struct timespec *tsp, ts[2]; 12277 if (!arg3) { 12278 tsp = NULL; 12279 } else { 12280 if (target_to_host_timespec(ts, arg3)) { 12281 return -TARGET_EFAULT; 12282 } 12283 if (target_to_host_timespec(ts + 1, arg3 + 12284 sizeof(struct target_timespec))) { 12285 return -TARGET_EFAULT; 12286 } 12287 tsp = ts; 12288 } 12289 if (!arg2) 12290 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12291 else { 12292 if (!(p = lock_user_string(arg2))) { 12293 return -TARGET_EFAULT; 12294 } 12295 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12296 unlock_user(p, arg2, 0); 12297 } 12298 } 12299 return ret; 12300 #endif 12301 #ifdef TARGET_NR_utimensat_time64 12302 case TARGET_NR_utimensat_time64: 12303 { 12304 struct timespec *tsp, ts[2]; 12305 if (!arg3) { 12306 tsp = NULL; 12307 } else { 12308 if (target_to_host_timespec64(ts, arg3)) { 12309 return -TARGET_EFAULT; 12310 } 12311 if (target_to_host_timespec64(ts + 1, arg3 + 12312 sizeof(struct target__kernel_timespec))) { 12313 return -TARGET_EFAULT; 12314 } 12315 tsp = ts; 12316 } 12317 if (!arg2) 12318 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 12319 else { 12320 p = lock_user_string(arg2); 12321 if (!p) { 12322 return -TARGET_EFAULT; 12323 } 12324 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 12325 unlock_user(p, arg2, 0); 12326 } 12327 } 12328 return ret; 12329 #endif 12330 #ifdef TARGET_NR_futex 12331 case TARGET_NR_futex: 12332 return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6); 12333 #endif 12334 #ifdef TARGET_NR_futex_time64 12335 case TARGET_NR_futex_time64: 12336 return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6); 12337 #endif 12338 #ifdef CONFIG_INOTIFY 12339 #if defined(TARGET_NR_inotify_init) 12340 case TARGET_NR_inotify_init: 12341 ret = get_errno(inotify_init()); 12342 if (ret >= 0) { 12343 fd_trans_register(ret, 
&target_inotify_trans); 12344 } 12345 return ret; 12346 #endif 12347 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1) 12348 case TARGET_NR_inotify_init1: 12349 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1, 12350 fcntl_flags_tbl))); 12351 if (ret >= 0) { 12352 fd_trans_register(ret, &target_inotify_trans); 12353 } 12354 return ret; 12355 #endif 12356 #if defined(TARGET_NR_inotify_add_watch) 12357 case TARGET_NR_inotify_add_watch: 12358 p = lock_user_string(arg2); 12359 ret = get_errno(inotify_add_watch(arg1, path(p), arg3)); 12360 unlock_user(p, arg2, 0); 12361 return ret; 12362 #endif 12363 #if defined(TARGET_NR_inotify_rm_watch) 12364 case TARGET_NR_inotify_rm_watch: 12365 return get_errno(inotify_rm_watch(arg1, arg2)); 12366 #endif 12367 #endif 12368 12369 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 12370 case TARGET_NR_mq_open: 12371 { 12372 struct mq_attr posix_mq_attr; 12373 struct mq_attr *pposix_mq_attr; 12374 int host_flags; 12375 12376 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl); 12377 pposix_mq_attr = NULL; 12378 if (arg4) { 12379 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) { 12380 return -TARGET_EFAULT; 12381 } 12382 pposix_mq_attr = &posix_mq_attr; 12383 } 12384 p = lock_user_string(arg1 - 1); 12385 if (!p) { 12386 return -TARGET_EFAULT; 12387 } 12388 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr)); 12389 unlock_user (p, arg1, 0); 12390 } 12391 return ret; 12392 12393 case TARGET_NR_mq_unlink: 12394 p = lock_user_string(arg1 - 1); 12395 if (!p) { 12396 return -TARGET_EFAULT; 12397 } 12398 ret = get_errno(mq_unlink(p)); 12399 unlock_user (p, arg1, 0); 12400 return ret; 12401 12402 #ifdef TARGET_NR_mq_timedsend 12403 case TARGET_NR_mq_timedsend: 12404 { 12405 struct timespec ts; 12406 12407 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12408 if (arg5 != 0) { 12409 if (target_to_host_timespec(&ts, arg5)) { 12410 return -TARGET_EFAULT; 12411 } 12412 ret = 
get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12413 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12414 return -TARGET_EFAULT; 12415 } 12416 } else { 12417 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12418 } 12419 unlock_user (p, arg2, arg3); 12420 } 12421 return ret; 12422 #endif 12423 #ifdef TARGET_NR_mq_timedsend_time64 12424 case TARGET_NR_mq_timedsend_time64: 12425 { 12426 struct timespec ts; 12427 12428 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12429 if (arg5 != 0) { 12430 if (target_to_host_timespec64(&ts, arg5)) { 12431 return -TARGET_EFAULT; 12432 } 12433 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); 12434 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12435 return -TARGET_EFAULT; 12436 } 12437 } else { 12438 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); 12439 } 12440 unlock_user(p, arg2, arg3); 12441 } 12442 return ret; 12443 #endif 12444 12445 #ifdef TARGET_NR_mq_timedreceive 12446 case TARGET_NR_mq_timedreceive: 12447 { 12448 struct timespec ts; 12449 unsigned int prio; 12450 12451 p = lock_user (VERIFY_READ, arg2, arg3, 1); 12452 if (arg5 != 0) { 12453 if (target_to_host_timespec(&ts, arg5)) { 12454 return -TARGET_EFAULT; 12455 } 12456 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12457 &prio, &ts)); 12458 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) { 12459 return -TARGET_EFAULT; 12460 } 12461 } else { 12462 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12463 &prio, NULL)); 12464 } 12465 unlock_user (p, arg2, arg3); 12466 if (arg4 != 0) 12467 put_user_u32(prio, arg4); 12468 } 12469 return ret; 12470 #endif 12471 #ifdef TARGET_NR_mq_timedreceive_time64 12472 case TARGET_NR_mq_timedreceive_time64: 12473 { 12474 struct timespec ts; 12475 unsigned int prio; 12476 12477 p = lock_user(VERIFY_READ, arg2, arg3, 1); 12478 if (arg5 != 0) { 12479 if (target_to_host_timespec64(&ts, arg5)) { 12480 return -TARGET_EFAULT; 12481 } 12482 ret = 
get_errno(safe_mq_timedreceive(arg1, p, arg3, 12483 &prio, &ts)); 12484 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) { 12485 return -TARGET_EFAULT; 12486 } 12487 } else { 12488 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, 12489 &prio, NULL)); 12490 } 12491 unlock_user(p, arg2, arg3); 12492 if (arg4 != 0) { 12493 put_user_u32(prio, arg4); 12494 } 12495 } 12496 return ret; 12497 #endif 12498 12499 /* Not implemented for now... */ 12500 /* case TARGET_NR_mq_notify: */ 12501 /* break; */ 12502 12503 case TARGET_NR_mq_getsetattr: 12504 { 12505 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 12506 ret = 0; 12507 if (arg2 != 0) { 12508 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 12509 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in, 12510 &posix_mq_attr_out)); 12511 } else if (arg3 != 0) { 12512 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out)); 12513 } 12514 if (ret == 0 && arg3 != 0) { 12515 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 12516 } 12517 } 12518 return ret; 12519 #endif 12520 12521 #ifdef CONFIG_SPLICE 12522 #ifdef TARGET_NR_tee 12523 case TARGET_NR_tee: 12524 { 12525 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 12526 } 12527 return ret; 12528 #endif 12529 #ifdef TARGET_NR_splice 12530 case TARGET_NR_splice: 12531 { 12532 loff_t loff_in, loff_out; 12533 loff_t *ploff_in = NULL, *ploff_out = NULL; 12534 if (arg2) { 12535 if (get_user_u64(loff_in, arg2)) { 12536 return -TARGET_EFAULT; 12537 } 12538 ploff_in = &loff_in; 12539 } 12540 if (arg4) { 12541 if (get_user_u64(loff_out, arg4)) { 12542 return -TARGET_EFAULT; 12543 } 12544 ploff_out = &loff_out; 12545 } 12546 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 12547 if (arg2) { 12548 if (put_user_u64(loff_in, arg2)) { 12549 return -TARGET_EFAULT; 12550 } 12551 } 12552 if (arg4) { 12553 if (put_user_u64(loff_out, arg4)) { 12554 return -TARGET_EFAULT; 12555 } 12556 } 12557 } 12558 return ret; 12559 #endif 12560 #ifdef TARGET_NR_vmsplice 12561 case 
TARGET_NR_vmsplice: 12562 { 12563 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 12564 if (vec != NULL) { 12565 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 12566 unlock_iovec(vec, arg2, arg3, 0); 12567 } else { 12568 ret = -host_to_target_errno(errno); 12569 } 12570 } 12571 return ret; 12572 #endif 12573 #endif /* CONFIG_SPLICE */ 12574 #ifdef CONFIG_EVENTFD 12575 #if defined(TARGET_NR_eventfd) 12576 case TARGET_NR_eventfd: 12577 ret = get_errno(eventfd(arg1, 0)); 12578 if (ret >= 0) { 12579 fd_trans_register(ret, &target_eventfd_trans); 12580 } 12581 return ret; 12582 #endif 12583 #if defined(TARGET_NR_eventfd2) 12584 case TARGET_NR_eventfd2: 12585 { 12586 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)); 12587 if (arg2 & TARGET_O_NONBLOCK) { 12588 host_flags |= O_NONBLOCK; 12589 } 12590 if (arg2 & TARGET_O_CLOEXEC) { 12591 host_flags |= O_CLOEXEC; 12592 } 12593 ret = get_errno(eventfd(arg1, host_flags)); 12594 if (ret >= 0) { 12595 fd_trans_register(ret, &target_eventfd_trans); 12596 } 12597 return ret; 12598 } 12599 #endif 12600 #endif /* CONFIG_EVENTFD */ 12601 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 12602 case TARGET_NR_fallocate: 12603 #if TARGET_ABI_BITS == 32 12604 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 12605 target_offset64(arg5, arg6))); 12606 #else 12607 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 12608 #endif 12609 return ret; 12610 #endif 12611 #if defined(CONFIG_SYNC_FILE_RANGE) 12612 #if defined(TARGET_NR_sync_file_range) 12613 case TARGET_NR_sync_file_range: 12614 #if TARGET_ABI_BITS == 32 12615 #if defined(TARGET_MIPS) 12616 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12617 target_offset64(arg5, arg6), arg7)); 12618 #else 12619 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 12620 target_offset64(arg4, arg5), arg6)); 12621 #endif /* !TARGET_MIPS */ 12622 #else 12623 ret = get_errno(sync_file_range(arg1, arg2, 
arg3, arg4)); 12624 #endif 12625 return ret; 12626 #endif 12627 #if defined(TARGET_NR_sync_file_range2) || \ 12628 defined(TARGET_NR_arm_sync_file_range) 12629 #if defined(TARGET_NR_sync_file_range2) 12630 case TARGET_NR_sync_file_range2: 12631 #endif 12632 #if defined(TARGET_NR_arm_sync_file_range) 12633 case TARGET_NR_arm_sync_file_range: 12634 #endif 12635 /* This is like sync_file_range but the arguments are reordered */ 12636 #if TARGET_ABI_BITS == 32 12637 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 12638 target_offset64(arg5, arg6), arg2)); 12639 #else 12640 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 12641 #endif 12642 return ret; 12643 #endif 12644 #endif 12645 #if defined(TARGET_NR_signalfd4) 12646 case TARGET_NR_signalfd4: 12647 return do_signalfd4(arg1, arg2, arg4); 12648 #endif 12649 #if defined(TARGET_NR_signalfd) 12650 case TARGET_NR_signalfd: 12651 return do_signalfd4(arg1, arg2, 0); 12652 #endif 12653 #if defined(CONFIG_EPOLL) 12654 #if defined(TARGET_NR_epoll_create) 12655 case TARGET_NR_epoll_create: 12656 return get_errno(epoll_create(arg1)); 12657 #endif 12658 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 12659 case TARGET_NR_epoll_create1: 12660 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl))); 12661 #endif 12662 #if defined(TARGET_NR_epoll_ctl) 12663 case TARGET_NR_epoll_ctl: 12664 { 12665 struct epoll_event ep; 12666 struct epoll_event *epp = 0; 12667 if (arg4) { 12668 if (arg2 != EPOLL_CTL_DEL) { 12669 struct target_epoll_event *target_ep; 12670 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 12671 return -TARGET_EFAULT; 12672 } 12673 ep.events = tswap32(target_ep->events); 12674 /* 12675 * The epoll_data_t union is just opaque data to the kernel, 12676 * so we transfer all 64 bits across and need not worry what 12677 * actual data type it is. 
12678 */ 12679 ep.data.u64 = tswap64(target_ep->data.u64); 12680 unlock_user_struct(target_ep, arg4, 0); 12681 } 12682 /* 12683 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a 12684 * non-null pointer, even though this argument is ignored. 12685 * 12686 */ 12687 epp = &ep; 12688 } 12689 return get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 12690 } 12691 #endif 12692 12693 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) 12694 #if defined(TARGET_NR_epoll_wait) 12695 case TARGET_NR_epoll_wait: 12696 #endif 12697 #if defined(TARGET_NR_epoll_pwait) 12698 case TARGET_NR_epoll_pwait: 12699 #endif 12700 { 12701 struct target_epoll_event *target_ep; 12702 struct epoll_event *ep; 12703 int epfd = arg1; 12704 int maxevents = arg3; 12705 int timeout = arg4; 12706 12707 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { 12708 return -TARGET_EINVAL; 12709 } 12710 12711 target_ep = lock_user(VERIFY_WRITE, arg2, 12712 maxevents * sizeof(struct target_epoll_event), 1); 12713 if (!target_ep) { 12714 return -TARGET_EFAULT; 12715 } 12716 12717 ep = g_try_new(struct epoll_event, maxevents); 12718 if (!ep) { 12719 unlock_user(target_ep, arg2, 0); 12720 return -TARGET_ENOMEM; 12721 } 12722 12723 switch (num) { 12724 #if defined(TARGET_NR_epoll_pwait) 12725 case TARGET_NR_epoll_pwait: 12726 { 12727 sigset_t *set = NULL; 12728 12729 if (arg5) { 12730 ret = process_sigsuspend_mask(&set, arg5, arg6); 12731 if (ret != 0) { 12732 break; 12733 } 12734 } 12735 12736 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12737 set, SIGSET_T_SIZE)); 12738 12739 if (set) { 12740 finish_sigsuspend_mask(ret); 12741 } 12742 break; 12743 } 12744 #endif 12745 #if defined(TARGET_NR_epoll_wait) 12746 case TARGET_NR_epoll_wait: 12747 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, 12748 NULL, 0)); 12749 break; 12750 #endif 12751 default: 12752 ret = -TARGET_ENOSYS; 12753 } 12754 if (!is_error(ret)) { 12755 int i; 12756 for (i = 0; i < ret; i++) { 
12757 target_ep[i].events = tswap32(ep[i].events); 12758 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 12759 } 12760 unlock_user(target_ep, arg2, 12761 ret * sizeof(struct target_epoll_event)); 12762 } else { 12763 unlock_user(target_ep, arg2, 0); 12764 } 12765 g_free(ep); 12766 return ret; 12767 } 12768 #endif 12769 #endif 12770 #ifdef TARGET_NR_prlimit64 12771 case TARGET_NR_prlimit64: 12772 { 12773 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 12774 struct target_rlimit64 *target_rnew, *target_rold; 12775 struct host_rlimit64 rnew, rold, *rnewp = 0; 12776 int resource = target_to_host_resource(arg2); 12777 12778 if (arg3 && (resource != RLIMIT_AS && 12779 resource != RLIMIT_DATA && 12780 resource != RLIMIT_STACK)) { 12781 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 12782 return -TARGET_EFAULT; 12783 } 12784 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 12785 rnew.rlim_max = tswap64(target_rnew->rlim_max); 12786 unlock_user_struct(target_rnew, arg3, 0); 12787 rnewp = &rnew; 12788 } 12789 12790 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
&rold : 0));
            /* Copy the host rlimit back out to the guest's old-limit buffer. */
            if (!is_error(ret) && arg4) {
                if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                target_rold->rlim_cur = tswap64(rold.rlim_cur);
                target_rold->rlim_max = tswap64(rold.rlim_max);
                unlock_user_struct(target_rold, arg4, 1);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
        {
            /* Write the host's hostname into the guest buffer at arg1
             * (arg2 bytes available). */
            char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
            if (name) {
                ret = get_errno(gethostname(name, arg2));
                unlock_user(name, arg1, arg2);
            } else {
                ret = -TARGET_EFAULT;
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Guest address faulted: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;

        }
        /* NOTE(review): on the fault path above, mem_value is still
         * uninitialized when compared/returned here; the queued SIGSEGV
         * is expected to preempt the result — TODO confirm. */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Grab a free slot in the g_posix_timers table for the host timer. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an encoded handle (magic | table index),
                 * not the raw host timer_t. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; only copy back if the guest asked. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as timer_settime above, but the guest itimerspec uses the
         * 64-bit time layout (itimerspec64 converters). */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the table slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Flags (TFD_NONBLOCK/TFD_CLOEXEC share the fcntl flag encoding)
         * are translated through the generic fcntl flag table. */
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit time variant: differs only in the copy-out converter. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* new_value is optional at this level; pass NULL through. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit time variant of timerfd_settime above. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* arg1 is the guest pointer to the memfd name string. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* A fresh fd carries no translation state from a previous user. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            /* Each offset pointer is optional; read it in if supplied. */
            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter.
*/
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            /* On success, write the kernel-updated offsets back to the
             * guest pointers that were supplied. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* NOTE(review): p/p2 may be NULL on the EFAULT path; assumes
             * unlock_user tolerates a NULL host pointer — TODO confirm. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        /* Any syscall number not handled above fails with ENOSYS and is
         * logged through the "unimplemented" log mask. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}

/*
 * Guest syscall entry point.
 *
 * Wraps do_syscall1() (the big dispatch switch above) with the
 * cross-cutting hooks that must run for every syscall:
 *  - record_syscall_start()/record_syscall_return() instrumentation,
 *  - optional strace-style logging when LOG_STRACE is enabled,
 *  - the DEBUG_ERESTARTSYS restart-exercising hack.
 *
 * Arguments are the raw guest syscall number and up to eight abi_long
 * arguments as marshalled by the per-architecture main loop; the return
 * value is the guest-visible syscall result (negative TARGET_E* on error).
 */
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        /* Toggles on every call: odd invocations are bounced back. */
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}