/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"

#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])


#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
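
/*
 * The _syscallN() macros below wrap host system calls directly via
 * syscall(2), bypassing any libc wrapper, so the raw host kernel
 * behaviour (a return value of -1 with errno set on failure) is what
 * the callers in this file observe.
 */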
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
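/*
 * Each wrapper below is only compiled in when both sides can use it:
 * the target must define the syscall number (TARGET_NR_xxx) and the
 * host kernel headers must provide the matching __NR_xxx; if either is
 * missing, the wrapper is simply omitted.
 */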
#if defined(TARGET_NR_getdents) && defined(__NR_getdents)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
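
/*
 * Each entry of this table pairs a target flag mask/value with the
 * corresponding host flag mask/value, and is used to translate
 * open()/fcntl() flag bits between the two encodings.
 */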
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
    { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
    { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
    { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
    { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
    { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
    { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
    { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
    { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
    { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
    { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};

typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

static TargetFdTrans **target_fd_trans;

static unsigned int target_fd_max;

static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->host_to_target_data;
    }
    return NULL;
}

static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
{
    if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
        return target_fd_trans[fd]->target_to_host_addr;
    }
    return NULL;
}

static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}

static void fd_trans_unregister(int fd)
{
    if (fd >= 0 && fd < target_fd_max) {
        target_fd_trans[fd] = NULL;
    }
}

static void fd_trans_dup(int oldfd, int newfd)
{
    fd_trans_unregister(newfd);
    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
        fd_trans_register(newfd, target_fd_trans[oldfd]);
    }
}

static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}

#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
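/*
 * The program break is emulated entirely in QEMU: target_original_brk is
 * the initial break, target_brk the current one, and brk_page the end of
 * the last host page already reserved for the guest heap.  Growing past
 * brk_page maps fresh anonymous memory at that address (deliberately
 * without MAP_FIXED so existing mappings are never clobbered), while
 * shrinking the break never releases memory back to the host.
 */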
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
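
/*
 * Resource limit values: the target's RLIM_INFINITY encoding may differ
 * from the host's, and an abi_ulong may be narrower than the host's
 * rlim_t, so values that cannot be represented are saturated to infinity
 * in both conversion directions.
 */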
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif

static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
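    /* On those targets the first descriptor is returned as the syscall's
     * result and the second one is placed in a CPU register, instead of
     * both being stored through the guest's pipedes pointer.
     */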
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}

static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.
         */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

        set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
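            /* Fetch the guest's BPF program and byte-swap each
             * sock_filter entry before handing it to the host
             * setsockopt().
             */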
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
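        /* Shared path: fetch the option into a host int and copy back
         * either a single byte or a 32-bit value, depending on the
         * length the guest supplied in optlen.
         */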
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths.
*/ 1984 if (!vec[i].iov_base) { 1985 if (i == 0) { 1986 err = EFAULT; 1987 goto fail; 1988 } else { 1989 bad_address = true; 1990 } 1991 } 1992 if (bad_address) { 1993 len = 0; 1994 } 1995 if (len > max_len - total_len) { 1996 len = max_len - total_len; 1997 } 1998 } 1999 vec[i].iov_len = len; 2000 total_len += len; 2001 } 2002 2003 unlock_user(target_vec, target_addr, 0); 2004 return vec; 2005 2006 fail: 2007 while (--i >= 0) { 2008 if (tswapal(target_vec[i].iov_len) > 0) { 2009 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0); 2010 } 2011 } 2012 unlock_user(target_vec, target_addr, 0); 2013 fail2: 2014 g_free(vec); 2015 errno = err; 2016 return NULL; 2017 } 2018 2019 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr, 2020 int count, int copy) 2021 { 2022 struct target_iovec *target_vec; 2023 int i; 2024 2025 target_vec = lock_user(VERIFY_READ, target_addr, 2026 count * sizeof(struct target_iovec), 1); 2027 if (target_vec) { 2028 for (i = 0; i < count; i++) { 2029 abi_ulong base = tswapal(target_vec[i].iov_base); 2030 abi_long len = tswapal(target_vec[i].iov_len); 2031 if (len < 0) { 2032 break; 2033 } 2034 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0); 2035 } 2036 unlock_user(target_vec, target_addr, 0); 2037 } 2038 2039 g_free(vec); 2040 } 2041 2042 static inline int target_to_host_sock_type(int *type) 2043 { 2044 int host_type = 0; 2045 int target_type = *type; 2046 2047 switch (target_type & TARGET_SOCK_TYPE_MASK) { 2048 case TARGET_SOCK_DGRAM: 2049 host_type = SOCK_DGRAM; 2050 break; 2051 case TARGET_SOCK_STREAM: 2052 host_type = SOCK_STREAM; 2053 break; 2054 default: 2055 host_type = target_type & TARGET_SOCK_TYPE_MASK; 2056 break; 2057 } 2058 if (target_type & TARGET_SOCK_CLOEXEC) { 2059 #if defined(SOCK_CLOEXEC) 2060 host_type |= SOCK_CLOEXEC; 2061 #else 2062 return -TARGET_EINVAL; 2063 #endif 2064 } 2065 if (target_type & TARGET_SOCK_NONBLOCK) { 2066 #if defined(SOCK_NONBLOCK) 2067 host_type |= SOCK_NONBLOCK; 2068 #elif !defined(O_NONBLOCK) 2069 return -TARGET_EINVAL; 2070 #endif 2071 } 2072 *type = host_type; 2073 return 0; 2074 } 2075 2076 /* Try to emulate socket type flags after socket creation. */ 2077 static int sock_flags_fixup(int fd, int target_type) 2078 { 2079 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK) 2080 if (target_type & TARGET_SOCK_NONBLOCK) { 2081 int flags = fcntl(fd, F_GETFL); 2082 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) { 2083 close(fd); 2084 return -TARGET_EINVAL; 2085 } 2086 } 2087 #endif 2088 return fd; 2089 } 2090 2091 static abi_long packet_target_to_host_sockaddr(void *host_addr, 2092 abi_ulong target_addr, 2093 socklen_t len) 2094 { 2095 struct sockaddr *addr = host_addr; 2096 struct target_sockaddr *target_saddr; 2097 2098 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); 2099 if (!target_saddr) { 2100 return -TARGET_EFAULT; 2101 } 2102 2103 memcpy(addr, target_saddr, len); 2104 addr->sa_family = tswap16(target_saddr->sa_family); 2105 /* spkt_protocol is big-endian */ 2106 2107 unlock_user(target_saddr, target_addr, 0); 2108 return 0; 2109 } 2110 2111 static TargetFdTrans target_packet_trans = { 2112 .target_to_host_addr = packet_target_to_host_sockaddr, 2113 }; 2114 2115 /* do_socket() Must return target values and target errnos. 
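 *
 * Rough sketch of the flow below (guest arguments are hypothetical, error
 * handling omitted): a guest socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0)
 * becomes approximately
 *
 *   int type = TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK;
 *   target_to_host_sock_type(&type);            map type and flags
 *   fd = socket(AF_INET, type, 0);              host socket()
 *   fd = sock_flags_fixup(fd, TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK);
 *
 * where sock_flags_fixup() emulates SOCK_NONBLOCK with
 * fcntl(F_SETFL, O_NONBLOCK) on hosts that lack the socket flag.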
*/ 2116 static abi_long do_socket(int domain, int type, int protocol) 2117 { 2118 int target_type = type; 2119 int ret; 2120 2121 ret = target_to_host_sock_type(&type); 2122 if (ret) { 2123 return ret; 2124 } 2125 2126 if (domain == PF_NETLINK) 2127 return -TARGET_EAFNOSUPPORT; 2128 2129 if (domain == AF_PACKET || 2130 (domain == AF_INET && type == SOCK_PACKET)) { 2131 protocol = tswap16(protocol); 2132 } 2133 2134 ret = get_errno(socket(domain, type, protocol)); 2135 if (ret >= 0) { 2136 ret = sock_flags_fixup(ret, target_type); 2137 if (type == SOCK_PACKET) { 2138 /* Manage an obsolete case : 2139 * if socket type is SOCK_PACKET, bind by name 2140 */ 2141 fd_trans_register(ret, &target_packet_trans); 2142 } 2143 } 2144 return ret; 2145 } 2146 2147 /* do_bind() Must return target values and target errnos. */ 2148 static abi_long do_bind(int sockfd, abi_ulong target_addr, 2149 socklen_t addrlen) 2150 { 2151 void *addr; 2152 abi_long ret; 2153 2154 if ((int)addrlen < 0) { 2155 return -TARGET_EINVAL; 2156 } 2157 2158 addr = alloca(addrlen+1); 2159 2160 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 2161 if (ret) 2162 return ret; 2163 2164 return get_errno(bind(sockfd, addr, addrlen)); 2165 } 2166 2167 /* do_connect() Must return target values and target errnos. */ 2168 static abi_long do_connect(int sockfd, abi_ulong target_addr, 2169 socklen_t addrlen) 2170 { 2171 void *addr; 2172 abi_long ret; 2173 2174 if ((int)addrlen < 0) { 2175 return -TARGET_EINVAL; 2176 } 2177 2178 addr = alloca(addrlen+1); 2179 2180 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen); 2181 if (ret) 2182 return ret; 2183 2184 return get_errno(connect(sockfd, addr, addrlen)); 2185 } 2186 2187 /* do_sendrecvmsg_locked() Must return target values and target errnos. */ 2188 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, 2189 int flags, int send) 2190 { 2191 abi_long ret, len; 2192 struct msghdr msg; 2193 int count; 2194 struct iovec *vec; 2195 abi_ulong target_vec; 2196 2197 if (msgp->msg_name) { 2198 msg.msg_namelen = tswap32(msgp->msg_namelen); 2199 msg.msg_name = alloca(msg.msg_namelen+1); 2200 ret = target_to_host_sockaddr(fd, msg.msg_name, 2201 tswapal(msgp->msg_name), 2202 msg.msg_namelen); 2203 if (ret) { 2204 goto out2; 2205 } 2206 } else { 2207 msg.msg_name = NULL; 2208 msg.msg_namelen = 0; 2209 } 2210 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); 2211 msg.msg_control = alloca(msg.msg_controllen); 2212 msg.msg_flags = tswap32(msgp->msg_flags); 2213 2214 count = tswapal(msgp->msg_iovlen); 2215 target_vec = tswapal(msgp->msg_iov); 2216 vec = lock_iovec(send ? 
VERIFY_READ : VERIFY_WRITE, 2217 target_vec, count, send); 2218 if (vec == NULL) { 2219 ret = -host_to_target_errno(errno); 2220 goto out2; 2221 } 2222 msg.msg_iovlen = count; 2223 msg.msg_iov = vec; 2224 2225 if (send) { 2226 ret = target_to_host_cmsg(&msg, msgp); 2227 if (ret == 0) 2228 ret = get_errno(sendmsg(fd, &msg, flags)); 2229 } else { 2230 ret = get_errno(recvmsg(fd, &msg, flags)); 2231 if (!is_error(ret)) { 2232 len = ret; 2233 ret = host_to_target_cmsg(msgp, &msg); 2234 if (!is_error(ret)) { 2235 msgp->msg_namelen = tswap32(msg.msg_namelen); 2236 if (msg.msg_name != NULL) { 2237 ret = host_to_target_sockaddr(tswapal(msgp->msg_name), 2238 msg.msg_name, msg.msg_namelen); 2239 if (ret) { 2240 goto out; 2241 } 2242 } 2243 2244 ret = len; 2245 } 2246 } 2247 } 2248 2249 out: 2250 unlock_iovec(vec, target_vec, count, !send); 2251 out2: 2252 return ret; 2253 } 2254 2255 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg, 2256 int flags, int send) 2257 { 2258 abi_long ret; 2259 struct target_msghdr *msgp; 2260 2261 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE, 2262 msgp, 2263 target_msg, 2264 send ? 1 : 0)) { 2265 return -TARGET_EFAULT; 2266 } 2267 ret = do_sendrecvmsg_locked(fd, msgp, flags, send); 2268 unlock_user_struct(msgp, target_msg, send ? 0 : 1); 2269 return ret; 2270 } 2271 2272 /* We don't rely on the C library to have sendmmsg/recvmmsg support, 2273 * so it might not have this *mmsg-specific flag either. 2274 */ 2275 #ifndef MSG_WAITFORONE 2276 #define MSG_WAITFORONE 0x10000 2277 #endif 2278 2279 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec, 2280 unsigned int vlen, unsigned int flags, 2281 int send) 2282 { 2283 struct target_mmsghdr *mmsgp; 2284 abi_long ret = 0; 2285 int i; 2286 2287 if (vlen > UIO_MAXIOV) { 2288 vlen = UIO_MAXIOV; 2289 } 2290 2291 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1); 2292 if (!mmsgp) { 2293 return -TARGET_EFAULT; 2294 } 2295 2296 for (i = 0; i < vlen; i++) { 2297 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send); 2298 if (is_error(ret)) { 2299 break; 2300 } 2301 mmsgp[i].msg_len = tswap32(ret); 2302 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ 2303 if (flags & MSG_WAITFORONE) { 2304 flags |= MSG_DONTWAIT; 2305 } 2306 } 2307 2308 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i); 2309 2310 /* Return number of datagrams sent if we sent any at all; 2311 * otherwise return the error. 2312 */ 2313 if (i) { 2314 return i; 2315 } 2316 return ret; 2317 } 2318 2319 /* If we don't have a system accept4() then just call accept. 2320 * The callsites to do_accept4() will ensure that they don't 2321 * pass a non-zero flags argument in this config. 2322 */ 2323 #ifndef CONFIG_ACCEPT4 2324 static inline int accept4(int sockfd, struct sockaddr *addr, 2325 socklen_t *addrlen, int flags) 2326 { 2327 assert(flags == 0); 2328 return accept(sockfd, addr, addrlen); 2329 } 2330 #endif 2331 2332 /* do_accept4() Must return target values and target errnos. 
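 *
 * Note (descriptive only): do_accept4() maps the guest's flags with
 * target_to_host_bitmask(flags, fcntl_flags_tbl) before the host call, and
 * a zero target_addr skips the sockaddr copy-out entirely, i.e. it behaves
 * like accept(fd, NULL, NULL). The plain accept() path in do_socketcall()
 * below simply calls do_accept4() with flags == 0, which is exactly what
 * the CONFIG_ACCEPT4 fallback shim above asserts.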
*/ 2333 static abi_long do_accept4(int fd, abi_ulong target_addr, 2334 abi_ulong target_addrlen_addr, int flags) 2335 { 2336 socklen_t addrlen; 2337 void *addr; 2338 abi_long ret; 2339 int host_flags; 2340 2341 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 2342 2343 if (target_addr == 0) { 2344 return get_errno(accept4(fd, NULL, NULL, host_flags)); 2345 } 2346 2347 /* linux returns EINVAL if addrlen pointer is invalid */ 2348 if (get_user_u32(addrlen, target_addrlen_addr)) 2349 return -TARGET_EINVAL; 2350 2351 if ((int)addrlen < 0) { 2352 return -TARGET_EINVAL; 2353 } 2354 2355 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2356 return -TARGET_EINVAL; 2357 2358 addr = alloca(addrlen); 2359 2360 ret = get_errno(accept4(fd, addr, &addrlen, host_flags)); 2361 if (!is_error(ret)) { 2362 host_to_target_sockaddr(target_addr, addr, addrlen); 2363 if (put_user_u32(addrlen, target_addrlen_addr)) 2364 ret = -TARGET_EFAULT; 2365 } 2366 return ret; 2367 } 2368 2369 /* do_getpeername() Must return target values and target errnos. */ 2370 static abi_long do_getpeername(int fd, abi_ulong target_addr, 2371 abi_ulong target_addrlen_addr) 2372 { 2373 socklen_t addrlen; 2374 void *addr; 2375 abi_long ret; 2376 2377 if (get_user_u32(addrlen, target_addrlen_addr)) 2378 return -TARGET_EFAULT; 2379 2380 if ((int)addrlen < 0) { 2381 return -TARGET_EINVAL; 2382 } 2383 2384 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2385 return -TARGET_EFAULT; 2386 2387 addr = alloca(addrlen); 2388 2389 ret = get_errno(getpeername(fd, addr, &addrlen)); 2390 if (!is_error(ret)) { 2391 host_to_target_sockaddr(target_addr, addr, addrlen); 2392 if (put_user_u32(addrlen, target_addrlen_addr)) 2393 ret = -TARGET_EFAULT; 2394 } 2395 return ret; 2396 } 2397 2398 /* do_getsockname() Must return target values and target errnos. */ 2399 static abi_long do_getsockname(int fd, abi_ulong target_addr, 2400 abi_ulong target_addrlen_addr) 2401 { 2402 socklen_t addrlen; 2403 void *addr; 2404 abi_long ret; 2405 2406 if (get_user_u32(addrlen, target_addrlen_addr)) 2407 return -TARGET_EFAULT; 2408 2409 if ((int)addrlen < 0) { 2410 return -TARGET_EINVAL; 2411 } 2412 2413 if (!access_ok(VERIFY_WRITE, target_addr, addrlen)) 2414 return -TARGET_EFAULT; 2415 2416 addr = alloca(addrlen); 2417 2418 ret = get_errno(getsockname(fd, addr, &addrlen)); 2419 if (!is_error(ret)) { 2420 host_to_target_sockaddr(target_addr, addr, addrlen); 2421 if (put_user_u32(addrlen, target_addrlen_addr)) 2422 ret = -TARGET_EFAULT; 2423 } 2424 return ret; 2425 } 2426 2427 /* do_socketpair() Must return target values and target errnos. */ 2428 static abi_long do_socketpair(int domain, int type, int protocol, 2429 abi_ulong target_tab_addr) 2430 { 2431 int tab[2]; 2432 abi_long ret; 2433 2434 target_to_host_sock_type(&type); 2435 2436 ret = get_errno(socketpair(domain, type, protocol, tab)); 2437 if (!is_error(ret)) { 2438 if (put_user_s32(tab[0], target_tab_addr) 2439 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0]))) 2440 ret = -TARGET_EFAULT; 2441 } 2442 return ret; 2443 } 2444 2445 /* do_sendto() Must return target values and target errnos. 
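 *
 * do_sendto() also backs plain send(): do_socketcall() below passes
 * target_addr == 0 for SOCKOP_send, so schematically
 *
 *   do_sendto(fd, msg, len, flags, 0, 0);             guest send()
 *   do_sendto(fd, msg, len, flags, addr, addrlen);    guest sendto()
 *
 * and only the second form converts a sockaddr with
 * target_to_host_sockaddr() before calling the host sendto().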
*/ 2446 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, 2447 abi_ulong target_addr, socklen_t addrlen) 2448 { 2449 void *addr; 2450 void *host_msg; 2451 abi_long ret; 2452 2453 if ((int)addrlen < 0) { 2454 return -TARGET_EINVAL; 2455 } 2456 2457 host_msg = lock_user(VERIFY_READ, msg, len, 1); 2458 if (!host_msg) 2459 return -TARGET_EFAULT; 2460 if (target_addr) { 2461 addr = alloca(addrlen+1); 2462 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen); 2463 if (ret) { 2464 unlock_user(host_msg, msg, 0); 2465 return ret; 2466 } 2467 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); 2468 } else { 2469 ret = get_errno(send(fd, host_msg, len, flags)); 2470 } 2471 unlock_user(host_msg, msg, 0); 2472 return ret; 2473 } 2474 2475 /* do_recvfrom() Must return target values and target errnos. */ 2476 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags, 2477 abi_ulong target_addr, 2478 abi_ulong target_addrlen) 2479 { 2480 socklen_t addrlen; 2481 void *addr; 2482 void *host_msg; 2483 abi_long ret; 2484 2485 host_msg = lock_user(VERIFY_WRITE, msg, len, 0); 2486 if (!host_msg) 2487 return -TARGET_EFAULT; 2488 if (target_addr) { 2489 if (get_user_u32(addrlen, target_addrlen)) { 2490 ret = -TARGET_EFAULT; 2491 goto fail; 2492 } 2493 if ((int)addrlen < 0) { 2494 ret = -TARGET_EINVAL; 2495 goto fail; 2496 } 2497 addr = alloca(addrlen); 2498 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen)); 2499 } else { 2500 addr = NULL; /* To keep compiler quiet. */ 2501 ret = get_errno(qemu_recv(fd, host_msg, len, flags)); 2502 } 2503 if (!is_error(ret)) { 2504 if (target_addr) { 2505 host_to_target_sockaddr(target_addr, addr, addrlen); 2506 if (put_user_u32(addrlen, target_addrlen)) { 2507 ret = -TARGET_EFAULT; 2508 goto fail; 2509 } 2510 } 2511 unlock_user(host_msg, msg, len); 2512 } else { 2513 fail: 2514 unlock_user(host_msg, msg, 0); 2515 } 2516 return ret; 2517 } 2518 2519 #ifdef TARGET_NR_socketcall 2520 /* do_socketcall() Must return target values and target errnos. 
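 *
 * Rough shape of the multiplexer (guest values are hypothetical): a guest
 * bind(fd, addr, addrlen) issued through socketcall(2) arrives here as
 *
 *   num  = SOCKOP_bind;
 *   vptr = guest address of the packed arguments { fd, addr, addrlen };
 *
 * ac[SOCKOP_bind] says three abi_long arguments are fetched from vptr with
 * get_user_ual() before dispatching to do_bind().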
*/ 2521 static abi_long do_socketcall(int num, abi_ulong vptr) 2522 { 2523 static const unsigned ac[] = { /* number of arguments per call */ 2524 [SOCKOP_socket] = 3, /* domain, type, protocol */ 2525 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */ 2526 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */ 2527 [SOCKOP_listen] = 2, /* sockfd, backlog */ 2528 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */ 2529 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */ 2530 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */ 2531 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */ 2532 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */ 2533 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */ 2534 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */ 2535 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2536 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */ 2537 [SOCKOP_shutdown] = 2, /* sockfd, how */ 2538 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */ 2539 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */ 2540 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */ 2541 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */ 2542 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2543 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */ 2544 }; 2545 abi_long a[6]; /* max 6 args */ 2546 2547 /* first, collect the arguments in a[] according to ac[] */ 2548 if (num >= 0 && num < ARRAY_SIZE(ac)) { 2549 unsigned i; 2550 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */ 2551 for (i = 0; i < ac[num]; ++i) { 2552 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) { 2553 return -TARGET_EFAULT; 2554 } 2555 } 2556 } 2557 2558 /* now when we have the args, actually handle the call */ 2559 switch (num) { 2560 case SOCKOP_socket: /* domain, type, protocol */ 2561 return do_socket(a[0], a[1], a[2]); 2562 case SOCKOP_bind: /* sockfd, addr, addrlen */ 2563 return do_bind(a[0], a[1], a[2]); 2564 case SOCKOP_connect: /* sockfd, addr, addrlen */ 2565 return do_connect(a[0], a[1], a[2]); 2566 case SOCKOP_listen: /* sockfd, backlog */ 2567 return get_errno(listen(a[0], a[1])); 2568 case SOCKOP_accept: /* sockfd, addr, addrlen */ 2569 return do_accept4(a[0], a[1], a[2], 0); 2570 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */ 2571 return do_accept4(a[0], a[1], a[2], a[3]); 2572 case SOCKOP_getsockname: /* sockfd, addr, addrlen */ 2573 return do_getsockname(a[0], a[1], a[2]); 2574 case SOCKOP_getpeername: /* sockfd, addr, addrlen */ 2575 return do_getpeername(a[0], a[1], a[2]); 2576 case SOCKOP_socketpair: /* domain, type, protocol, tab */ 2577 return do_socketpair(a[0], a[1], a[2], a[3]); 2578 case SOCKOP_send: /* sockfd, msg, len, flags */ 2579 return do_sendto(a[0], a[1], a[2], a[3], 0, 0); 2580 case SOCKOP_recv: /* sockfd, msg, len, flags */ 2581 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0); 2582 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */ 2583 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]); 2584 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */ 2585 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]); 2586 case SOCKOP_shutdown: /* sockfd, how */ 2587 return get_errno(shutdown(a[0], a[1])); 2588 case SOCKOP_sendmsg: /* sockfd, msg, flags */ 2589 return do_sendrecvmsg(a[0], a[1], a[2], 1); 2590 case SOCKOP_recvmsg: /* sockfd, msg, flags */ 2591 return do_sendrecvmsg(a[0], a[1], a[2], 0); 2592 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */ 2593 return 
do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1); 2594 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */ 2595 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0); 2596 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */ 2597 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]); 2598 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */ 2599 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]); 2600 default: 2601 gemu_log("Unsupported socketcall: %d\n", num); 2602 return -TARGET_ENOSYS; 2603 } 2604 } 2605 #endif 2606 2607 #define N_SHM_REGIONS 32 2608 2609 static struct shm_region { 2610 abi_ulong start; 2611 abi_ulong size; 2612 bool in_use; 2613 } shm_regions[N_SHM_REGIONS]; 2614 2615 struct target_semid_ds 2616 { 2617 struct target_ipc_perm sem_perm; 2618 abi_ulong sem_otime; 2619 #if !defined(TARGET_PPC64) 2620 abi_ulong __unused1; 2621 #endif 2622 abi_ulong sem_ctime; 2623 #if !defined(TARGET_PPC64) 2624 abi_ulong __unused2; 2625 #endif 2626 abi_ulong sem_nsems; 2627 abi_ulong __unused3; 2628 abi_ulong __unused4; 2629 }; 2630 2631 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip, 2632 abi_ulong target_addr) 2633 { 2634 struct target_ipc_perm *target_ip; 2635 struct target_semid_ds *target_sd; 2636 2637 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2638 return -TARGET_EFAULT; 2639 target_ip = &(target_sd->sem_perm); 2640 host_ip->__key = tswap32(target_ip->__key); 2641 host_ip->uid = tswap32(target_ip->uid); 2642 host_ip->gid = tswap32(target_ip->gid); 2643 host_ip->cuid = tswap32(target_ip->cuid); 2644 host_ip->cgid = tswap32(target_ip->cgid); 2645 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2646 host_ip->mode = tswap32(target_ip->mode); 2647 #else 2648 host_ip->mode = tswap16(target_ip->mode); 2649 #endif 2650 #if defined(TARGET_PPC) 2651 host_ip->__seq = tswap32(target_ip->__seq); 2652 #else 2653 host_ip->__seq = tswap16(target_ip->__seq); 2654 #endif 2655 unlock_user_struct(target_sd, target_addr, 0); 2656 return 0; 2657 } 2658 2659 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr, 2660 struct ipc_perm *host_ip) 2661 { 2662 struct target_ipc_perm *target_ip; 2663 struct target_semid_ds *target_sd; 2664 2665 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2666 return -TARGET_EFAULT; 2667 target_ip = &(target_sd->sem_perm); 2668 target_ip->__key = tswap32(host_ip->__key); 2669 target_ip->uid = tswap32(host_ip->uid); 2670 target_ip->gid = tswap32(host_ip->gid); 2671 target_ip->cuid = tswap32(host_ip->cuid); 2672 target_ip->cgid = tswap32(host_ip->cgid); 2673 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC) 2674 target_ip->mode = tswap32(host_ip->mode); 2675 #else 2676 target_ip->mode = tswap16(host_ip->mode); 2677 #endif 2678 #if defined(TARGET_PPC) 2679 target_ip->__seq = tswap32(host_ip->__seq); 2680 #else 2681 target_ip->__seq = tswap16(host_ip->__seq); 2682 #endif 2683 unlock_user_struct(target_sd, target_addr, 1); 2684 return 0; 2685 } 2686 2687 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd, 2688 abi_ulong target_addr) 2689 { 2690 struct target_semid_ds *target_sd; 2691 2692 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 2693 return -TARGET_EFAULT; 2694 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr)) 2695 return -TARGET_EFAULT; 2696 host_sd->sem_nsems = tswapal(target_sd->sem_nsems); 2697 host_sd->sem_otime = tswapal(target_sd->sem_otime); 2698 host_sd->sem_ctime = 
tswapal(target_sd->sem_ctime); 2699 unlock_user_struct(target_sd, target_addr, 0); 2700 return 0; 2701 } 2702 2703 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr, 2704 struct semid_ds *host_sd) 2705 { 2706 struct target_semid_ds *target_sd; 2707 2708 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 2709 return -TARGET_EFAULT; 2710 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm))) 2711 return -TARGET_EFAULT; 2712 target_sd->sem_nsems = tswapal(host_sd->sem_nsems); 2713 target_sd->sem_otime = tswapal(host_sd->sem_otime); 2714 target_sd->sem_ctime = tswapal(host_sd->sem_ctime); 2715 unlock_user_struct(target_sd, target_addr, 1); 2716 return 0; 2717 } 2718 2719 struct target_seminfo { 2720 int semmap; 2721 int semmni; 2722 int semmns; 2723 int semmnu; 2724 int semmsl; 2725 int semopm; 2726 int semume; 2727 int semusz; 2728 int semvmx; 2729 int semaem; 2730 }; 2731 2732 static inline abi_long host_to_target_seminfo(abi_ulong target_addr, 2733 struct seminfo *host_seminfo) 2734 { 2735 struct target_seminfo *target_seminfo; 2736 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0)) 2737 return -TARGET_EFAULT; 2738 __put_user(host_seminfo->semmap, &target_seminfo->semmap); 2739 __put_user(host_seminfo->semmni, &target_seminfo->semmni); 2740 __put_user(host_seminfo->semmns, &target_seminfo->semmns); 2741 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu); 2742 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl); 2743 __put_user(host_seminfo->semopm, &target_seminfo->semopm); 2744 __put_user(host_seminfo->semume, &target_seminfo->semume); 2745 __put_user(host_seminfo->semusz, &target_seminfo->semusz); 2746 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx); 2747 __put_user(host_seminfo->semaem, &target_seminfo->semaem); 2748 unlock_user_struct(target_seminfo, target_addr, 1); 2749 return 0; 2750 } 2751 2752 union semun { 2753 int val; 2754 struct semid_ds *buf; 2755 unsigned short *array; 2756 struct seminfo *__buf; 2757 }; 2758 2759 union target_semun { 2760 int val; 2761 abi_ulong buf; 2762 abi_ulong array; 2763 abi_ulong __buf; 2764 }; 2765 2766 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, 2767 abi_ulong target_addr) 2768 { 2769 int nsems; 2770 unsigned short *array; 2771 union semun semun; 2772 struct semid_ds semid_ds; 2773 int i, ret; 2774 2775 semun.buf = &semid_ds; 2776 2777 ret = semctl(semid, 0, IPC_STAT, semun); 2778 if (ret == -1) 2779 return get_errno(ret); 2780 2781 nsems = semid_ds.sem_nsems; 2782 2783 *host_array = g_try_new(unsigned short, nsems); 2784 if (!*host_array) { 2785 return -TARGET_ENOMEM; 2786 } 2787 array = lock_user(VERIFY_READ, target_addr, 2788 nsems*sizeof(unsigned short), 1); 2789 if (!array) { 2790 g_free(*host_array); 2791 return -TARGET_EFAULT; 2792 } 2793 2794 for(i=0; i<nsems; i++) { 2795 __get_user((*host_array)[i], &array[i]); 2796 } 2797 unlock_user(array, target_addr, 0); 2798 2799 return 0; 2800 } 2801 2802 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr, 2803 unsigned short **host_array) 2804 { 2805 int nsems; 2806 unsigned short *array; 2807 union semun semun; 2808 struct semid_ds semid_ds; 2809 int i, ret; 2810 2811 semun.buf = &semid_ds; 2812 2813 ret = semctl(semid, 0, IPC_STAT, semun); 2814 if (ret == -1) 2815 return get_errno(ret); 2816 2817 nsems = semid_ds.sem_nsems; 2818 2819 array = lock_user(VERIFY_WRITE, target_addr, 2820 nsems*sizeof(unsigned short), 0); 2821 if (!array) 2822 return 
-TARGET_EFAULT; 2823 2824 for(i=0; i<nsems; i++) { 2825 __put_user((*host_array)[i], &array[i]); 2826 } 2827 g_free(*host_array); 2828 unlock_user(array, target_addr, 1); 2829 2830 return 0; 2831 } 2832 2833 static inline abi_long do_semctl(int semid, int semnum, int cmd, 2834 abi_ulong target_arg) 2835 { 2836 union target_semun target_su = { .buf = target_arg }; 2837 union semun arg; 2838 struct semid_ds dsarg; 2839 unsigned short *array = NULL; 2840 struct seminfo seminfo; 2841 abi_long ret = -TARGET_EINVAL; 2842 abi_long err; 2843 cmd &= 0xff; 2844 2845 switch( cmd ) { 2846 case GETVAL: 2847 case SETVAL: 2848 /* In 64 bit cross-endian situations, we will erroneously pick up 2849 * the wrong half of the union for the "val" element. To rectify 2850 * this, the entire 8-byte structure is byteswapped, followed by 2851 * a swap of the 4 byte val field. In other cases, the data is 2852 * already in proper host byte order. */ 2853 if (sizeof(target_su.val) != (sizeof(target_su.buf))) { 2854 target_su.buf = tswapal(target_su.buf); 2855 arg.val = tswap32(target_su.val); 2856 } else { 2857 arg.val = target_su.val; 2858 } 2859 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2860 break; 2861 case GETALL: 2862 case SETALL: 2863 err = target_to_host_semarray(semid, &array, target_su.array); 2864 if (err) 2865 return err; 2866 arg.array = array; 2867 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2868 err = host_to_target_semarray(semid, target_su.array, &array); 2869 if (err) 2870 return err; 2871 break; 2872 case IPC_STAT: 2873 case IPC_SET: 2874 case SEM_STAT: 2875 err = target_to_host_semid_ds(&dsarg, target_su.buf); 2876 if (err) 2877 return err; 2878 arg.buf = &dsarg; 2879 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2880 err = host_to_target_semid_ds(target_su.buf, &dsarg); 2881 if (err) 2882 return err; 2883 break; 2884 case IPC_INFO: 2885 case SEM_INFO: 2886 arg.__buf = &seminfo; 2887 ret = get_errno(semctl(semid, semnum, cmd, arg)); 2888 err = host_to_target_seminfo(target_su.__buf, &seminfo); 2889 if (err) 2890 return err; 2891 break; 2892 case IPC_RMID: 2893 case GETPID: 2894 case GETNCNT: 2895 case GETZCNT: 2896 ret = get_errno(semctl(semid, semnum, cmd, NULL)); 2897 break; 2898 } 2899 2900 return ret; 2901 } 2902 2903 struct target_sembuf { 2904 unsigned short sem_num; 2905 short sem_op; 2906 short sem_flg; 2907 }; 2908 2909 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf, 2910 abi_ulong target_addr, 2911 unsigned nsops) 2912 { 2913 struct target_sembuf *target_sembuf; 2914 int i; 2915 2916 target_sembuf = lock_user(VERIFY_READ, target_addr, 2917 nsops*sizeof(struct target_sembuf), 1); 2918 if (!target_sembuf) 2919 return -TARGET_EFAULT; 2920 2921 for(i=0; i<nsops; i++) { 2922 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num); 2923 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op); 2924 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg); 2925 } 2926 2927 unlock_user(target_sembuf, target_addr, 0); 2928 2929 return 0; 2930 } 2931 2932 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops) 2933 { 2934 struct sembuf sops[nsops]; 2935 2936 if (target_to_host_sembuf(sops, ptr, nsops)) 2937 return -TARGET_EFAULT; 2938 2939 return get_errno(semop(semid, sops, nsops)); 2940 } 2941 2942 struct target_msqid_ds 2943 { 2944 struct target_ipc_perm msg_perm; 2945 abi_ulong msg_stime; 2946 #if TARGET_ABI_BITS == 32 2947 abi_ulong __unused1; 2948 #endif 2949 abi_ulong msg_rtime; 2950 #if TARGET_ABI_BITS == 32 2951 
abi_ulong __unused2; 2952 #endif 2953 abi_ulong msg_ctime; 2954 #if TARGET_ABI_BITS == 32 2955 abi_ulong __unused3; 2956 #endif 2957 abi_ulong __msg_cbytes; 2958 abi_ulong msg_qnum; 2959 abi_ulong msg_qbytes; 2960 abi_ulong msg_lspid; 2961 abi_ulong msg_lrpid; 2962 abi_ulong __unused4; 2963 abi_ulong __unused5; 2964 }; 2965 2966 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md, 2967 abi_ulong target_addr) 2968 { 2969 struct target_msqid_ds *target_md; 2970 2971 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1)) 2972 return -TARGET_EFAULT; 2973 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr)) 2974 return -TARGET_EFAULT; 2975 host_md->msg_stime = tswapal(target_md->msg_stime); 2976 host_md->msg_rtime = tswapal(target_md->msg_rtime); 2977 host_md->msg_ctime = tswapal(target_md->msg_ctime); 2978 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes); 2979 host_md->msg_qnum = tswapal(target_md->msg_qnum); 2980 host_md->msg_qbytes = tswapal(target_md->msg_qbytes); 2981 host_md->msg_lspid = tswapal(target_md->msg_lspid); 2982 host_md->msg_lrpid = tswapal(target_md->msg_lrpid); 2983 unlock_user_struct(target_md, target_addr, 0); 2984 return 0; 2985 } 2986 2987 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr, 2988 struct msqid_ds *host_md) 2989 { 2990 struct target_msqid_ds *target_md; 2991 2992 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0)) 2993 return -TARGET_EFAULT; 2994 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm))) 2995 return -TARGET_EFAULT; 2996 target_md->msg_stime = tswapal(host_md->msg_stime); 2997 target_md->msg_rtime = tswapal(host_md->msg_rtime); 2998 target_md->msg_ctime = tswapal(host_md->msg_ctime); 2999 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes); 3000 target_md->msg_qnum = tswapal(host_md->msg_qnum); 3001 target_md->msg_qbytes = tswapal(host_md->msg_qbytes); 3002 target_md->msg_lspid = tswapal(host_md->msg_lspid); 3003 target_md->msg_lrpid = tswapal(host_md->msg_lrpid); 3004 unlock_user_struct(target_md, target_addr, 1); 3005 return 0; 3006 } 3007 3008 struct target_msginfo { 3009 int msgpool; 3010 int msgmap; 3011 int msgmax; 3012 int msgmnb; 3013 int msgmni; 3014 int msgssz; 3015 int msgtql; 3016 unsigned short int msgseg; 3017 }; 3018 3019 static inline abi_long host_to_target_msginfo(abi_ulong target_addr, 3020 struct msginfo *host_msginfo) 3021 { 3022 struct target_msginfo *target_msginfo; 3023 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0)) 3024 return -TARGET_EFAULT; 3025 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool); 3026 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap); 3027 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax); 3028 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb); 3029 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni); 3030 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz); 3031 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql); 3032 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg); 3033 unlock_user_struct(target_msginfo, target_addr, 1); 3034 return 0; 3035 } 3036 3037 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr) 3038 { 3039 struct msqid_ds dsarg; 3040 struct msginfo msginfo; 3041 abi_long ret = -TARGET_EINVAL; 3042 3043 cmd &= 0xff; 3044 3045 switch (cmd) { 3046 case IPC_STAT: 3047 case IPC_SET: 3048 case MSG_STAT: 3049 if (target_to_host_msqid_ds(&dsarg,ptr)) 3050 return -TARGET_EFAULT; 3051 ret = get_errno(msgctl(msgid, 
cmd, &dsarg)); 3052 if (host_to_target_msqid_ds(ptr,&dsarg)) 3053 return -TARGET_EFAULT; 3054 break; 3055 case IPC_RMID: 3056 ret = get_errno(msgctl(msgid, cmd, NULL)); 3057 break; 3058 case IPC_INFO: 3059 case MSG_INFO: 3060 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo)); 3061 if (host_to_target_msginfo(ptr, &msginfo)) 3062 return -TARGET_EFAULT; 3063 break; 3064 } 3065 3066 return ret; 3067 } 3068 3069 struct target_msgbuf { 3070 abi_long mtype; 3071 char mtext[1]; 3072 }; 3073 3074 static inline abi_long do_msgsnd(int msqid, abi_long msgp, 3075 ssize_t msgsz, int msgflg) 3076 { 3077 struct target_msgbuf *target_mb; 3078 struct msgbuf *host_mb; 3079 abi_long ret = 0; 3080 3081 if (msgsz < 0) { 3082 return -TARGET_EINVAL; 3083 } 3084 3085 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0)) 3086 return -TARGET_EFAULT; 3087 host_mb = g_try_malloc(msgsz + sizeof(long)); 3088 if (!host_mb) { 3089 unlock_user_struct(target_mb, msgp, 0); 3090 return -TARGET_ENOMEM; 3091 } 3092 host_mb->mtype = (abi_long) tswapal(target_mb->mtype); 3093 memcpy(host_mb->mtext, target_mb->mtext, msgsz); 3094 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg)); 3095 g_free(host_mb); 3096 unlock_user_struct(target_mb, msgp, 0); 3097 3098 return ret; 3099 } 3100 3101 static inline abi_long do_msgrcv(int msqid, abi_long msgp, 3102 unsigned int msgsz, abi_long msgtyp, 3103 int msgflg) 3104 { 3105 struct target_msgbuf *target_mb; 3106 char *target_mtext; 3107 struct msgbuf *host_mb; 3108 abi_long ret = 0; 3109 3110 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) 3111 return -TARGET_EFAULT; 3112 3113 host_mb = g_malloc(msgsz+sizeof(long)); 3114 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg)); 3115 3116 if (ret > 0) { 3117 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); 3118 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); 3119 if (!target_mtext) { 3120 ret = -TARGET_EFAULT; 3121 goto end; 3122 } 3123 memcpy(target_mb->mtext, host_mb->mtext, ret); 3124 unlock_user(target_mtext, target_mtext_addr, ret); 3125 } 3126 3127 target_mb->mtype = tswapal(host_mb->mtype); 3128 3129 end: 3130 if (target_mb) 3131 unlock_user_struct(target_mb, msgp, 1); 3132 g_free(host_mb); 3133 return ret; 3134 } 3135 3136 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd, 3137 abi_ulong target_addr) 3138 { 3139 struct target_shmid_ds *target_sd; 3140 3141 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1)) 3142 return -TARGET_EFAULT; 3143 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr)) 3144 return -TARGET_EFAULT; 3145 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz); 3146 __get_user(host_sd->shm_atime, &target_sd->shm_atime); 3147 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime); 3148 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime); 3149 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid); 3150 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid); 3151 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch); 3152 unlock_user_struct(target_sd, target_addr, 0); 3153 return 0; 3154 } 3155 3156 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr, 3157 struct shmid_ds *host_sd) 3158 { 3159 struct target_shmid_ds *target_sd; 3160 3161 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0)) 3162 return -TARGET_EFAULT; 3163 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm))) 3164 return -TARGET_EFAULT; 3165 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz); 3166 
__put_user(host_sd->shm_atime, &target_sd->shm_atime); 3167 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime); 3168 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime); 3169 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid); 3170 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid); 3171 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch); 3172 unlock_user_struct(target_sd, target_addr, 1); 3173 return 0; 3174 } 3175 3176 struct target_shminfo { 3177 abi_ulong shmmax; 3178 abi_ulong shmmin; 3179 abi_ulong shmmni; 3180 abi_ulong shmseg; 3181 abi_ulong shmall; 3182 }; 3183 3184 static inline abi_long host_to_target_shminfo(abi_ulong target_addr, 3185 struct shminfo *host_shminfo) 3186 { 3187 struct target_shminfo *target_shminfo; 3188 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0)) 3189 return -TARGET_EFAULT; 3190 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax); 3191 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin); 3192 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni); 3193 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg); 3194 __put_user(host_shminfo->shmall, &target_shminfo->shmall); 3195 unlock_user_struct(target_shminfo, target_addr, 1); 3196 return 0; 3197 } 3198 3199 struct target_shm_info { 3200 int used_ids; 3201 abi_ulong shm_tot; 3202 abi_ulong shm_rss; 3203 abi_ulong shm_swp; 3204 abi_ulong swap_attempts; 3205 abi_ulong swap_successes; 3206 }; 3207 3208 static inline abi_long host_to_target_shm_info(abi_ulong target_addr, 3209 struct shm_info *host_shm_info) 3210 { 3211 struct target_shm_info *target_shm_info; 3212 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0)) 3213 return -TARGET_EFAULT; 3214 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids); 3215 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot); 3216 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss); 3217 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp); 3218 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts); 3219 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes); 3220 unlock_user_struct(target_shm_info, target_addr, 1); 3221 return 0; 3222 } 3223 3224 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf) 3225 { 3226 struct shmid_ds dsarg; 3227 struct shminfo shminfo; 3228 struct shm_info shm_info; 3229 abi_long ret = -TARGET_EINVAL; 3230 3231 cmd &= 0xff; 3232 3233 switch(cmd) { 3234 case IPC_STAT: 3235 case IPC_SET: 3236 case SHM_STAT: 3237 if (target_to_host_shmid_ds(&dsarg, buf)) 3238 return -TARGET_EFAULT; 3239 ret = get_errno(shmctl(shmid, cmd, &dsarg)); 3240 if (host_to_target_shmid_ds(buf, &dsarg)) 3241 return -TARGET_EFAULT; 3242 break; 3243 case IPC_INFO: 3244 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo)); 3245 if (host_to_target_shminfo(buf, &shminfo)) 3246 return -TARGET_EFAULT; 3247 break; 3248 case SHM_INFO: 3249 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info)); 3250 if (host_to_target_shm_info(buf, &shm_info)) 3251 return -TARGET_EFAULT; 3252 break; 3253 case IPC_RMID: 3254 case SHM_LOCK: 3255 case SHM_UNLOCK: 3256 ret = get_errno(shmctl(shmid, cmd, NULL)); 3257 break; 3258 } 3259 3260 return ret; 3261 } 3262 3263 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg) 3264 { 3265 abi_long raddr; 3266 void *host_raddr; 3267 struct shmid_ds shm_info; 3268 int i,ret; 3269 3270 /* find out the length of the shared memory segment */ 3271 ret = 
get_errno(shmctl(shmid, IPC_STAT, &shm_info)); 3272 if (is_error(ret)) { 3273 /* can't get length, bail out */ 3274 return ret; 3275 } 3276 3277 mmap_lock(); 3278 3279 if (shmaddr) 3280 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg); 3281 else { 3282 abi_ulong mmap_start; 3283 3284 mmap_start = mmap_find_vma(0, shm_info.shm_segsz); 3285 3286 if (mmap_start == -1) { 3287 errno = ENOMEM; 3288 host_raddr = (void *)-1; 3289 } else 3290 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP); 3291 } 3292 3293 if (host_raddr == (void *)-1) { 3294 mmap_unlock(); 3295 return get_errno((long)host_raddr); 3296 } 3297 raddr=h2g((unsigned long)host_raddr); 3298 3299 page_set_flags(raddr, raddr + shm_info.shm_segsz, 3300 PAGE_VALID | PAGE_READ | 3301 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE)); 3302 3303 for (i = 0; i < N_SHM_REGIONS; i++) { 3304 if (!shm_regions[i].in_use) { 3305 shm_regions[i].in_use = true; 3306 shm_regions[i].start = raddr; 3307 shm_regions[i].size = shm_info.shm_segsz; 3308 break; 3309 } 3310 } 3311 3312 mmap_unlock(); 3313 return raddr; 3314 3315 } 3316 3317 static inline abi_long do_shmdt(abi_ulong shmaddr) 3318 { 3319 int i; 3320 3321 for (i = 0; i < N_SHM_REGIONS; ++i) { 3322 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) { 3323 shm_regions[i].in_use = false; 3324 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0); 3325 break; 3326 } 3327 } 3328 3329 return get_errno(shmdt(g2h(shmaddr))); 3330 } 3331 3332 #ifdef TARGET_NR_ipc 3333 /* ??? This only works with linear mappings. */ 3334 /* do_ipc() must return target values and target errnos. */ 3335 static abi_long do_ipc(unsigned int call, abi_long first, 3336 abi_long second, abi_long third, 3337 abi_long ptr, abi_long fifth) 3338 { 3339 int version; 3340 abi_long ret = 0; 3341 3342 version = call >> 16; 3343 call &= 0xffff; 3344 3345 switch (call) { 3346 case IPCOP_semop: 3347 ret = do_semop(first, ptr, second); 3348 break; 3349 3350 case IPCOP_semget: 3351 ret = get_errno(semget(first, second, third)); 3352 break; 3353 3354 case IPCOP_semctl: { 3355 /* The semun argument to semctl is passed by value, so dereference the 3356 * ptr argument. 
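 *
 * In other words: for IPCOP_semctl the caller passes a pointer to the
 * union semun in ptr, so one abi_ulong is read from ptr (get_user_ual)
 * and handed to do_semctl(), which then interprets that value as val,
 * buf or array depending on cmd.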
*/ 3357 abi_ulong atptr; 3358 get_user_ual(atptr, ptr); 3359 ret = do_semctl(first, second, third, atptr); 3360 break; 3361 } 3362 3363 case IPCOP_msgget: 3364 ret = get_errno(msgget(first, second)); 3365 break; 3366 3367 case IPCOP_msgsnd: 3368 ret = do_msgsnd(first, ptr, second, third); 3369 break; 3370 3371 case IPCOP_msgctl: 3372 ret = do_msgctl(first, second, ptr); 3373 break; 3374 3375 case IPCOP_msgrcv: 3376 switch (version) { 3377 case 0: 3378 { 3379 struct target_ipc_kludge { 3380 abi_long msgp; 3381 abi_long msgtyp; 3382 } *tmp; 3383 3384 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) { 3385 ret = -TARGET_EFAULT; 3386 break; 3387 } 3388 3389 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third); 3390 3391 unlock_user_struct(tmp, ptr, 0); 3392 break; 3393 } 3394 default: 3395 ret = do_msgrcv(first, ptr, second, fifth, third); 3396 } 3397 break; 3398 3399 case IPCOP_shmat: 3400 switch (version) { 3401 default: 3402 { 3403 abi_ulong raddr; 3404 raddr = do_shmat(first, ptr, second); 3405 if (is_error(raddr)) 3406 return get_errno(raddr); 3407 if (put_user_ual(raddr, third)) 3408 return -TARGET_EFAULT; 3409 break; 3410 } 3411 case 1: 3412 ret = -TARGET_EINVAL; 3413 break; 3414 } 3415 break; 3416 case IPCOP_shmdt: 3417 ret = do_shmdt(ptr); 3418 break; 3419 3420 case IPCOP_shmget: 3421 /* IPC_* flag values are the same on all linux platforms */ 3422 ret = get_errno(shmget(first, second, third)); 3423 break; 3424 3425 /* IPC_* and SHM_* command values are the same on all linux platforms */ 3426 case IPCOP_shmctl: 3427 ret = do_shmctl(first, second, ptr); 3428 break; 3429 default: 3430 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version); 3431 ret = -TARGET_ENOSYS; 3432 break; 3433 } 3434 return ret; 3435 } 3436 #endif 3437 3438 /* kernel structure types definitions */ 3439 3440 #define STRUCT(name, ...) STRUCT_ ## name, 3441 #define STRUCT_SPECIAL(name) STRUCT_ ## name, 3442 enum { 3443 #include "syscall_types.h" 3444 STRUCT_MAX 3445 }; 3446 #undef STRUCT 3447 #undef STRUCT_SPECIAL 3448 3449 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL }; 3450 #define STRUCT_SPECIAL(name) 3451 #include "syscall_types.h" 3452 #undef STRUCT 3453 #undef STRUCT_SPECIAL 3454 3455 typedef struct IOCTLEntry IOCTLEntry; 3456 3457 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp, 3458 int fd, int cmd, abi_long arg); 3459 3460 struct IOCTLEntry { 3461 int target_cmd; 3462 unsigned int host_cmd; 3463 const char *name; 3464 int access; 3465 do_ioctl_fn *do_ioctl; 3466 const argtype arg_type[5]; 3467 }; 3468 3469 #define IOC_R 0x0001 3470 #define IOC_W 0x0002 3471 #define IOC_RW (IOC_R | IOC_W) 3472 3473 #define MAX_STRUCT_SIZE 4096 3474 3475 #ifdef CONFIG_FIEMAP 3476 /* So fiemap access checks don't overflow on 32 bit systems. 3477 * This is very slightly smaller than the limit imposed by 3478 * the underlying kernel. 3479 */ 3480 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \ 3481 / sizeof(struct fiemap_extent)) 3482 3483 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp, 3484 int fd, int cmd, abi_long arg) 3485 { 3486 /* The parameter for this ioctl is a struct fiemap followed 3487 * by an array of struct fiemap_extent whose size is set 3488 * in fiemap->fm_extent_count. The array is filled in by the 3489 * ioctl. 
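 *
 * The guest buffer therefore has the layout (sizes are guest-dependent):
 *
 *   struct fiemap          header, fm_extent_count set by the caller
 *   struct fiemap_extent   array of fm_extent_count entries, filled on return
 *
 * fm_extent_count is capped at FIEMAP_MAX_EXTENTS; if the whole request
 * does not fit into buf_temp (MAX_STRUCT_SIZE) a temporary buffer is
 * allocated, and on success the header plus fm_mapped_extents extents are
 * thunk-converted back into guest memory.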
3490 */ 3491 int target_size_in, target_size_out; 3492 struct fiemap *fm; 3493 const argtype *arg_type = ie->arg_type; 3494 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) }; 3495 void *argptr, *p; 3496 abi_long ret; 3497 int i, extent_size = thunk_type_size(extent_arg_type, 0); 3498 uint32_t outbufsz; 3499 int free_fm = 0; 3500 3501 assert(arg_type[0] == TYPE_PTR); 3502 assert(ie->access == IOC_RW); 3503 arg_type++; 3504 target_size_in = thunk_type_size(arg_type, 0); 3505 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1); 3506 if (!argptr) { 3507 return -TARGET_EFAULT; 3508 } 3509 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3510 unlock_user(argptr, arg, 0); 3511 fm = (struct fiemap *)buf_temp; 3512 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) { 3513 return -TARGET_EINVAL; 3514 } 3515 3516 outbufsz = sizeof (*fm) + 3517 (sizeof(struct fiemap_extent) * fm->fm_extent_count); 3518 3519 if (outbufsz > MAX_STRUCT_SIZE) { 3520 /* We can't fit all the extents into the fixed size buffer. 3521 * Allocate one that is large enough and use it instead. 3522 */ 3523 fm = g_try_malloc(outbufsz); 3524 if (!fm) { 3525 return -TARGET_ENOMEM; 3526 } 3527 memcpy(fm, buf_temp, sizeof(struct fiemap)); 3528 free_fm = 1; 3529 } 3530 ret = get_errno(ioctl(fd, ie->host_cmd, fm)); 3531 if (!is_error(ret)) { 3532 target_size_out = target_size_in; 3533 /* An extent_count of 0 means we were only counting the extents 3534 * so there are no structs to copy 3535 */ 3536 if (fm->fm_extent_count != 0) { 3537 target_size_out += fm->fm_mapped_extents * extent_size; 3538 } 3539 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0); 3540 if (!argptr) { 3541 ret = -TARGET_EFAULT; 3542 } else { 3543 /* Convert the struct fiemap */ 3544 thunk_convert(argptr, fm, arg_type, THUNK_TARGET); 3545 if (fm->fm_extent_count != 0) { 3546 p = argptr + target_size_in; 3547 /* ...and then all the struct fiemap_extents */ 3548 for (i = 0; i < fm->fm_mapped_extents; i++) { 3549 thunk_convert(p, &fm->fm_extents[i], extent_arg_type, 3550 THUNK_TARGET); 3551 p += extent_size; 3552 } 3553 } 3554 unlock_user(argptr, arg, target_size_out); 3555 } 3556 } 3557 if (free_fm) { 3558 g_free(fm); 3559 } 3560 return ret; 3561 } 3562 #endif 3563 3564 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, 3565 int fd, int cmd, abi_long arg) 3566 { 3567 const argtype *arg_type = ie->arg_type; 3568 int target_size; 3569 void *argptr; 3570 int ret; 3571 struct ifconf *host_ifconf; 3572 uint32_t outbufsz; 3573 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) }; 3574 int target_ifreq_size; 3575 int nb_ifreq; 3576 int free_buf = 0; 3577 int i; 3578 int target_ifc_len; 3579 abi_long target_ifc_buf; 3580 int host_ifc_len; 3581 char *host_ifc_buf; 3582 3583 assert(arg_type[0] == TYPE_PTR); 3584 assert(ie->access == IOC_RW); 3585 3586 arg_type++; 3587 target_size = thunk_type_size(arg_type, 0); 3588 3589 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3590 if (!argptr) 3591 return -TARGET_EFAULT; 3592 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3593 unlock_user(argptr, arg, 0); 3594 3595 host_ifconf = (struct ifconf *)(unsigned long)buf_temp; 3596 target_ifc_len = host_ifconf->ifc_len; 3597 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf; 3598 3599 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0); 3600 nb_ifreq = target_ifc_len / target_ifreq_size; 3601 host_ifc_len = nb_ifreq * sizeof(struct ifreq); 3602 3603 outbufsz = sizeof(*host_ifconf) + 
host_ifc_len; 3604 if (outbufsz > MAX_STRUCT_SIZE) { 3605 /* We can't fit all the extents into the fixed size buffer. 3606 * Allocate one that is large enough and use it instead. 3607 */ 3608 host_ifconf = malloc(outbufsz); 3609 if (!host_ifconf) { 3610 return -TARGET_ENOMEM; 3611 } 3612 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf)); 3613 free_buf = 1; 3614 } 3615 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf); 3616 3617 host_ifconf->ifc_len = host_ifc_len; 3618 host_ifconf->ifc_buf = host_ifc_buf; 3619 3620 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf)); 3621 if (!is_error(ret)) { 3622 /* convert host ifc_len to target ifc_len */ 3623 3624 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq); 3625 target_ifc_len = nb_ifreq * target_ifreq_size; 3626 host_ifconf->ifc_len = target_ifc_len; 3627 3628 /* restore target ifc_buf */ 3629 3630 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf; 3631 3632 /* copy struct ifconf to target user */ 3633 3634 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3635 if (!argptr) 3636 return -TARGET_EFAULT; 3637 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET); 3638 unlock_user(argptr, arg, target_size); 3639 3640 /* copy ifreq[] to target user */ 3641 3642 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0); 3643 for (i = 0; i < nb_ifreq ; i++) { 3644 thunk_convert(argptr + i * target_ifreq_size, 3645 host_ifc_buf + i * sizeof(struct ifreq), 3646 ifreq_arg_type, THUNK_TARGET); 3647 } 3648 unlock_user(argptr, target_ifc_buf, target_ifc_len); 3649 } 3650 3651 if (free_buf) { 3652 free(host_ifconf); 3653 } 3654 3655 return ret; 3656 } 3657 3658 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3659 int cmd, abi_long arg) 3660 { 3661 void *argptr; 3662 struct dm_ioctl *host_dm; 3663 abi_long guest_data; 3664 uint32_t guest_data_size; 3665 int target_size; 3666 const argtype *arg_type = ie->arg_type; 3667 abi_long ret; 3668 void *big_buf = NULL; 3669 char *host_data; 3670 3671 arg_type++; 3672 target_size = thunk_type_size(arg_type, 0); 3673 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3674 if (!argptr) { 3675 ret = -TARGET_EFAULT; 3676 goto out; 3677 } 3678 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3679 unlock_user(argptr, arg, 0); 3680 3681 /* buf_temp is too small, so fetch things into a bigger buffer */ 3682 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); 3683 memcpy(big_buf, buf_temp, target_size); 3684 buf_temp = big_buf; 3685 host_dm = big_buf; 3686 3687 guest_data = arg + host_dm->data_start; 3688 if ((guest_data - arg) < 0) { 3689 ret = -EINVAL; 3690 goto out; 3691 } 3692 guest_data_size = host_dm->data_size - host_dm->data_start; 3693 host_data = (char*)host_dm + host_dm->data_start; 3694 3695 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); 3696 switch (ie->host_cmd) { 3697 case DM_REMOVE_ALL: 3698 case DM_LIST_DEVICES: 3699 case DM_DEV_CREATE: 3700 case DM_DEV_REMOVE: 3701 case DM_DEV_SUSPEND: 3702 case DM_DEV_STATUS: 3703 case DM_DEV_WAIT: 3704 case DM_TABLE_STATUS: 3705 case DM_TABLE_CLEAR: 3706 case DM_TABLE_DEPS: 3707 case DM_LIST_VERSIONS: 3708 /* no input data */ 3709 break; 3710 case DM_DEV_RENAME: 3711 case DM_DEV_SET_GEOMETRY: 3712 /* data contains only strings */ 3713 memcpy(host_data, argptr, guest_data_size); 3714 break; 3715 case DM_TARGET_MSG: 3716 memcpy(host_data, argptr, guest_data_size); 3717 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); 3718 break; 3719 case DM_TABLE_LOAD: 3720 
{ 3721 void *gspec = argptr; 3722 void *cur_data = host_data; 3723 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3724 int spec_size = thunk_type_size(arg_type, 0); 3725 int i; 3726 3727 for (i = 0; i < host_dm->target_count; i++) { 3728 struct dm_target_spec *spec = cur_data; 3729 uint32_t next; 3730 int slen; 3731 3732 thunk_convert(spec, gspec, arg_type, THUNK_HOST); 3733 slen = strlen((char*)gspec + spec_size) + 1; 3734 next = spec->next; 3735 spec->next = sizeof(*spec) + slen; 3736 strcpy((char*)&spec[1], gspec + spec_size); 3737 gspec += next; 3738 cur_data += spec->next; 3739 } 3740 break; 3741 } 3742 default: 3743 ret = -TARGET_EINVAL; 3744 unlock_user(argptr, guest_data, 0); 3745 goto out; 3746 } 3747 unlock_user(argptr, guest_data, 0); 3748 3749 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3750 if (!is_error(ret)) { 3751 guest_data = arg + host_dm->data_start; 3752 guest_data_size = host_dm->data_size - host_dm->data_start; 3753 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); 3754 switch (ie->host_cmd) { 3755 case DM_REMOVE_ALL: 3756 case DM_DEV_CREATE: 3757 case DM_DEV_REMOVE: 3758 case DM_DEV_RENAME: 3759 case DM_DEV_SUSPEND: 3760 case DM_DEV_STATUS: 3761 case DM_TABLE_LOAD: 3762 case DM_TABLE_CLEAR: 3763 case DM_TARGET_MSG: 3764 case DM_DEV_SET_GEOMETRY: 3765 /* no return data */ 3766 break; 3767 case DM_LIST_DEVICES: 3768 { 3769 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; 3770 uint32_t remaining_data = guest_data_size; 3771 void *cur_data = argptr; 3772 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; 3773 int nl_size = 12; /* can't use thunk_size due to alignment */ 3774 3775 while (1) { 3776 uint32_t next = nl->next; 3777 if (next) { 3778 nl->next = nl_size + (strlen(nl->name) + 1); 3779 } 3780 if (remaining_data < nl->next) { 3781 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3782 break; 3783 } 3784 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); 3785 strcpy(cur_data + nl_size, nl->name); 3786 cur_data += nl->next; 3787 remaining_data -= nl->next; 3788 if (!next) { 3789 break; 3790 } 3791 nl = (void*)nl + next; 3792 } 3793 break; 3794 } 3795 case DM_DEV_WAIT: 3796 case DM_TABLE_STATUS: 3797 { 3798 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; 3799 void *cur_data = argptr; 3800 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; 3801 int spec_size = thunk_type_size(arg_type, 0); 3802 int i; 3803 3804 for (i = 0; i < host_dm->target_count; i++) { 3805 uint32_t next = spec->next; 3806 int slen = strlen((char*)&spec[1]) + 1; 3807 spec->next = (cur_data - argptr) + spec_size + slen; 3808 if (guest_data_size < spec->next) { 3809 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3810 break; 3811 } 3812 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); 3813 strcpy(cur_data + spec_size, (char*)&spec[1]); 3814 cur_data = argptr + spec->next; 3815 spec = (void*)host_dm + host_dm->data_start + next; 3816 } 3817 break; 3818 } 3819 case DM_TABLE_DEPS: 3820 { 3821 void *hdata = (void*)host_dm + host_dm->data_start; 3822 int count = *(uint32_t*)hdata; 3823 uint64_t *hdev = hdata + 8; 3824 uint64_t *gdev = argptr + 8; 3825 int i; 3826 3827 *(uint32_t*)argptr = tswap32(count); 3828 for (i = 0; i < count; i++) { 3829 *gdev = tswap64(*hdev); 3830 gdev++; 3831 hdev++; 3832 } 3833 break; 3834 } 3835 case DM_LIST_VERSIONS: 3836 { 3837 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; 3838 uint32_t remaining_data = guest_data_size; 3839 void *cur_data = argptr; 3840 const 
argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; 3841 int vers_size = thunk_type_size(arg_type, 0); 3842 3843 while (1) { 3844 uint32_t next = vers->next; 3845 if (next) { 3846 vers->next = vers_size + (strlen(vers->name) + 1); 3847 } 3848 if (remaining_data < vers->next) { 3849 host_dm->flags |= DM_BUFFER_FULL_FLAG; 3850 break; 3851 } 3852 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); 3853 strcpy(cur_data + vers_size, vers->name); 3854 cur_data += vers->next; 3855 remaining_data -= vers->next; 3856 if (!next) { 3857 break; 3858 } 3859 vers = (void*)vers + next; 3860 } 3861 break; 3862 } 3863 default: 3864 unlock_user(argptr, guest_data, 0); 3865 ret = -TARGET_EINVAL; 3866 goto out; 3867 } 3868 unlock_user(argptr, guest_data, guest_data_size); 3869 3870 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 3871 if (!argptr) { 3872 ret = -TARGET_EFAULT; 3873 goto out; 3874 } 3875 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 3876 unlock_user(argptr, arg, target_size); 3877 } 3878 out: 3879 g_free(big_buf); 3880 return ret; 3881 } 3882 3883 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, 3884 int cmd, abi_long arg) 3885 { 3886 void *argptr; 3887 int target_size; 3888 const argtype *arg_type = ie->arg_type; 3889 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) }; 3890 abi_long ret; 3891 3892 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp; 3893 struct blkpg_partition host_part; 3894 3895 /* Read and convert blkpg */ 3896 arg_type++; 3897 target_size = thunk_type_size(arg_type, 0); 3898 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3899 if (!argptr) { 3900 ret = -TARGET_EFAULT; 3901 goto out; 3902 } 3903 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 3904 unlock_user(argptr, arg, 0); 3905 3906 switch (host_blkpg->op) { 3907 case BLKPG_ADD_PARTITION: 3908 case BLKPG_DEL_PARTITION: 3909 /* payload is struct blkpg_partition */ 3910 break; 3911 default: 3912 /* Unknown opcode */ 3913 ret = -TARGET_EINVAL; 3914 goto out; 3915 } 3916 3917 /* Read and convert blkpg->data */ 3918 arg = (abi_long)(uintptr_t)host_blkpg->data; 3919 target_size = thunk_type_size(part_arg_type, 0); 3920 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3921 if (!argptr) { 3922 ret = -TARGET_EFAULT; 3923 goto out; 3924 } 3925 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST); 3926 unlock_user(argptr, arg, 0); 3927 3928 /* Swizzle the data pointer to our local copy and call! 
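 *
 * At this point host_blkpg->data still holds the guest address; the
 * struct blkpg_partition behind it has just been converted into host_part,
 * so repointing data at that local copy lets the host ioctl() read valid
 * host memory.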
*/ 3929 host_blkpg->data = &host_part; 3930 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg)); 3931 3932 out: 3933 return ret; 3934 } 3935 3936 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, 3937 int fd, int cmd, abi_long arg) 3938 { 3939 const argtype *arg_type = ie->arg_type; 3940 const StructEntry *se; 3941 const argtype *field_types; 3942 const int *dst_offsets, *src_offsets; 3943 int target_size; 3944 void *argptr; 3945 abi_ulong *target_rt_dev_ptr; 3946 unsigned long *host_rt_dev_ptr; 3947 abi_long ret; 3948 int i; 3949 3950 assert(ie->access == IOC_W); 3951 assert(*arg_type == TYPE_PTR); 3952 arg_type++; 3953 assert(*arg_type == TYPE_STRUCT); 3954 target_size = thunk_type_size(arg_type, 0); 3955 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 3956 if (!argptr) { 3957 return -TARGET_EFAULT; 3958 } 3959 arg_type++; 3960 assert(*arg_type == (int)STRUCT_rtentry); 3961 se = struct_entries + *arg_type++; 3962 assert(se->convert[0] == NULL); 3963 /* convert struct here to be able to catch rt_dev string */ 3964 field_types = se->field_types; 3965 dst_offsets = se->field_offsets[THUNK_HOST]; 3966 src_offsets = se->field_offsets[THUNK_TARGET]; 3967 for (i = 0; i < se->nb_fields; i++) { 3968 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { 3969 assert(*field_types == TYPE_PTRVOID); 3970 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); 3971 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); 3972 if (*target_rt_dev_ptr != 0) { 3973 *host_rt_dev_ptr = (unsigned long)lock_user_string( 3974 tswapal(*target_rt_dev_ptr)); 3975 if (!*host_rt_dev_ptr) { 3976 unlock_user(argptr, arg, 0); 3977 return -TARGET_EFAULT; 3978 } 3979 } else { 3980 *host_rt_dev_ptr = 0; 3981 } 3982 field_types++; 3983 continue; 3984 } 3985 field_types = thunk_convert(buf_temp + dst_offsets[i], 3986 argptr + src_offsets[i], 3987 field_types, THUNK_HOST); 3988 } 3989 unlock_user(argptr, arg, 0); 3990 3991 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 3992 if (*host_rt_dev_ptr != 0) { 3993 unlock_user((void *)*host_rt_dev_ptr, 3994 *target_rt_dev_ptr, 0); 3995 } 3996 return ret; 3997 } 3998 3999 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp, 4000 int fd, int cmd, abi_long arg) 4001 { 4002 int sig = target_to_host_signal(arg); 4003 return get_errno(ioctl(fd, ie->host_cmd, sig)); 4004 } 4005 4006 static IOCTLEntry ioctl_entries[] = { 4007 #define IOCTL(cmd, access, ...) \ 4008 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } }, 4009 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \ 4010 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } }, 4011 #include "ioctls.h" 4012 { 0, 0, }, 4013 }; 4014 4015 /* ??? Implement proper locking for ioctls. */ 4016 /* do_ioctl() Must return target values and target errnos. 
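In the generic path below the ioctl is looked up by a linear scan of ioctl_entries[] on target_cmd; entries declared with IOCTL_SPECIAL are delegated to their do_ioctl callback, while plain TYPE_PTR arguments are staged through buf_temp according to ie->access - e.g. an IOC_RW request is lock_user(VERIFY_READ), thunk_convert(THUNK_HOST), host ioctl(), thunk_convert(THUNK_TARGET), unlock_user back to the guest.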
*/ 4017 static abi_long do_ioctl(int fd, int cmd, abi_long arg) 4018 { 4019 const IOCTLEntry *ie; 4020 const argtype *arg_type; 4021 abi_long ret; 4022 uint8_t buf_temp[MAX_STRUCT_SIZE]; 4023 int target_size; 4024 void *argptr; 4025 4026 ie = ioctl_entries; 4027 for(;;) { 4028 if (ie->target_cmd == 0) { 4029 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd); 4030 return -TARGET_ENOSYS; 4031 } 4032 if (ie->target_cmd == cmd) 4033 break; 4034 ie++; 4035 } 4036 arg_type = ie->arg_type; 4037 #if defined(DEBUG) 4038 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name); 4039 #endif 4040 if (ie->do_ioctl) { 4041 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg); 4042 } 4043 4044 switch(arg_type[0]) { 4045 case TYPE_NULL: 4046 /* no argument */ 4047 ret = get_errno(ioctl(fd, ie->host_cmd)); 4048 break; 4049 case TYPE_PTRVOID: 4050 case TYPE_INT: 4051 ret = get_errno(ioctl(fd, ie->host_cmd, arg)); 4052 break; 4053 case TYPE_PTR: 4054 arg_type++; 4055 target_size = thunk_type_size(arg_type, 0); 4056 switch(ie->access) { 4057 case IOC_R: 4058 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 4059 if (!is_error(ret)) { 4060 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 4061 if (!argptr) 4062 return -TARGET_EFAULT; 4063 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 4064 unlock_user(argptr, arg, target_size); 4065 } 4066 break; 4067 case IOC_W: 4068 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 4069 if (!argptr) 4070 return -TARGET_EFAULT; 4071 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4072 unlock_user(argptr, arg, 0); 4073 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 4074 break; 4075 default: 4076 case IOC_RW: 4077 argptr = lock_user(VERIFY_READ, arg, target_size, 1); 4078 if (!argptr) 4079 return -TARGET_EFAULT; 4080 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); 4081 unlock_user(argptr, arg, 0); 4082 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp)); 4083 if (!is_error(ret)) { 4084 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); 4085 if (!argptr) 4086 return -TARGET_EFAULT; 4087 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); 4088 unlock_user(argptr, arg, target_size); 4089 } 4090 break; 4091 } 4092 break; 4093 default: 4094 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n", 4095 (long)cmd, arg_type[0]); 4096 ret = -TARGET_ENOSYS; 4097 break; 4098 } 4099 return ret; 4100 } 4101 4102 static const bitmask_transtbl iflag_tbl[] = { 4103 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK }, 4104 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT }, 4105 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR }, 4106 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK }, 4107 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK }, 4108 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP }, 4109 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR }, 4110 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR }, 4111 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL }, 4112 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC }, 4113 { TARGET_IXON, TARGET_IXON, IXON, IXON }, 4114 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY }, 4115 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF }, 4116 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL }, 4117 { 0, 0, 0, 0 } 4118 }; 4119 4120 static const bitmask_transtbl oflag_tbl[] = { 4121 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST }, 4122 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC }, 4123 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR }, 4124 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL }, 4125 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR }, 4126 { TARGET_ONLRET, 
TARGET_ONLRET, ONLRET, ONLRET }, 4127 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL }, 4128 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL }, 4129 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 }, 4130 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 }, 4131 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 }, 4132 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 }, 4133 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 }, 4134 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 }, 4135 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 }, 4136 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 }, 4137 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 }, 4138 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 }, 4139 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 }, 4140 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 }, 4141 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 }, 4142 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 }, 4143 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 }, 4144 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 }, 4145 { 0, 0, 0, 0 } 4146 }; 4147 4148 static const bitmask_transtbl cflag_tbl[] = { 4149 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 }, 4150 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 }, 4151 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 }, 4152 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 }, 4153 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 }, 4154 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 }, 4155 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 }, 4156 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 }, 4157 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 }, 4158 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 }, 4159 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 }, 4160 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 }, 4161 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 }, 4162 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 }, 4163 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 }, 4164 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 }, 4165 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 }, 4166 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 }, 4167 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 }, 4168 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 }, 4169 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 }, 4170 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 }, 4171 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 }, 4172 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }, 4173 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB }, 4174 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD }, 4175 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB }, 4176 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD }, 4177 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL }, 4178 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL }, 4179 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS }, 4180 { 0, 0, 0, 0 } 4181 }; 4182 4183 static const bitmask_transtbl lflag_tbl[] = { 4184 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG }, 4185 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON }, 4186 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE }, 4187 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO }, 4188 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE }, 4189 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK }, 4190 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL }, 4191 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH }, 4192 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP }, 4193 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL }, 4194 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT }, 4195 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE }, 4196 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO }, 4197 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN }, 4198 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN }, 4199 { 0, 0, 0, 0 } 4200 }; 4201 4202 static void target_to_host_termios (void *dst, const void 
*src) 4203 { 4204 struct host_termios *host = dst; 4205 const struct target_termios *target = src; 4206 4207 host->c_iflag = 4208 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl); 4209 host->c_oflag = 4210 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl); 4211 host->c_cflag = 4212 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl); 4213 host->c_lflag = 4214 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl); 4215 host->c_line = target->c_line; 4216 4217 memset(host->c_cc, 0, sizeof(host->c_cc)); 4218 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR]; 4219 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT]; 4220 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE]; 4221 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL]; 4222 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF]; 4223 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME]; 4224 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN]; 4225 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC]; 4226 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART]; 4227 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP]; 4228 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP]; 4229 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL]; 4230 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT]; 4231 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD]; 4232 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE]; 4233 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT]; 4234 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2]; 4235 } 4236 4237 static void host_to_target_termios (void *dst, const void *src) 4238 { 4239 struct target_termios *target = dst; 4240 const struct host_termios *host = src; 4241 4242 target->c_iflag = 4243 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl)); 4244 target->c_oflag = 4245 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl)); 4246 target->c_cflag = 4247 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl)); 4248 target->c_lflag = 4249 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl)); 4250 target->c_line = host->c_line; 4251 4252 memset(target->c_cc, 0, sizeof(target->c_cc)); 4253 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR]; 4254 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT]; 4255 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE]; 4256 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL]; 4257 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF]; 4258 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME]; 4259 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN]; 4260 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC]; 4261 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART]; 4262 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP]; 4263 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP]; 4264 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL]; 4265 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT]; 4266 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD]; 4267 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE]; 4268 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT]; 4269 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2]; 4270 } 4271 4272 static const StructEntry struct_termios_def = { 4273 .convert = { host_to_target_termios, target_to_host_termios }, 4274 .size = { sizeof(struct target_termios), sizeof(struct host_termios) }, 4275 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) }, 4276 }; 4277 4278 static bitmask_transtbl mmap_flags_tbl[] = { 4279 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED }, 4280 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE }, 4281 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, 
MAP_FIXED, MAP_FIXED }, 4282 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS }, 4283 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN }, 4284 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE }, 4285 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE }, 4286 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED }, 4287 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE, 4288 MAP_NORESERVE }, 4289 { 0, 0, 0, 0 } 4290 }; 4291 4292 #if defined(TARGET_I386) 4293 4294 /* NOTE: there is really one LDT for all the threads */ 4295 static uint8_t *ldt_table; 4296 4297 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount) 4298 { 4299 int size; 4300 void *p; 4301 4302 if (!ldt_table) 4303 return 0; 4304 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE; 4305 if (size > bytecount) 4306 size = bytecount; 4307 p = lock_user(VERIFY_WRITE, ptr, size, 0); 4308 if (!p) 4309 return -TARGET_EFAULT; 4310 /* ??? Should this by byteswapped? */ 4311 memcpy(p, ldt_table, size); 4312 unlock_user(p, ptr, size); 4313 return size; 4314 } 4315 4316 /* XXX: add locking support */ 4317 static abi_long write_ldt(CPUX86State *env, 4318 abi_ulong ptr, unsigned long bytecount, int oldmode) 4319 { 4320 struct target_modify_ldt_ldt_s ldt_info; 4321 struct target_modify_ldt_ldt_s *target_ldt_info; 4322 int seg_32bit, contents, read_exec_only, limit_in_pages; 4323 int seg_not_present, useable, lm; 4324 uint32_t *lp, entry_1, entry_2; 4325 4326 if (bytecount != sizeof(ldt_info)) 4327 return -TARGET_EINVAL; 4328 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1)) 4329 return -TARGET_EFAULT; 4330 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4331 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4332 ldt_info.limit = tswap32(target_ldt_info->limit); 4333 ldt_info.flags = tswap32(target_ldt_info->flags); 4334 unlock_user_struct(target_ldt_info, ptr, 0); 4335 4336 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES) 4337 return -TARGET_EINVAL; 4338 seg_32bit = ldt_info.flags & 1; 4339 contents = (ldt_info.flags >> 1) & 3; 4340 read_exec_only = (ldt_info.flags >> 3) & 1; 4341 limit_in_pages = (ldt_info.flags >> 4) & 1; 4342 seg_not_present = (ldt_info.flags >> 5) & 1; 4343 useable = (ldt_info.flags >> 6) & 1; 4344 #ifdef TARGET_ABI32 4345 lm = 0; 4346 #else 4347 lm = (ldt_info.flags >> 7) & 1; 4348 #endif 4349 if (contents == 3) { 4350 if (oldmode) 4351 return -TARGET_EINVAL; 4352 if (seg_not_present == 0) 4353 return -TARGET_EINVAL; 4354 } 4355 /* allocate the LDT */ 4356 if (!ldt_table) { 4357 env->ldt.base = target_mmap(0, 4358 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE, 4359 PROT_READ|PROT_WRITE, 4360 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 4361 if (env->ldt.base == -1) 4362 return -TARGET_ENOMEM; 4363 memset(g2h(env->ldt.base), 0, 4364 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE); 4365 env->ldt.limit = 0xffff; 4366 ldt_table = g2h(env->ldt.base); 4367 } 4368 4369 /* NOTE: same code as Linux kernel */ 4370 /* Allow LDTs to be cleared by the user. 
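A request with base_addr and limit both zero is treated as clearing the entry: if oldmode is set, or the flags describe the canonical empty segment (contents 0, read_exec_only, not 32 bit, byte granularity, not present, not useable), an all-zero descriptor is installed instead of encoding the fields below.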
*/ 4371 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4372 if (oldmode || 4373 (contents == 0 && 4374 read_exec_only == 1 && 4375 seg_32bit == 0 && 4376 limit_in_pages == 0 && 4377 seg_not_present == 1 && 4378 useable == 0 )) { 4379 entry_1 = 0; 4380 entry_2 = 0; 4381 goto install; 4382 } 4383 } 4384 4385 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4386 (ldt_info.limit & 0x0ffff); 4387 entry_2 = (ldt_info.base_addr & 0xff000000) | 4388 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4389 (ldt_info.limit & 0xf0000) | 4390 ((read_exec_only ^ 1) << 9) | 4391 (contents << 10) | 4392 ((seg_not_present ^ 1) << 15) | 4393 (seg_32bit << 22) | 4394 (limit_in_pages << 23) | 4395 (lm << 21) | 4396 0x7000; 4397 if (!oldmode) 4398 entry_2 |= (useable << 20); 4399 4400 /* Install the new entry ... */ 4401 install: 4402 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3)); 4403 lp[0] = tswap32(entry_1); 4404 lp[1] = tswap32(entry_2); 4405 return 0; 4406 } 4407 4408 /* specific and weird i386 syscalls */ 4409 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr, 4410 unsigned long bytecount) 4411 { 4412 abi_long ret; 4413 4414 switch (func) { 4415 case 0: 4416 ret = read_ldt(ptr, bytecount); 4417 break; 4418 case 1: 4419 ret = write_ldt(env, ptr, bytecount, 1); 4420 break; 4421 case 0x11: 4422 ret = write_ldt(env, ptr, bytecount, 0); 4423 break; 4424 default: 4425 ret = -TARGET_ENOSYS; 4426 break; 4427 } 4428 return ret; 4429 } 4430 4431 #if defined(TARGET_I386) && defined(TARGET_ABI32) 4432 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr) 4433 { 4434 uint64_t *gdt_table = g2h(env->gdt.base); 4435 struct target_modify_ldt_ldt_s ldt_info; 4436 struct target_modify_ldt_ldt_s *target_ldt_info; 4437 int seg_32bit, contents, read_exec_only, limit_in_pages; 4438 int seg_not_present, useable, lm; 4439 uint32_t *lp, entry_1, entry_2; 4440 int i; 4441 4442 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4443 if (!target_ldt_info) 4444 return -TARGET_EFAULT; 4445 ldt_info.entry_number = tswap32(target_ldt_info->entry_number); 4446 ldt_info.base_addr = tswapal(target_ldt_info->base_addr); 4447 ldt_info.limit = tswap32(target_ldt_info->limit); 4448 ldt_info.flags = tswap32(target_ldt_info->flags); 4449 if (ldt_info.entry_number == -1) { 4450 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) { 4451 if (gdt_table[i] == 0) { 4452 ldt_info.entry_number = i; 4453 target_ldt_info->entry_number = tswap32(i); 4454 break; 4455 } 4456 } 4457 } 4458 unlock_user_struct(target_ldt_info, ptr, 1); 4459 4460 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN || 4461 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX) 4462 return -TARGET_EINVAL; 4463 seg_32bit = ldt_info.flags & 1; 4464 contents = (ldt_info.flags >> 1) & 3; 4465 read_exec_only = (ldt_info.flags >> 3) & 1; 4466 limit_in_pages = (ldt_info.flags >> 4) & 1; 4467 seg_not_present = (ldt_info.flags >> 5) & 1; 4468 useable = (ldt_info.flags >> 6) & 1; 4469 #ifdef TARGET_ABI32 4470 lm = 0; 4471 #else 4472 lm = (ldt_info.flags >> 7) & 1; 4473 #endif 4474 4475 if (contents == 3) { 4476 if (seg_not_present == 0) 4477 return -TARGET_EINVAL; 4478 } 4479 4480 /* NOTE: same code as Linux kernel */ 4481 /* Allow LDTs to be cleared by the user. 
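Unlike write_ldt() above, this writes into the GDT TLS slots (TARGET_GDT_ENTRY_TLS_MIN .. TARGET_GDT_ENTRY_TLS_MAX); an entry_number of -1 was resolved above to the first free slot and written back to the guest structure so the guest can see which slot it was given.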
*/ 4482 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { 4483 if ((contents == 0 && 4484 read_exec_only == 1 && 4485 seg_32bit == 0 && 4486 limit_in_pages == 0 && 4487 seg_not_present == 1 && 4488 useable == 0 )) { 4489 entry_1 = 0; 4490 entry_2 = 0; 4491 goto install; 4492 } 4493 } 4494 4495 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | 4496 (ldt_info.limit & 0x0ffff); 4497 entry_2 = (ldt_info.base_addr & 0xff000000) | 4498 ((ldt_info.base_addr & 0x00ff0000) >> 16) | 4499 (ldt_info.limit & 0xf0000) | 4500 ((read_exec_only ^ 1) << 9) | 4501 (contents << 10) | 4502 ((seg_not_present ^ 1) << 15) | 4503 (seg_32bit << 22) | 4504 (limit_in_pages << 23) | 4505 (useable << 20) | 4506 (lm << 21) | 4507 0x7000; 4508 4509 /* Install the new entry ... */ 4510 install: 4511 lp = (uint32_t *)(gdt_table + ldt_info.entry_number); 4512 lp[0] = tswap32(entry_1); 4513 lp[1] = tswap32(entry_2); 4514 return 0; 4515 } 4516 4517 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr) 4518 { 4519 struct target_modify_ldt_ldt_s *target_ldt_info; 4520 uint64_t *gdt_table = g2h(env->gdt.base); 4521 uint32_t base_addr, limit, flags; 4522 int seg_32bit, contents, read_exec_only, limit_in_pages, idx; 4523 int seg_not_present, useable, lm; 4524 uint32_t *lp, entry_1, entry_2; 4525 4526 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1); 4527 if (!target_ldt_info) 4528 return -TARGET_EFAULT; 4529 idx = tswap32(target_ldt_info->entry_number); 4530 if (idx < TARGET_GDT_ENTRY_TLS_MIN || 4531 idx > TARGET_GDT_ENTRY_TLS_MAX) { 4532 unlock_user_struct(target_ldt_info, ptr, 1); 4533 return -TARGET_EINVAL; 4534 } 4535 lp = (uint32_t *)(gdt_table + idx); 4536 entry_1 = tswap32(lp[0]); 4537 entry_2 = tswap32(lp[1]); 4538 4539 read_exec_only = ((entry_2 >> 9) & 1) ^ 1; 4540 contents = (entry_2 >> 10) & 3; 4541 seg_not_present = ((entry_2 >> 15) & 1) ^ 1; 4542 seg_32bit = (entry_2 >> 22) & 1; 4543 limit_in_pages = (entry_2 >> 23) & 1; 4544 useable = (entry_2 >> 20) & 1; 4545 #ifdef TARGET_ABI32 4546 lm = 0; 4547 #else 4548 lm = (entry_2 >> 21) & 1; 4549 #endif 4550 flags = (seg_32bit << 0) | (contents << 1) | 4551 (read_exec_only << 3) | (limit_in_pages << 4) | 4552 (seg_not_present << 5) | (useable << 6) | (lm << 7); 4553 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000); 4554 base_addr = (entry_1 >> 16) | 4555 (entry_2 & 0xff000000) | 4556 ((entry_2 & 0xff) << 16); 4557 target_ldt_info->base_addr = tswapal(base_addr); 4558 target_ldt_info->limit = tswap32(limit); 4559 target_ldt_info->flags = tswap32(flags); 4560 unlock_user_struct(target_ldt_info, ptr, 1); 4561 return 0; 4562 } 4563 #endif /* TARGET_I386 && TARGET_ABI32 */ 4564 4565 #ifndef TARGET_ABI32 4566 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) 4567 { 4568 abi_long ret = 0; 4569 abi_ulong val; 4570 int idx; 4571 4572 switch(code) { 4573 case TARGET_ARCH_SET_GS: 4574 case TARGET_ARCH_SET_FS: 4575 if (code == TARGET_ARCH_SET_GS) 4576 idx = R_GS; 4577 else 4578 idx = R_FS; 4579 cpu_x86_load_seg(env, idx, 0); 4580 env->segs[idx].base = addr; 4581 break; 4582 case TARGET_ARCH_GET_GS: 4583 case TARGET_ARCH_GET_FS: 4584 if (code == TARGET_ARCH_GET_GS) 4585 idx = R_GS; 4586 else 4587 idx = R_FS; 4588 val = env->segs[idx].base; 4589 if (put_user(val, addr, abi_ulong)) 4590 ret = -TARGET_EFAULT; 4591 break; 4592 default: 4593 ret = -TARGET_EINVAL; 4594 break; 4595 } 4596 return ret; 4597 } 4598 #endif 4599 4600 #endif /* defined(TARGET_I386) */ 4601 4602 #define NEW_STACK_SIZE 0x40000 4603 4604 4605 static pthread_mutex_t 
clone_lock = PTHREAD_MUTEX_INITIALIZER; 4606 typedef struct { 4607 CPUArchState *env; 4608 pthread_mutex_t mutex; 4609 pthread_cond_t cond; 4610 pthread_t thread; 4611 uint32_t tid; 4612 abi_ulong child_tidptr; 4613 abi_ulong parent_tidptr; 4614 sigset_t sigmask; 4615 } new_thread_info; 4616 4617 static void *clone_func(void *arg) 4618 { 4619 new_thread_info *info = arg; 4620 CPUArchState *env; 4621 CPUState *cpu; 4622 TaskState *ts; 4623 4624 rcu_register_thread(); 4625 env = info->env; 4626 cpu = ENV_GET_CPU(env); 4627 thread_cpu = cpu; 4628 ts = (TaskState *)cpu->opaque; 4629 info->tid = gettid(); 4630 cpu->host_tid = info->tid; 4631 task_settid(ts); 4632 if (info->child_tidptr) 4633 put_user_u32(info->tid, info->child_tidptr); 4634 if (info->parent_tidptr) 4635 put_user_u32(info->tid, info->parent_tidptr); 4636 /* Enable signals. */ 4637 sigprocmask(SIG_SETMASK, &info->sigmask, NULL); 4638 /* Signal to the parent that we're ready. */ 4639 pthread_mutex_lock(&info->mutex); 4640 pthread_cond_broadcast(&info->cond); 4641 pthread_mutex_unlock(&info->mutex); 4642 /* Wait until the parent has finished initializing the TLS state. */ 4643 pthread_mutex_lock(&clone_lock); 4644 pthread_mutex_unlock(&clone_lock); 4645 cpu_loop(env); 4646 /* never exits */ 4647 return NULL; 4648 } 4649 4650 /* do_fork() Must return host values and target errnos (unlike most 4651 do_*() functions). */ 4652 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, 4653 abi_ulong parent_tidptr, target_ulong newtls, 4654 abi_ulong child_tidptr) 4655 { 4656 CPUState *cpu = ENV_GET_CPU(env); 4657 int ret; 4658 TaskState *ts; 4659 CPUState *new_cpu; 4660 CPUArchState *new_env; 4661 unsigned int nptl_flags; 4662 sigset_t sigmask; 4663 4664 /* Emulate vfork() with fork() */ 4665 if (flags & CLONE_VFORK) 4666 flags &= ~(CLONE_VFORK | CLONE_VM); 4667 4668 if (flags & CLONE_VM) { 4669 TaskState *parent_ts = (TaskState *)cpu->opaque; 4670 new_thread_info info; 4671 pthread_attr_t attr; 4672 4673 ts = g_new0(TaskState, 1); 4674 init_task_state(ts); 4675 /* we create a new CPU instance. */ 4676 new_env = cpu_copy(env); 4677 /* Init regs that differ from the parent. */ 4678 cpu_clone_regs(new_env, newsp); 4679 new_cpu = ENV_GET_CPU(new_env); 4680 new_cpu->opaque = ts; 4681 ts->bprm = parent_ts->bprm; 4682 ts->info = parent_ts->info; 4683 nptl_flags = flags; 4684 flags &= ~CLONE_NPTL_FLAGS2; 4685 4686 if (nptl_flags & CLONE_CHILD_CLEARTID) { 4687 ts->child_tidptr = child_tidptr; 4688 } 4689 4690 if (nptl_flags & CLONE_SETTLS) 4691 cpu_set_tls (new_env, newtls); 4692 4693 /* Grab a mutex so that thread setup appears atomic. */ 4694 pthread_mutex_lock(&clone_lock); 4695 4696 memset(&info, 0, sizeof(info)); 4697 pthread_mutex_init(&info.mutex, NULL); 4698 pthread_mutex_lock(&info.mutex); 4699 pthread_cond_init(&info.cond, NULL); 4700 info.env = new_env; 4701 if (nptl_flags & CLONE_CHILD_SETTID) 4702 info.child_tidptr = child_tidptr; 4703 if (nptl_flags & CLONE_PARENT_SETTID) 4704 info.parent_tidptr = parent_tidptr; 4705 4706 ret = pthread_attr_init(&attr); 4707 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE); 4708 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 4709 /* It is not safe to deliver signals until the child has finished 4710 initializing, so temporarily block all signals. 
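The mask saved into info.sigmask is the parent's original one: the child inherits the fully blocked mask through pthread_create() and restores info.sigmask itself in clone_func() once its TaskState and tid are published, while the parent restores its own mask right after pthread_create() below.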
*/ 4711 sigfillset(&sigmask); 4712 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask); 4713 4714 ret = pthread_create(&info.thread, &attr, clone_func, &info); 4715 /* TODO: Free new CPU state if thread creation failed. */ 4716 4717 sigprocmask(SIG_SETMASK, &info.sigmask, NULL); 4718 pthread_attr_destroy(&attr); 4719 if (ret == 0) { 4720 /* Wait for the child to initialize. */ 4721 pthread_cond_wait(&info.cond, &info.mutex); 4722 ret = info.tid; 4723 if (flags & CLONE_PARENT_SETTID) 4724 put_user_u32(ret, parent_tidptr); 4725 } else { 4726 ret = -1; 4727 } 4728 pthread_mutex_unlock(&info.mutex); 4729 pthread_cond_destroy(&info.cond); 4730 pthread_mutex_destroy(&info.mutex); 4731 pthread_mutex_unlock(&clone_lock); 4732 } else { 4733 /* if no CLONE_VM, we consider it a fork */ 4734 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) { 4735 return -TARGET_EINVAL; 4736 } 4737 fork_start(); 4738 ret = fork(); 4739 if (ret == 0) { 4740 /* Child Process. */ 4741 rcu_after_fork(); 4742 cpu_clone_regs(env, newsp); 4743 fork_end(1); 4744 /* There is a race condition here. The parent process could 4745 theoretically read the TID in the child process before the child 4746 tid is set. This would require using either ptrace 4747 (not implemented) or having *_tidptr to point at a shared memory 4748 mapping. We can't repeat the spinlock hack used above because 4749 the child process gets its own copy of the lock. */ 4750 if (flags & CLONE_CHILD_SETTID) 4751 put_user_u32(gettid(), child_tidptr); 4752 if (flags & CLONE_PARENT_SETTID) 4753 put_user_u32(gettid(), parent_tidptr); 4754 ts = (TaskState *)cpu->opaque; 4755 if (flags & CLONE_SETTLS) 4756 cpu_set_tls (env, newtls); 4757 if (flags & CLONE_CHILD_CLEARTID) 4758 ts->child_tidptr = child_tidptr; 4759 } else { 4760 fork_end(0); 4761 } 4762 } 4763 return ret; 4764 } 4765 4766 /* warning: doesn't handle Linux-specific flags... 
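Commands with no host mapping fall through to the default case and yield -TARGET_EINVAL, which do_fcntl() returns to the guest unchanged; on 32 bit ABIs the TARGET_F_*LK64 commands are mapped onto the host's 64 bit file locking commands.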
*/ 4767 static int target_to_host_fcntl_cmd(int cmd) 4768 { 4769 switch(cmd) { 4770 case TARGET_F_DUPFD: 4771 case TARGET_F_GETFD: 4772 case TARGET_F_SETFD: 4773 case TARGET_F_GETFL: 4774 case TARGET_F_SETFL: 4775 return cmd; 4776 case TARGET_F_GETLK: 4777 return F_GETLK; 4778 case TARGET_F_SETLK: 4779 return F_SETLK; 4780 case TARGET_F_SETLKW: 4781 return F_SETLKW; 4782 case TARGET_F_GETOWN: 4783 return F_GETOWN; 4784 case TARGET_F_SETOWN: 4785 return F_SETOWN; 4786 case TARGET_F_GETSIG: 4787 return F_GETSIG; 4788 case TARGET_F_SETSIG: 4789 return F_SETSIG; 4790 #if TARGET_ABI_BITS == 32 4791 case TARGET_F_GETLK64: 4792 return F_GETLK64; 4793 case TARGET_F_SETLK64: 4794 return F_SETLK64; 4795 case TARGET_F_SETLKW64: 4796 return F_SETLKW64; 4797 #endif 4798 case TARGET_F_SETLEASE: 4799 return F_SETLEASE; 4800 case TARGET_F_GETLEASE: 4801 return F_GETLEASE; 4802 #ifdef F_DUPFD_CLOEXEC 4803 case TARGET_F_DUPFD_CLOEXEC: 4804 return F_DUPFD_CLOEXEC; 4805 #endif 4806 case TARGET_F_NOTIFY: 4807 return F_NOTIFY; 4808 #ifdef F_GETOWN_EX 4809 case TARGET_F_GETOWN_EX: 4810 return F_GETOWN_EX; 4811 #endif 4812 #ifdef F_SETOWN_EX 4813 case TARGET_F_SETOWN_EX: 4814 return F_SETOWN_EX; 4815 #endif 4816 default: 4817 return -TARGET_EINVAL; 4818 } 4819 return -TARGET_EINVAL; 4820 } 4821 4822 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a } 4823 static const bitmask_transtbl flock_tbl[] = { 4824 TRANSTBL_CONVERT(F_RDLCK), 4825 TRANSTBL_CONVERT(F_WRLCK), 4826 TRANSTBL_CONVERT(F_UNLCK), 4827 TRANSTBL_CONVERT(F_EXLCK), 4828 TRANSTBL_CONVERT(F_SHLCK), 4829 { 0, 0, 0, 0 } 4830 }; 4831 4832 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg) 4833 { 4834 struct flock fl; 4835 struct target_flock *target_fl; 4836 struct flock64 fl64; 4837 struct target_flock64 *target_fl64; 4838 #ifdef F_GETOWN_EX 4839 struct f_owner_ex fox; 4840 struct target_f_owner_ex *target_fox; 4841 #endif 4842 abi_long ret; 4843 int host_cmd = target_to_host_fcntl_cmd(cmd); 4844 4845 if (host_cmd == -TARGET_EINVAL) 4846 return host_cmd; 4847 4848 switch(cmd) { 4849 case TARGET_F_GETLK: 4850 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4851 return -TARGET_EFAULT; 4852 fl.l_type = 4853 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4854 fl.l_whence = tswap16(target_fl->l_whence); 4855 fl.l_start = tswapal(target_fl->l_start); 4856 fl.l_len = tswapal(target_fl->l_len); 4857 fl.l_pid = tswap32(target_fl->l_pid); 4858 unlock_user_struct(target_fl, arg, 0); 4859 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4860 if (ret == 0) { 4861 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0)) 4862 return -TARGET_EFAULT; 4863 target_fl->l_type = 4864 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl); 4865 target_fl->l_whence = tswap16(fl.l_whence); 4866 target_fl->l_start = tswapal(fl.l_start); 4867 target_fl->l_len = tswapal(fl.l_len); 4868 target_fl->l_pid = tswap32(fl.l_pid); 4869 unlock_user_struct(target_fl, arg, 1); 4870 } 4871 break; 4872 4873 case TARGET_F_SETLK: 4874 case TARGET_F_SETLKW: 4875 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1)) 4876 return -TARGET_EFAULT; 4877 fl.l_type = 4878 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl); 4879 fl.l_whence = tswap16(target_fl->l_whence); 4880 fl.l_start = tswapal(target_fl->l_start); 4881 fl.l_len = tswapal(target_fl->l_len); 4882 fl.l_pid = tswap32(target_fl->l_pid); 4883 unlock_user_struct(target_fl, arg, 0); 4884 ret = get_errno(fcntl(fd, host_cmd, &fl)); 4885 break; 4886 4887 case TARGET_F_GETLK64: 4888 if 
(!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4889 return -TARGET_EFAULT; 4890 fl64.l_type = 4891 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4892 fl64.l_whence = tswap16(target_fl64->l_whence); 4893 fl64.l_start = tswap64(target_fl64->l_start); 4894 fl64.l_len = tswap64(target_fl64->l_len); 4895 fl64.l_pid = tswap32(target_fl64->l_pid); 4896 unlock_user_struct(target_fl64, arg, 0); 4897 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4898 if (ret == 0) { 4899 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0)) 4900 return -TARGET_EFAULT; 4901 target_fl64->l_type = 4902 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1; 4903 target_fl64->l_whence = tswap16(fl64.l_whence); 4904 target_fl64->l_start = tswap64(fl64.l_start); 4905 target_fl64->l_len = tswap64(fl64.l_len); 4906 target_fl64->l_pid = tswap32(fl64.l_pid); 4907 unlock_user_struct(target_fl64, arg, 1); 4908 } 4909 break; 4910 case TARGET_F_SETLK64: 4911 case TARGET_F_SETLKW64: 4912 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1)) 4913 return -TARGET_EFAULT; 4914 fl64.l_type = 4915 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1; 4916 fl64.l_whence = tswap16(target_fl64->l_whence); 4917 fl64.l_start = tswap64(target_fl64->l_start); 4918 fl64.l_len = tswap64(target_fl64->l_len); 4919 fl64.l_pid = tswap32(target_fl64->l_pid); 4920 unlock_user_struct(target_fl64, arg, 0); 4921 ret = get_errno(fcntl(fd, host_cmd, &fl64)); 4922 break; 4923 4924 case TARGET_F_GETFL: 4925 ret = get_errno(fcntl(fd, host_cmd, arg)); 4926 if (ret >= 0) { 4927 ret = host_to_target_bitmask(ret, fcntl_flags_tbl); 4928 } 4929 break; 4930 4931 case TARGET_F_SETFL: 4932 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl))); 4933 break; 4934 4935 #ifdef F_GETOWN_EX 4936 case TARGET_F_GETOWN_EX: 4937 ret = get_errno(fcntl(fd, host_cmd, &fox)); 4938 if (ret >= 0) { 4939 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0)) 4940 return -TARGET_EFAULT; 4941 target_fox->type = tswap32(fox.type); 4942 target_fox->pid = tswap32(fox.pid); 4943 unlock_user_struct(target_fox, arg, 1); 4944 } 4945 break; 4946 #endif 4947 4948 #ifdef F_SETOWN_EX 4949 case TARGET_F_SETOWN_EX: 4950 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1)) 4951 return -TARGET_EFAULT; 4952 fox.type = tswap32(target_fox->type); 4953 fox.pid = tswap32(target_fox->pid); 4954 unlock_user_struct(target_fox, arg, 0); 4955 ret = get_errno(fcntl(fd, host_cmd, &fox)); 4956 break; 4957 #endif 4958 4959 case TARGET_F_SETOWN: 4960 case TARGET_F_GETOWN: 4961 case TARGET_F_SETSIG: 4962 case TARGET_F_GETSIG: 4963 case TARGET_F_SETLEASE: 4964 case TARGET_F_GETLEASE: 4965 ret = get_errno(fcntl(fd, host_cmd, arg)); 4966 break; 4967 4968 default: 4969 ret = get_errno(fcntl(fd, cmd, arg)); 4970 break; 4971 } 4972 return ret; 4973 } 4974 4975 #ifdef USE_UID16 4976 4977 static inline int high2lowuid(int uid) 4978 { 4979 if (uid > 65535) 4980 return 65534; 4981 else 4982 return uid; 4983 } 4984 4985 static inline int high2lowgid(int gid) 4986 { 4987 if (gid > 65535) 4988 return 65534; 4989 else 4990 return gid; 4991 } 4992 4993 static inline int low2highuid(int uid) 4994 { 4995 if ((int16_t)uid == -1) 4996 return -1; 4997 else 4998 return uid; 4999 } 5000 5001 static inline int low2highgid(int gid) 5002 { 5003 if ((int16_t)gid == -1) 5004 return -1; 5005 else 5006 return gid; 5007 } 5008 static inline int tswapid(int id) 5009 { 5010 return tswap16(id); 5011 } 5012 5013 #define put_user_id(x, gaddr) put_user_u16(x, 
gaddr) 5014 5015 #else /* !USE_UID16 */ 5016 static inline int high2lowuid(int uid) 5017 { 5018 return uid; 5019 } 5020 static inline int high2lowgid(int gid) 5021 { 5022 return gid; 5023 } 5024 static inline int low2highuid(int uid) 5025 { 5026 return uid; 5027 } 5028 static inline int low2highgid(int gid) 5029 { 5030 return gid; 5031 } 5032 static inline int tswapid(int id) 5033 { 5034 return tswap32(id); 5035 } 5036 5037 #define put_user_id(x, gaddr) put_user_u32(x, gaddr) 5038 5039 #endif /* USE_UID16 */ 5040 5041 void syscall_init(void) 5042 { 5043 IOCTLEntry *ie; 5044 const argtype *arg_type; 5045 int size; 5046 int i; 5047 5048 thunk_init(STRUCT_MAX); 5049 5050 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def); 5051 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def); 5052 #include "syscall_types.h" 5053 #undef STRUCT 5054 #undef STRUCT_SPECIAL 5055 5056 /* Build target_to_host_errno_table[] table from 5057 * host_to_target_errno_table[]. */ 5058 for (i = 0; i < ERRNO_TABLE_SIZE; i++) { 5059 target_to_host_errno_table[host_to_target_errno_table[i]] = i; 5060 } 5061 5062 /* we patch the ioctl size if necessary. We rely on the fact that 5063 no ioctl has all the bits at '1' in the size field */ 5064 ie = ioctl_entries; 5065 while (ie->target_cmd != 0) { 5066 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) == 5067 TARGET_IOC_SIZEMASK) { 5068 arg_type = ie->arg_type; 5069 if (arg_type[0] != TYPE_PTR) { 5070 fprintf(stderr, "cannot patch size for ioctl 0x%x\n", 5071 ie->target_cmd); 5072 exit(1); 5073 } 5074 arg_type++; 5075 size = thunk_type_size(arg_type, 0); 5076 ie->target_cmd = (ie->target_cmd & 5077 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) | 5078 (size << TARGET_IOC_SIZESHIFT); 5079 } 5080 5081 /* automatic consistency check if same arch */ 5082 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 5083 (defined(__x86_64__) && defined(TARGET_X86_64)) 5084 if (unlikely(ie->target_cmd != ie->host_cmd)) { 5085 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n", 5086 ie->name, ie->target_cmd, ie->host_cmd); 5087 } 5088 #endif 5089 ie++; 5090 } 5091 } 5092 5093 #if TARGET_ABI_BITS == 32 5094 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1) 5095 { 5096 #ifdef TARGET_WORDS_BIGENDIAN 5097 return ((uint64_t)word0 << 32) | word1; 5098 #else 5099 return ((uint64_t)word1 << 32) | word0; 5100 #endif 5101 } 5102 #else /* TARGET_ABI_BITS == 32 */ 5103 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1) 5104 { 5105 return word0; 5106 } 5107 #endif /* TARGET_ABI_BITS != 32 */ 5108 5109 #ifdef TARGET_NR_truncate64 5110 static inline abi_long target_truncate64(void *cpu_env, const char *arg1, 5111 abi_long arg2, 5112 abi_long arg3, 5113 abi_long arg4) 5114 { 5115 if (regpairs_aligned(cpu_env)) { 5116 arg2 = arg3; 5117 arg3 = arg4; 5118 } 5119 return get_errno(truncate64(arg1, target_offset64(arg2, arg3))); 5120 } 5121 #endif 5122 5123 #ifdef TARGET_NR_ftruncate64 5124 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1, 5125 abi_long arg2, 5126 abi_long arg3, 5127 abi_long arg4) 5128 { 5129 if (regpairs_aligned(cpu_env)) { 5130 arg2 = arg3; 5131 arg3 = arg4; 5132 } 5133 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3))); 5134 } 5135 #endif 5136 5137 static inline abi_long target_to_host_timespec(struct timespec *host_ts, 5138 abi_ulong target_addr) 5139 { 5140 struct 
target_timespec *target_ts; 5141 5142 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) 5143 return -TARGET_EFAULT; 5144 host_ts->tv_sec = tswapal(target_ts->tv_sec); 5145 host_ts->tv_nsec = tswapal(target_ts->tv_nsec); 5146 unlock_user_struct(target_ts, target_addr, 0); 5147 return 0; 5148 } 5149 5150 static inline abi_long host_to_target_timespec(abi_ulong target_addr, 5151 struct timespec *host_ts) 5152 { 5153 struct target_timespec *target_ts; 5154 5155 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) 5156 return -TARGET_EFAULT; 5157 target_ts->tv_sec = tswapal(host_ts->tv_sec); 5158 target_ts->tv_nsec = tswapal(host_ts->tv_nsec); 5159 unlock_user_struct(target_ts, target_addr, 1); 5160 return 0; 5161 } 5162 5163 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec, 5164 abi_ulong target_addr) 5165 { 5166 struct target_itimerspec *target_itspec; 5167 5168 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) { 5169 return -TARGET_EFAULT; 5170 } 5171 5172 host_itspec->it_interval.tv_sec = 5173 tswapal(target_itspec->it_interval.tv_sec); 5174 host_itspec->it_interval.tv_nsec = 5175 tswapal(target_itspec->it_interval.tv_nsec); 5176 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec); 5177 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec); 5178 5179 unlock_user_struct(target_itspec, target_addr, 1); 5180 return 0; 5181 } 5182 5183 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr, 5184 struct itimerspec *host_its) 5185 { 5186 struct target_itimerspec *target_itspec; 5187 5188 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) { 5189 return -TARGET_EFAULT; 5190 } 5191 5192 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec); 5193 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec); 5194 5195 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec); 5196 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec); 5197 5198 unlock_user_struct(target_itspec, target_addr, 0); 5199 return 0; 5200 } 5201 5202 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp, 5203 abi_ulong target_addr) 5204 { 5205 struct target_sigevent *target_sevp; 5206 5207 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) { 5208 return -TARGET_EFAULT; 5209 } 5210 5211 /* This union is awkward on 64 bit systems because it has a 32 bit 5212 * integer and a pointer in it; we follow the conversion approach 5213 * used for handling sigval types in signal.c so the guest should get 5214 * the correct value back even if we did a 64 bit byteswap and it's 5215 * using the 32 bit integer. 
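NOTE: sigev_signo is additionally passed through target_to_host_signal(), since the host kernel will deliver that signal number directly, and _sigev_un._tid is copied across so that SIGEV_THREAD_ID style notification keeps working.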
5216 */ 5217 host_sevp->sigev_value.sival_ptr = 5218 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr); 5219 host_sevp->sigev_signo = 5220 target_to_host_signal(tswap32(target_sevp->sigev_signo)); 5221 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify); 5222 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid); 5223 5224 unlock_user_struct(target_sevp, target_addr, 1); 5225 return 0; 5226 } 5227 5228 #if defined(TARGET_NR_mlockall) 5229 static inline int target_to_host_mlockall_arg(int arg) 5230 { 5231 int result = 0; 5232 5233 if (arg & TARGET_MLOCKALL_MCL_CURRENT) { 5234 result |= MCL_CURRENT; 5235 } 5236 if (arg & TARGET_MLOCKALL_MCL_FUTURE) { 5237 result |= MCL_FUTURE; 5238 } 5239 return result; 5240 } 5241 #endif 5242 5243 static inline abi_long host_to_target_stat64(void *cpu_env, 5244 abi_ulong target_addr, 5245 struct stat *host_st) 5246 { 5247 #if defined(TARGET_ARM) && defined(TARGET_ABI32) 5248 if (((CPUARMState *)cpu_env)->eabi) { 5249 struct target_eabi_stat64 *target_st; 5250 5251 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 5252 return -TARGET_EFAULT; 5253 memset(target_st, 0, sizeof(struct target_eabi_stat64)); 5254 __put_user(host_st->st_dev, &target_st->st_dev); 5255 __put_user(host_st->st_ino, &target_st->st_ino); 5256 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 5257 __put_user(host_st->st_ino, &target_st->__st_ino); 5258 #endif 5259 __put_user(host_st->st_mode, &target_st->st_mode); 5260 __put_user(host_st->st_nlink, &target_st->st_nlink); 5261 __put_user(host_st->st_uid, &target_st->st_uid); 5262 __put_user(host_st->st_gid, &target_st->st_gid); 5263 __put_user(host_st->st_rdev, &target_st->st_rdev); 5264 __put_user(host_st->st_size, &target_st->st_size); 5265 __put_user(host_st->st_blksize, &target_st->st_blksize); 5266 __put_user(host_st->st_blocks, &target_st->st_blocks); 5267 __put_user(host_st->st_atime, &target_st->target_st_atime); 5268 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 5269 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 5270 unlock_user_struct(target_st, target_addr, 1); 5271 } else 5272 #endif 5273 { 5274 #if defined(TARGET_HAS_STRUCT_STAT64) 5275 struct target_stat64 *target_st; 5276 #else 5277 struct target_stat *target_st; 5278 #endif 5279 5280 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) 5281 return -TARGET_EFAULT; 5282 memset(target_st, 0, sizeof(*target_st)); 5283 __put_user(host_st->st_dev, &target_st->st_dev); 5284 __put_user(host_st->st_ino, &target_st->st_ino); 5285 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO 5286 __put_user(host_st->st_ino, &target_st->__st_ino); 5287 #endif 5288 __put_user(host_st->st_mode, &target_st->st_mode); 5289 __put_user(host_st->st_nlink, &target_st->st_nlink); 5290 __put_user(host_st->st_uid, &target_st->st_uid); 5291 __put_user(host_st->st_gid, &target_st->st_gid); 5292 __put_user(host_st->st_rdev, &target_st->st_rdev); 5293 /* XXX: better use of kernel struct */ 5294 __put_user(host_st->st_size, &target_st->st_size); 5295 __put_user(host_st->st_blksize, &target_st->st_blksize); 5296 __put_user(host_st->st_blocks, &target_st->st_blocks); 5297 __put_user(host_st->st_atime, &target_st->target_st_atime); 5298 __put_user(host_st->st_mtime, &target_st->target_st_mtime); 5299 __put_user(host_st->st_ctime, &target_st->target_st_ctime); 5300 unlock_user_struct(target_st, target_addr, 1); 5301 } 5302 5303 return 0; 5304 } 5305 5306 /* ??? 
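Futex notes: FUTEX_WAIT's val is byteswapped below because the kernel compares it with the guest-byte-order word at uaddr, whereas FUTEX_WAKE's val is just a wakeup count; FUTEX_CMP_REQUEUE's val3 gets the same swap for the same reason.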
Using host futex calls even when target atomic operations 5307 are not really atomic probably breaks things. However implementing 5308 futexes locally would make futexes shared between multiple processes 5309 tricky. However they're probably useless because guest atomic 5310 operations won't work either. */ 5311 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout, 5312 target_ulong uaddr2, int val3) 5313 { 5314 struct timespec ts, *pts; 5315 int base_op; 5316 5317 /* ??? We assume FUTEX_* constants are the same on both host 5318 and target. */ 5319 #ifdef FUTEX_CMD_MASK 5320 base_op = op & FUTEX_CMD_MASK; 5321 #else 5322 base_op = op; 5323 #endif 5324 switch (base_op) { 5325 case FUTEX_WAIT: 5326 case FUTEX_WAIT_BITSET: 5327 if (timeout) { 5328 pts = &ts; 5329 target_to_host_timespec(pts, timeout); 5330 } else { 5331 pts = NULL; 5332 } 5333 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val), 5334 pts, NULL, val3)); 5335 case FUTEX_WAKE: 5336 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 5337 case FUTEX_FD: 5338 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0)); 5339 case FUTEX_REQUEUE: 5340 case FUTEX_CMP_REQUEUE: 5341 case FUTEX_WAKE_OP: 5342 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the 5343 TIMEOUT parameter is interpreted as a uint32_t by the kernel. 5344 But the prototype takes a `struct timespec *'; insert casts 5345 to satisfy the compiler. We do not need to tswap TIMEOUT 5346 since it's not compared to guest memory. */ 5347 pts = (struct timespec *)(uintptr_t) timeout; 5348 return get_errno(sys_futex(g2h(uaddr), op, val, pts, 5349 g2h(uaddr2), 5350 (base_op == FUTEX_CMP_REQUEUE 5351 ? tswap32(val3) 5352 : val3))); 5353 default: 5354 return -TARGET_ENOSYS; 5355 } 5356 } 5357 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 5358 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname, 5359 abi_long handle, abi_long mount_id, 5360 abi_long flags) 5361 { 5362 struct file_handle *target_fh; 5363 struct file_handle *fh; 5364 int mid = 0; 5365 abi_long ret; 5366 char *name; 5367 unsigned int size, total_size; 5368 5369 if (get_user_s32(size, handle)) { 5370 return -TARGET_EFAULT; 5371 } 5372 5373 name = lock_user_string(pathname); 5374 if (!name) { 5375 return -TARGET_EFAULT; 5376 } 5377 5378 total_size = sizeof(struct file_handle) + size; 5379 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0); 5380 if (!target_fh) { 5381 unlock_user(name, pathname, 0); 5382 return -TARGET_EFAULT; 5383 } 5384 5385 fh = g_malloc0(total_size); 5386 fh->handle_bytes = size; 5387 5388 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags)); 5389 unlock_user(name, pathname, 0); 5390 5391 /* man name_to_handle_at(2): 5392 * Other than the use of the handle_bytes field, the caller should treat 5393 * the file_handle structure as an opaque data type 5394 */ 5395 5396 memcpy(target_fh, fh, total_size); 5397 target_fh->handle_bytes = tswap32(fh->handle_bytes); 5398 target_fh->handle_type = tswap32(fh->handle_type); 5399 g_free(fh); 5400 unlock_user(target_fh, handle, total_size); 5401 5402 if (put_user_s32(mid, mount_id)) { 5403 return -TARGET_EFAULT; 5404 } 5405 5406 return ret; 5407 5408 } 5409 #endif 5410 5411 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 5412 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle, 5413 abi_long flags) 5414 { 5415 struct file_handle *target_fh; 5416 struct file_handle *fh; 5417 unsigned 
int size, total_size; 5418 abi_long ret; 5419 5420 if (get_user_s32(size, handle)) { 5421 return -TARGET_EFAULT; 5422 } 5423 5424 total_size = sizeof(struct file_handle) + size; 5425 target_fh = lock_user(VERIFY_READ, handle, total_size, 1); 5426 if (!target_fh) { 5427 return -TARGET_EFAULT; 5428 } 5429 5430 fh = g_memdup(target_fh, total_size); 5431 fh->handle_bytes = size; 5432 fh->handle_type = tswap32(target_fh->handle_type); 5433 5434 ret = get_errno(open_by_handle_at(mount_fd, fh, 5435 target_to_host_bitmask(flags, fcntl_flags_tbl))); 5436 5437 g_free(fh); 5438 5439 unlock_user(target_fh, handle, total_size); 5440 5441 return ret; 5442 } 5443 #endif 5444 5445 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4) 5446 5447 /* signalfd siginfo conversion */ 5448 5449 static void 5450 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo, 5451 const struct signalfd_siginfo *info) 5452 { 5453 int sig = host_to_target_signal(info->ssi_signo); 5454 5455 /* linux/signalfd.h defines a ssi_addr_lsb 5456 * not defined in sys/signalfd.h but used by some kernels 5457 */ 5458 5459 #ifdef BUS_MCEERR_AO 5460 if (tinfo->ssi_signo == SIGBUS && 5461 (tinfo->ssi_code == BUS_MCEERR_AR || 5462 tinfo->ssi_code == BUS_MCEERR_AO)) { 5463 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1); 5464 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1); 5465 *tssi_addr_lsb = tswap16(*ssi_addr_lsb); 5466 } 5467 #endif 5468 5469 tinfo->ssi_signo = tswap32(sig); 5470 tinfo->ssi_errno = tswap32(tinfo->ssi_errno); 5471 tinfo->ssi_code = tswap32(info->ssi_code); 5472 tinfo->ssi_pid = tswap32(info->ssi_pid); 5473 tinfo->ssi_uid = tswap32(info->ssi_uid); 5474 tinfo->ssi_fd = tswap32(info->ssi_fd); 5475 tinfo->ssi_tid = tswap32(info->ssi_tid); 5476 tinfo->ssi_band = tswap32(info->ssi_band); 5477 tinfo->ssi_overrun = tswap32(info->ssi_overrun); 5478 tinfo->ssi_trapno = tswap32(info->ssi_trapno); 5479 tinfo->ssi_status = tswap32(info->ssi_status); 5480 tinfo->ssi_int = tswap32(info->ssi_int); 5481 tinfo->ssi_ptr = tswap64(info->ssi_ptr); 5482 tinfo->ssi_utime = tswap64(info->ssi_utime); 5483 tinfo->ssi_stime = tswap64(info->ssi_stime); 5484 tinfo->ssi_addr = tswap64(info->ssi_addr); 5485 } 5486 5487 static abi_long host_to_target_data_signalfd(void *buf, size_t len) 5488 { 5489 int i; 5490 5491 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) { 5492 host_to_target_signalfd_siginfo(buf + i, buf + i); 5493 } 5494 5495 return len; 5496 } 5497 5498 static TargetFdTrans target_signalfd_trans = { 5499 .host_to_target_data = host_to_target_data_signalfd, 5500 }; 5501 5502 static abi_long do_signalfd4(int fd, abi_long mask, int flags) 5503 { 5504 int host_flags; 5505 target_sigset_t *target_mask; 5506 sigset_t host_mask; 5507 abi_long ret; 5508 5509 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) { 5510 return -TARGET_EINVAL; 5511 } 5512 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) { 5513 return -TARGET_EFAULT; 5514 } 5515 5516 target_to_host_sigset(&host_mask, target_mask); 5517 5518 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl); 5519 5520 ret = get_errno(signalfd(fd, &host_mask, host_flags)); 5521 if (ret >= 0) { 5522 fd_trans_register(ret, &target_signalfd_trans); 5523 } 5524 5525 unlock_user_struct(target_mask, mask, 0); 5526 5527 return ret; 5528 } 5529 #endif 5530 5531 /* Map host to target signal numbers for the wait family of syscalls. 5532 Assume all other status bits are the same. 
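For example, a child killed by a host signal keeps everything above the low 7 bits (including the core-dump flag) and only has its WTERMSIG field replaced by the target signal number, while a stopped child keeps the 0x7f marker in its low byte and has the WSTOPSIG byte translated.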
*/ 5533 int host_to_target_waitstatus(int status) 5534 { 5535 if (WIFSIGNALED(status)) { 5536 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f); 5537 } 5538 if (WIFSTOPPED(status)) { 5539 return (host_to_target_signal(WSTOPSIG(status)) << 8) 5540 | (status & 0xff); 5541 } 5542 return status; 5543 } 5544 5545 static int open_self_cmdline(void *cpu_env, int fd) 5546 { 5547 int fd_orig = -1; 5548 bool word_skipped = false; 5549 5550 fd_orig = open("/proc/self/cmdline", O_RDONLY); 5551 if (fd_orig < 0) { 5552 return fd_orig; 5553 } 5554 5555 while (true) { 5556 ssize_t nb_read; 5557 char buf[128]; 5558 char *cp_buf = buf; 5559 5560 nb_read = read(fd_orig, buf, sizeof(buf)); 5561 if (nb_read < 0) { 5562 int e = errno; 5563 fd_orig = close(fd_orig); 5564 errno = e; 5565 return -1; 5566 } else if (nb_read == 0) { 5567 break; 5568 } 5569 5570 if (!word_skipped) { 5571 /* Skip the first string, which is the path to qemu-*-static 5572 instead of the actual command. */ 5573 cp_buf = memchr(buf, 0, sizeof(buf)); 5574 if (cp_buf) { 5575 /* Null byte found, skip one string */ 5576 cp_buf++; 5577 nb_read -= cp_buf - buf; 5578 word_skipped = true; 5579 } 5580 } 5581 5582 if (word_skipped) { 5583 if (write(fd, cp_buf, nb_read) != nb_read) { 5584 int e = errno; 5585 close(fd_orig); 5586 errno = e; 5587 return -1; 5588 } 5589 } 5590 } 5591 5592 return close(fd_orig); 5593 } 5594 5595 static int open_self_maps(void *cpu_env, int fd) 5596 { 5597 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5598 TaskState *ts = cpu->opaque; 5599 FILE *fp; 5600 char *line = NULL; 5601 size_t len = 0; 5602 ssize_t read; 5603 5604 fp = fopen("/proc/self/maps", "r"); 5605 if (fp == NULL) { 5606 return -1; 5607 } 5608 5609 while ((read = getline(&line, &len, fp)) != -1) { 5610 int fields, dev_maj, dev_min, inode; 5611 uint64_t min, max, offset; 5612 char flag_r, flag_w, flag_x, flag_p; 5613 char path[512] = ""; 5614 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" 5615 " %512s", &min, &max, &flag_r, &flag_w, &flag_x, 5616 &flag_p, &offset, &dev_maj, &dev_min, &inode, path); 5617 5618 if ((fields < 10) || (fields > 11)) { 5619 continue; 5620 } 5621 if (h2g_valid(min)) { 5622 int flags = page_get_flags(h2g(min)); 5623 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX); 5624 if (page_check_range(h2g(min), max - min, flags) == -1) { 5625 continue; 5626 } 5627 if (h2g(min) == ts->info->stack_limit) { 5628 pstrcpy(path, sizeof(path), " [stack]"); 5629 } 5630 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx 5631 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n", 5632 h2g(min), h2g(max - 1) + 1, flag_r, flag_w, 5633 flag_x, flag_p, offset, dev_maj, dev_min, inode, 5634 path[0] ? 
" " : "", path); 5635 } 5636 } 5637 5638 free(line); 5639 fclose(fp); 5640 5641 return 0; 5642 } 5643 5644 static int open_self_stat(void *cpu_env, int fd) 5645 { 5646 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5647 TaskState *ts = cpu->opaque; 5648 abi_ulong start_stack = ts->info->start_stack; 5649 int i; 5650 5651 for (i = 0; i < 44; i++) { 5652 char buf[128]; 5653 int len; 5654 uint64_t val = 0; 5655 5656 if (i == 0) { 5657 /* pid */ 5658 val = getpid(); 5659 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5660 } else if (i == 1) { 5661 /* app name */ 5662 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]); 5663 } else if (i == 27) { 5664 /* stack bottom */ 5665 val = start_stack; 5666 snprintf(buf, sizeof(buf), "%"PRId64 " ", val); 5667 } else { 5668 /* for the rest, there is MasterCard */ 5669 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' '); 5670 } 5671 5672 len = strlen(buf); 5673 if (write(fd, buf, len) != len) { 5674 return -1; 5675 } 5676 } 5677 5678 return 0; 5679 } 5680 5681 static int open_self_auxv(void *cpu_env, int fd) 5682 { 5683 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 5684 TaskState *ts = cpu->opaque; 5685 abi_ulong auxv = ts->info->saved_auxv; 5686 abi_ulong len = ts->info->auxv_len; 5687 char *ptr; 5688 5689 /* 5690 * Auxiliary vector is stored in target process stack. 5691 * read in whole auxv vector and copy it to file 5692 */ 5693 ptr = lock_user(VERIFY_READ, auxv, len, 0); 5694 if (ptr != NULL) { 5695 while (len > 0) { 5696 ssize_t r; 5697 r = write(fd, ptr, len); 5698 if (r <= 0) { 5699 break; 5700 } 5701 len -= r; 5702 ptr += r; 5703 } 5704 lseek(fd, 0, SEEK_SET); 5705 unlock_user(ptr, auxv, len); 5706 } 5707 5708 return 0; 5709 } 5710 5711 static int is_proc_myself(const char *filename, const char *entry) 5712 { 5713 if (!strncmp(filename, "/proc/", strlen("/proc/"))) { 5714 filename += strlen("/proc/"); 5715 if (!strncmp(filename, "self/", strlen("self/"))) { 5716 filename += strlen("self/"); 5717 } else if (*filename >= '1' && *filename <= '9') { 5718 char myself[80]; 5719 snprintf(myself, sizeof(myself), "%d/", getpid()); 5720 if (!strncmp(filename, myself, strlen(myself))) { 5721 filename += strlen(myself); 5722 } else { 5723 return 0; 5724 } 5725 } else { 5726 return 0; 5727 } 5728 if (!strcmp(filename, entry)) { 5729 return 1; 5730 } 5731 } 5732 return 0; 5733 } 5734 5735 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5736 static int is_proc(const char *filename, const char *entry) 5737 { 5738 return strcmp(filename, entry) == 0; 5739 } 5740 5741 static int open_net_route(void *cpu_env, int fd) 5742 { 5743 FILE *fp; 5744 char *line = NULL; 5745 size_t len = 0; 5746 ssize_t read; 5747 5748 fp = fopen("/proc/net/route", "r"); 5749 if (fp == NULL) { 5750 return -1; 5751 } 5752 5753 /* read header */ 5754 5755 read = getline(&line, &len, fp); 5756 dprintf(fd, "%s", line); 5757 5758 /* read routes */ 5759 5760 while ((read = getline(&line, &len, fp)) != -1) { 5761 char iface[16]; 5762 uint32_t dest, gw, mask; 5763 unsigned int flags, refcnt, use, metric, mtu, window, irtt; 5764 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5765 iface, &dest, &gw, &flags, &refcnt, &use, &metric, 5766 &mask, &mtu, &window, &irtt); 5767 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n", 5768 iface, tswap32(dest), tswap32(gw), flags, refcnt, use, 5769 metric, tswap32(mask), mtu, window, irtt); 5770 } 5771 5772 free(line); 5773 fclose(fp); 5774 5775 return 0; 5776 } 5777 #endif 5778 
5779 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode) 5780 { 5781 struct fake_open { 5782 const char *filename; 5783 int (*fill)(void *cpu_env, int fd); 5784 int (*cmp)(const char *s1, const char *s2); 5785 }; 5786 const struct fake_open *fake_open; 5787 static const struct fake_open fakes[] = { 5788 { "maps", open_self_maps, is_proc_myself }, 5789 { "stat", open_self_stat, is_proc_myself }, 5790 { "auxv", open_self_auxv, is_proc_myself }, 5791 { "cmdline", open_self_cmdline, is_proc_myself }, 5792 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) 5793 { "/proc/net/route", open_net_route, is_proc }, 5794 #endif 5795 { NULL, NULL, NULL } 5796 }; 5797 5798 if (is_proc_myself(pathname, "exe")) { 5799 int execfd = qemu_getauxval(AT_EXECFD); 5800 return execfd ? execfd : sys_openat(dirfd, exec_path, flags, mode); 5801 } 5802 5803 for (fake_open = fakes; fake_open->filename; fake_open++) { 5804 if (fake_open->cmp(pathname, fake_open->filename)) { 5805 break; 5806 } 5807 } 5808 5809 if (fake_open->filename) { 5810 const char *tmpdir; 5811 char filename[PATH_MAX]; 5812 int fd, r; 5813 5814 /* create temporary file to map stat to */ 5815 tmpdir = getenv("TMPDIR"); 5816 if (!tmpdir) 5817 tmpdir = "/tmp"; 5818 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir); 5819 fd = mkstemp(filename); 5820 if (fd < 0) { 5821 return fd; 5822 } 5823 unlink(filename); 5824 5825 if ((r = fake_open->fill(cpu_env, fd))) { 5826 int e = errno; 5827 close(fd); 5828 errno = e; 5829 return r; 5830 } 5831 lseek(fd, 0, SEEK_SET); 5832 5833 return fd; 5834 } 5835 5836 return sys_openat(dirfd, path(pathname), flags, mode); 5837 } 5838 5839 #define TIMER_MAGIC 0x0caf0000 5840 #define TIMER_MAGIC_MASK 0xffff0000 5841 5842 /* Convert QEMU provided timer ID back to internal 16bit index format */ 5843 static target_timer_t get_timer_id(abi_long arg) 5844 { 5845 target_timer_t timerid = arg; 5846 5847 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) { 5848 return -TARGET_EINVAL; 5849 } 5850 5851 timerid &= 0xffff; 5852 5853 if (timerid >= ARRAY_SIZE(g_posix_timers)) { 5854 return -TARGET_EINVAL; 5855 } 5856 5857 return timerid; 5858 } 5859 5860 /* do_syscall() should always have a single exit point at the end so 5861 that actions, such as logging of syscall results, can be performed. 5862 All errnos that do_syscall() returns must be -TARGET_<errcode>. */ 5863 abi_long do_syscall(void *cpu_env, int num, abi_long arg1, 5864 abi_long arg2, abi_long arg3, abi_long arg4, 5865 abi_long arg5, abi_long arg6, abi_long arg7, 5866 abi_long arg8) 5867 { 5868 CPUState *cpu = ENV_GET_CPU(cpu_env); 5869 abi_long ret; 5870 struct stat st; 5871 struct statfs stfs; 5872 void *p; 5873 5874 #ifdef DEBUG 5875 gemu_log("syscall %d", num); 5876 #endif 5877 if(do_strace) 5878 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); 5879 5880 switch(num) { 5881 case TARGET_NR_exit: 5882 /* In old applications this may be used to implement _exit(2). 5883 However in threaded applications it is used for thread termination, 5884 and _exit_group is used for application termination. 5885 Do thread termination if we have more than one thread. */ 5886 /* FIXME: This probably breaks if a signal arrives. We should probably 5887 be disabling signals. */ 5888 if (CPU_NEXT(first_cpu)) { 5889 TaskState *ts; 5890 5891 cpu_list_lock(); 5892 /* Remove the CPU from the list.
*/ 5893 QTAILQ_REMOVE(&cpus, cpu, node); 5894 cpu_list_unlock(); 5895 ts = cpu->opaque; 5896 if (ts->child_tidptr) { 5897 put_user_u32(0, ts->child_tidptr); 5898 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, 5899 NULL, NULL, 0); 5900 } 5901 thread_cpu = NULL; 5902 object_unref(OBJECT(cpu)); 5903 g_free(ts); 5904 rcu_unregister_thread(); 5905 pthread_exit(NULL); 5906 } 5907 #ifdef TARGET_GPROF 5908 _mcleanup(); 5909 #endif 5910 gdb_exit(cpu_env, arg1); 5911 _exit(arg1); 5912 ret = 0; /* avoid warning */ 5913 break; 5914 case TARGET_NR_read: 5915 if (arg3 == 0) 5916 ret = 0; 5917 else { 5918 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 5919 goto efault; 5920 ret = get_errno(read(arg1, p, arg3)); 5921 if (ret >= 0 && 5922 fd_trans_host_to_target_data(arg1)) { 5923 ret = fd_trans_host_to_target_data(arg1)(p, ret); 5924 } 5925 unlock_user(p, arg2, ret); 5926 } 5927 break; 5928 case TARGET_NR_write: 5929 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 5930 goto efault; 5931 ret = get_errno(write(arg1, p, arg3)); 5932 unlock_user(p, arg2, 0); 5933 break; 5934 #ifdef TARGET_NR_open 5935 case TARGET_NR_open: 5936 if (!(p = lock_user_string(arg1))) 5937 goto efault; 5938 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, 5939 target_to_host_bitmask(arg2, fcntl_flags_tbl), 5940 arg3)); 5941 fd_trans_unregister(ret); 5942 unlock_user(p, arg1, 0); 5943 break; 5944 #endif 5945 case TARGET_NR_openat: 5946 if (!(p = lock_user_string(arg2))) 5947 goto efault; 5948 ret = get_errno(do_openat(cpu_env, arg1, p, 5949 target_to_host_bitmask(arg3, fcntl_flags_tbl), 5950 arg4)); 5951 fd_trans_unregister(ret); 5952 unlock_user(p, arg2, 0); 5953 break; 5954 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 5955 case TARGET_NR_name_to_handle_at: 5956 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); 5957 break; 5958 #endif 5959 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) 5960 case TARGET_NR_open_by_handle_at: 5961 ret = do_open_by_handle_at(arg1, arg2, arg3); 5962 fd_trans_unregister(ret); 5963 break; 5964 #endif 5965 case TARGET_NR_close: 5966 fd_trans_unregister(arg1); 5967 ret = get_errno(close(arg1)); 5968 break; 5969 case TARGET_NR_brk: 5970 ret = do_brk(arg1); 5971 break; 5972 #ifdef TARGET_NR_fork 5973 case TARGET_NR_fork: 5974 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); 5975 break; 5976 #endif 5977 #ifdef TARGET_NR_waitpid 5978 case TARGET_NR_waitpid: 5979 { 5980 int status; 5981 ret = get_errno(waitpid(arg1, &status, arg3)); 5982 if (!is_error(ret) && arg2 && ret 5983 && put_user_s32(host_to_target_waitstatus(status), arg2)) 5984 goto efault; 5985 } 5986 break; 5987 #endif 5988 #ifdef TARGET_NR_waitid 5989 case TARGET_NR_waitid: 5990 { 5991 siginfo_t info; 5992 info.si_pid = 0; 5993 ret = get_errno(waitid(arg1, arg2, &info, arg4)); 5994 if (!is_error(ret) && arg3 && info.si_pid != 0) { 5995 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) 5996 goto efault; 5997 host_to_target_siginfo(p, &info); 5998 unlock_user(p, arg3, sizeof(target_siginfo_t)); 5999 } 6000 } 6001 break; 6002 #endif 6003 #ifdef TARGET_NR_creat /* not on alpha */ 6004 case TARGET_NR_creat: 6005 if (!(p = lock_user_string(arg1))) 6006 goto efault; 6007 ret = get_errno(creat(p, arg2)); 6008 fd_trans_unregister(ret); 6009 unlock_user(p, arg1, 0); 6010 break; 6011 #endif 6012 #ifdef TARGET_NR_link 6013 case TARGET_NR_link: 6014 { 6015 void * p2; 6016 p = lock_user_string(arg1); 6017 p2 = lock_user_string(arg2); 6018 if (!p || !p2) 6019 
ret = -TARGET_EFAULT; 6020 else 6021 ret = get_errno(link(p, p2)); 6022 unlock_user(p2, arg2, 0); 6023 unlock_user(p, arg1, 0); 6024 } 6025 break; 6026 #endif 6027 #if defined(TARGET_NR_linkat) 6028 case TARGET_NR_linkat: 6029 { 6030 void * p2 = NULL; 6031 if (!arg2 || !arg4) 6032 goto efault; 6033 p = lock_user_string(arg2); 6034 p2 = lock_user_string(arg4); 6035 if (!p || !p2) 6036 ret = -TARGET_EFAULT; 6037 else 6038 ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); 6039 unlock_user(p, arg2, 0); 6040 unlock_user(p2, arg4, 0); 6041 } 6042 break; 6043 #endif 6044 #ifdef TARGET_NR_unlink 6045 case TARGET_NR_unlink: 6046 if (!(p = lock_user_string(arg1))) 6047 goto efault; 6048 ret = get_errno(unlink(p)); 6049 unlock_user(p, arg1, 0); 6050 break; 6051 #endif 6052 #if defined(TARGET_NR_unlinkat) 6053 case TARGET_NR_unlinkat: 6054 if (!(p = lock_user_string(arg2))) 6055 goto efault; 6056 ret = get_errno(unlinkat(arg1, p, arg3)); 6057 unlock_user(p, arg2, 0); 6058 break; 6059 #endif 6060 case TARGET_NR_execve: 6061 { 6062 char **argp, **envp; 6063 int argc, envc; 6064 abi_ulong gp; 6065 abi_ulong guest_argp; 6066 abi_ulong guest_envp; 6067 abi_ulong addr; 6068 char **q; 6069 int total_size = 0; 6070 6071 argc = 0; 6072 guest_argp = arg2; 6073 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { 6074 if (get_user_ual(addr, gp)) 6075 goto efault; 6076 if (!addr) 6077 break; 6078 argc++; 6079 } 6080 envc = 0; 6081 guest_envp = arg3; 6082 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { 6083 if (get_user_ual(addr, gp)) 6084 goto efault; 6085 if (!addr) 6086 break; 6087 envc++; 6088 } 6089 6090 argp = alloca((argc + 1) * sizeof(void *)); 6091 envp = alloca((envc + 1) * sizeof(void *)); 6092 6093 for (gp = guest_argp, q = argp; gp; 6094 gp += sizeof(abi_ulong), q++) { 6095 if (get_user_ual(addr, gp)) 6096 goto execve_efault; 6097 if (!addr) 6098 break; 6099 if (!(*q = lock_user_string(addr))) 6100 goto execve_efault; 6101 total_size += strlen(*q) + 1; 6102 } 6103 *q = NULL; 6104 6105 for (gp = guest_envp, q = envp; gp; 6106 gp += sizeof(abi_ulong), q++) { 6107 if (get_user_ual(addr, gp)) 6108 goto execve_efault; 6109 if (!addr) 6110 break; 6111 if (!(*q = lock_user_string(addr))) 6112 goto execve_efault; 6113 total_size += strlen(*q) + 1; 6114 } 6115 *q = NULL; 6116 6117 if (!(p = lock_user_string(arg1))) 6118 goto execve_efault; 6119 ret = get_errno(execve(p, argp, envp)); 6120 unlock_user(p, arg1, 0); 6121 6122 goto execve_end; 6123 6124 execve_efault: 6125 ret = -TARGET_EFAULT; 6126 6127 execve_end: 6128 for (gp = guest_argp, q = argp; *q; 6129 gp += sizeof(abi_ulong), q++) { 6130 if (get_user_ual(addr, gp) 6131 || !addr) 6132 break; 6133 unlock_user(*q, addr, 0); 6134 } 6135 for (gp = guest_envp, q = envp; *q; 6136 gp += sizeof(abi_ulong), q++) { 6137 if (get_user_ual(addr, gp) 6138 || !addr) 6139 break; 6140 unlock_user(*q, addr, 0); 6141 } 6142 } 6143 break; 6144 case TARGET_NR_chdir: 6145 if (!(p = lock_user_string(arg1))) 6146 goto efault; 6147 ret = get_errno(chdir(p)); 6148 unlock_user(p, arg1, 0); 6149 break; 6150 #ifdef TARGET_NR_time 6151 case TARGET_NR_time: 6152 { 6153 time_t host_time; 6154 ret = get_errno(time(&host_time)); 6155 if (!is_error(ret) 6156 && arg1 6157 && put_user_sal(host_time, arg1)) 6158 goto efault; 6159 } 6160 break; 6161 #endif 6162 #ifdef TARGET_NR_mknod 6163 case TARGET_NR_mknod: 6164 if (!(p = lock_user_string(arg1))) 6165 goto efault; 6166 ret = get_errno(mknod(p, arg2, arg3)); 6167 unlock_user(p, arg1, 0); 6168 break; 6169 #endif 6170 #if 
defined(TARGET_NR_mknodat) 6171 case TARGET_NR_mknodat: 6172 if (!(p = lock_user_string(arg2))) 6173 goto efault; 6174 ret = get_errno(mknodat(arg1, p, arg3, arg4)); 6175 unlock_user(p, arg2, 0); 6176 break; 6177 #endif 6178 #ifdef TARGET_NR_chmod 6179 case TARGET_NR_chmod: 6180 if (!(p = lock_user_string(arg1))) 6181 goto efault; 6182 ret = get_errno(chmod(p, arg2)); 6183 unlock_user(p, arg1, 0); 6184 break; 6185 #endif 6186 #ifdef TARGET_NR_break 6187 case TARGET_NR_break: 6188 goto unimplemented; 6189 #endif 6190 #ifdef TARGET_NR_oldstat 6191 case TARGET_NR_oldstat: 6192 goto unimplemented; 6193 #endif 6194 case TARGET_NR_lseek: 6195 ret = get_errno(lseek(arg1, arg2, arg3)); 6196 break; 6197 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) 6198 /* Alpha specific */ 6199 case TARGET_NR_getxpid: 6200 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); 6201 ret = get_errno(getpid()); 6202 break; 6203 #endif 6204 #ifdef TARGET_NR_getpid 6205 case TARGET_NR_getpid: 6206 ret = get_errno(getpid()); 6207 break; 6208 #endif 6209 case TARGET_NR_mount: 6210 { 6211 /* need to look at the data field */ 6212 void *p2, *p3; 6213 6214 if (arg1) { 6215 p = lock_user_string(arg1); 6216 if (!p) { 6217 goto efault; 6218 } 6219 } else { 6220 p = NULL; 6221 } 6222 6223 p2 = lock_user_string(arg2); 6224 if (!p2) { 6225 if (arg1) { 6226 unlock_user(p, arg1, 0); 6227 } 6228 goto efault; 6229 } 6230 6231 if (arg3) { 6232 p3 = lock_user_string(arg3); 6233 if (!p3) { 6234 if (arg1) { 6235 unlock_user(p, arg1, 0); 6236 } 6237 unlock_user(p2, arg2, 0); 6238 goto efault; 6239 } 6240 } else { 6241 p3 = NULL; 6242 } 6243 6244 /* FIXME - arg5 should be locked, but it isn't clear how to 6245 * do that since it's not guaranteed to be a NULL-terminated 6246 * string. 6247 */ 6248 if (!arg5) { 6249 ret = mount(p, p2, p3, (unsigned long)arg4, NULL); 6250 } else { 6251 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); 6252 } 6253 ret = get_errno(ret); 6254 6255 if (arg1) { 6256 unlock_user(p, arg1, 0); 6257 } 6258 unlock_user(p2, arg2, 0); 6259 if (arg3) { 6260 unlock_user(p3, arg3, 0); 6261 } 6262 } 6263 break; 6264 #ifdef TARGET_NR_umount 6265 case TARGET_NR_umount: 6266 if (!(p = lock_user_string(arg1))) 6267 goto efault; 6268 ret = get_errno(umount(p)); 6269 unlock_user(p, arg1, 0); 6270 break; 6271 #endif 6272 #ifdef TARGET_NR_stime /* not on alpha */ 6273 case TARGET_NR_stime: 6274 { 6275 time_t host_time; 6276 if (get_user_sal(host_time, arg1)) 6277 goto efault; 6278 ret = get_errno(stime(&host_time)); 6279 } 6280 break; 6281 #endif 6282 case TARGET_NR_ptrace: 6283 goto unimplemented; 6284 #ifdef TARGET_NR_alarm /* not on alpha */ 6285 case TARGET_NR_alarm: 6286 ret = alarm(arg1); 6287 break; 6288 #endif 6289 #ifdef TARGET_NR_oldfstat 6290 case TARGET_NR_oldfstat: 6291 goto unimplemented; 6292 #endif 6293 #ifdef TARGET_NR_pause /* not on alpha */ 6294 case TARGET_NR_pause: 6295 ret = get_errno(pause()); 6296 break; 6297 #endif 6298 #ifdef TARGET_NR_utime 6299 case TARGET_NR_utime: 6300 { 6301 struct utimbuf tbuf, *host_tbuf; 6302 struct target_utimbuf *target_tbuf; 6303 if (arg2) { 6304 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) 6305 goto efault; 6306 tbuf.actime = tswapal(target_tbuf->actime); 6307 tbuf.modtime = tswapal(target_tbuf->modtime); 6308 unlock_user_struct(target_tbuf, arg2, 0); 6309 host_tbuf = &tbuf; 6310 } else { 6311 host_tbuf = NULL; 6312 } 6313 if (!(p = lock_user_string(arg1))) 6314 goto efault; 6315 ret = get_errno(utime(p, host_tbuf)); 6316 unlock_user(p, arg1, 0); 6317 } 
6318 break; 6319 #endif 6320 #ifdef TARGET_NR_utimes 6321 case TARGET_NR_utimes: 6322 { 6323 struct timeval *tvp, tv[2]; 6324 if (arg2) { 6325 if (copy_from_user_timeval(&tv[0], arg2) 6326 || copy_from_user_timeval(&tv[1], 6327 arg2 + sizeof(struct target_timeval))) 6328 goto efault; 6329 tvp = tv; 6330 } else { 6331 tvp = NULL; 6332 } 6333 if (!(p = lock_user_string(arg1))) 6334 goto efault; 6335 ret = get_errno(utimes(p, tvp)); 6336 unlock_user(p, arg1, 0); 6337 } 6338 break; 6339 #endif 6340 #if defined(TARGET_NR_futimesat) 6341 case TARGET_NR_futimesat: 6342 { 6343 struct timeval *tvp, tv[2]; 6344 if (arg3) { 6345 if (copy_from_user_timeval(&tv[0], arg3) 6346 || copy_from_user_timeval(&tv[1], 6347 arg3 + sizeof(struct target_timeval))) 6348 goto efault; 6349 tvp = tv; 6350 } else { 6351 tvp = NULL; 6352 } 6353 if (!(p = lock_user_string(arg2))) 6354 goto efault; 6355 ret = get_errno(futimesat(arg1, path(p), tvp)); 6356 unlock_user(p, arg2, 0); 6357 } 6358 break; 6359 #endif 6360 #ifdef TARGET_NR_stty 6361 case TARGET_NR_stty: 6362 goto unimplemented; 6363 #endif 6364 #ifdef TARGET_NR_gtty 6365 case TARGET_NR_gtty: 6366 goto unimplemented; 6367 #endif 6368 #ifdef TARGET_NR_access 6369 case TARGET_NR_access: 6370 if (!(p = lock_user_string(arg1))) 6371 goto efault; 6372 ret = get_errno(access(path(p), arg2)); 6373 unlock_user(p, arg1, 0); 6374 break; 6375 #endif 6376 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) 6377 case TARGET_NR_faccessat: 6378 if (!(p = lock_user_string(arg2))) 6379 goto efault; 6380 ret = get_errno(faccessat(arg1, p, arg3, 0)); 6381 unlock_user(p, arg2, 0); 6382 break; 6383 #endif 6384 #ifdef TARGET_NR_nice /* not on alpha */ 6385 case TARGET_NR_nice: 6386 ret = get_errno(nice(arg1)); 6387 break; 6388 #endif 6389 #ifdef TARGET_NR_ftime 6390 case TARGET_NR_ftime: 6391 goto unimplemented; 6392 #endif 6393 case TARGET_NR_sync: 6394 sync(); 6395 ret = 0; 6396 break; 6397 case TARGET_NR_kill: 6398 ret = get_errno(kill(arg1, target_to_host_signal(arg2))); 6399 break; 6400 #ifdef TARGET_NR_rename 6401 case TARGET_NR_rename: 6402 { 6403 void *p2; 6404 p = lock_user_string(arg1); 6405 p2 = lock_user_string(arg2); 6406 if (!p || !p2) 6407 ret = -TARGET_EFAULT; 6408 else 6409 ret = get_errno(rename(p, p2)); 6410 unlock_user(p2, arg2, 0); 6411 unlock_user(p, arg1, 0); 6412 } 6413 break; 6414 #endif 6415 #if defined(TARGET_NR_renameat) 6416 case TARGET_NR_renameat: 6417 { 6418 void *p2; 6419 p = lock_user_string(arg2); 6420 p2 = lock_user_string(arg4); 6421 if (!p || !p2) 6422 ret = -TARGET_EFAULT; 6423 else 6424 ret = get_errno(renameat(arg1, p, arg3, p2)); 6425 unlock_user(p2, arg4, 0); 6426 unlock_user(p, arg2, 0); 6427 } 6428 break; 6429 #endif 6430 #ifdef TARGET_NR_mkdir 6431 case TARGET_NR_mkdir: 6432 if (!(p = lock_user_string(arg1))) 6433 goto efault; 6434 ret = get_errno(mkdir(p, arg2)); 6435 unlock_user(p, arg1, 0); 6436 break; 6437 #endif 6438 #if defined(TARGET_NR_mkdirat) 6439 case TARGET_NR_mkdirat: 6440 if (!(p = lock_user_string(arg2))) 6441 goto efault; 6442 ret = get_errno(mkdirat(arg1, p, arg3)); 6443 unlock_user(p, arg2, 0); 6444 break; 6445 #endif 6446 #ifdef TARGET_NR_rmdir 6447 case TARGET_NR_rmdir: 6448 if (!(p = lock_user_string(arg1))) 6449 goto efault; 6450 ret = get_errno(rmdir(p)); 6451 unlock_user(p, arg1, 0); 6452 break; 6453 #endif 6454 case TARGET_NR_dup: 6455 ret = get_errno(dup(arg1)); 6456 if (ret >= 0) { 6457 fd_trans_dup(arg1, ret); 6458 } 6459 break; 6460 #ifdef TARGET_NR_pipe 6461 case TARGET_NR_pipe: 6462 ret = 
do_pipe(cpu_env, arg1, 0, 0); 6463 break; 6464 #endif 6465 #ifdef TARGET_NR_pipe2 6466 case TARGET_NR_pipe2: 6467 ret = do_pipe(cpu_env, arg1, 6468 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); 6469 break; 6470 #endif 6471 case TARGET_NR_times: 6472 { 6473 struct target_tms *tmsp; 6474 struct tms tms; 6475 ret = get_errno(times(&tms)); 6476 if (arg1) { 6477 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); 6478 if (!tmsp) 6479 goto efault; 6480 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); 6481 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); 6482 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); 6483 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); 6484 } 6485 if (!is_error(ret)) 6486 ret = host_to_target_clock_t(ret); 6487 } 6488 break; 6489 #ifdef TARGET_NR_prof 6490 case TARGET_NR_prof: 6491 goto unimplemented; 6492 #endif 6493 #ifdef TARGET_NR_signal 6494 case TARGET_NR_signal: 6495 goto unimplemented; 6496 #endif 6497 case TARGET_NR_acct: 6498 if (arg1 == 0) { 6499 ret = get_errno(acct(NULL)); 6500 } else { 6501 if (!(p = lock_user_string(arg1))) 6502 goto efault; 6503 ret = get_errno(acct(path(p))); 6504 unlock_user(p, arg1, 0); 6505 } 6506 break; 6507 #ifdef TARGET_NR_umount2 6508 case TARGET_NR_umount2: 6509 if (!(p = lock_user_string(arg1))) 6510 goto efault; 6511 ret = get_errno(umount2(p, arg2)); 6512 unlock_user(p, arg1, 0); 6513 break; 6514 #endif 6515 #ifdef TARGET_NR_lock 6516 case TARGET_NR_lock: 6517 goto unimplemented; 6518 #endif 6519 case TARGET_NR_ioctl: 6520 ret = do_ioctl(arg1, arg2, arg3); 6521 break; 6522 case TARGET_NR_fcntl: 6523 ret = do_fcntl(arg1, arg2, arg3); 6524 break; 6525 #ifdef TARGET_NR_mpx 6526 case TARGET_NR_mpx: 6527 goto unimplemented; 6528 #endif 6529 case TARGET_NR_setpgid: 6530 ret = get_errno(setpgid(arg1, arg2)); 6531 break; 6532 #ifdef TARGET_NR_ulimit 6533 case TARGET_NR_ulimit: 6534 goto unimplemented; 6535 #endif 6536 #ifdef TARGET_NR_oldolduname 6537 case TARGET_NR_oldolduname: 6538 goto unimplemented; 6539 #endif 6540 case TARGET_NR_umask: 6541 ret = get_errno(umask(arg1)); 6542 break; 6543 case TARGET_NR_chroot: 6544 if (!(p = lock_user_string(arg1))) 6545 goto efault; 6546 ret = get_errno(chroot(p)); 6547 unlock_user(p, arg1, 0); 6548 break; 6549 #ifdef TARGET_NR_ustat 6550 case TARGET_NR_ustat: 6551 goto unimplemented; 6552 #endif 6553 #ifdef TARGET_NR_dup2 6554 case TARGET_NR_dup2: 6555 ret = get_errno(dup2(arg1, arg2)); 6556 if (ret >= 0) { 6557 fd_trans_dup(arg1, arg2); 6558 } 6559 break; 6560 #endif 6561 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) 6562 case TARGET_NR_dup3: 6563 ret = get_errno(dup3(arg1, arg2, arg3)); 6564 if (ret >= 0) { 6565 fd_trans_dup(arg1, arg2); 6566 } 6567 break; 6568 #endif 6569 #ifdef TARGET_NR_getppid /* not on alpha */ 6570 case TARGET_NR_getppid: 6571 ret = get_errno(getppid()); 6572 break; 6573 #endif 6574 #ifdef TARGET_NR_getpgrp 6575 case TARGET_NR_getpgrp: 6576 ret = get_errno(getpgrp()); 6577 break; 6578 #endif 6579 case TARGET_NR_setsid: 6580 ret = get_errno(setsid()); 6581 break; 6582 #ifdef TARGET_NR_sigaction 6583 case TARGET_NR_sigaction: 6584 { 6585 #if defined(TARGET_ALPHA) 6586 struct target_sigaction act, oact, *pact = 0; 6587 struct target_old_sigaction *old_act; 6588 if (arg2) { 6589 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6590 goto efault; 6591 act._sa_handler = old_act->_sa_handler; 6592 target_siginitset(&act.sa_mask, old_act->sa_mask); 6593 act.sa_flags = 
old_act->sa_flags; 6594 act.sa_restorer = 0; 6595 unlock_user_struct(old_act, arg2, 0); 6596 pact = &act; 6597 } 6598 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6599 if (!is_error(ret) && arg3) { 6600 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6601 goto efault; 6602 old_act->_sa_handler = oact._sa_handler; 6603 old_act->sa_mask = oact.sa_mask.sig[0]; 6604 old_act->sa_flags = oact.sa_flags; 6605 unlock_user_struct(old_act, arg3, 1); 6606 } 6607 #elif defined(TARGET_MIPS) 6608 struct target_sigaction act, oact, *pact, *old_act; 6609 6610 if (arg2) { 6611 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6612 goto efault; 6613 act._sa_handler = old_act->_sa_handler; 6614 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); 6615 act.sa_flags = old_act->sa_flags; 6616 unlock_user_struct(old_act, arg2, 0); 6617 pact = &act; 6618 } else { 6619 pact = NULL; 6620 } 6621 6622 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6623 6624 if (!is_error(ret) && arg3) { 6625 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6626 goto efault; 6627 old_act->_sa_handler = oact._sa_handler; 6628 old_act->sa_flags = oact.sa_flags; 6629 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; 6630 old_act->sa_mask.sig[1] = 0; 6631 old_act->sa_mask.sig[2] = 0; 6632 old_act->sa_mask.sig[3] = 0; 6633 unlock_user_struct(old_act, arg3, 1); 6634 } 6635 #else 6636 struct target_old_sigaction *old_act; 6637 struct target_sigaction act, oact, *pact; 6638 if (arg2) { 6639 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) 6640 goto efault; 6641 act._sa_handler = old_act->_sa_handler; 6642 target_siginitset(&act.sa_mask, old_act->sa_mask); 6643 act.sa_flags = old_act->sa_flags; 6644 act.sa_restorer = old_act->sa_restorer; 6645 unlock_user_struct(old_act, arg2, 0); 6646 pact = &act; 6647 } else { 6648 pact = NULL; 6649 } 6650 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6651 if (!is_error(ret) && arg3) { 6652 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) 6653 goto efault; 6654 old_act->_sa_handler = oact._sa_handler; 6655 old_act->sa_mask = oact.sa_mask.sig[0]; 6656 old_act->sa_flags = oact.sa_flags; 6657 old_act->sa_restorer = oact.sa_restorer; 6658 unlock_user_struct(old_act, arg3, 1); 6659 } 6660 #endif 6661 } 6662 break; 6663 #endif 6664 case TARGET_NR_rt_sigaction: 6665 { 6666 #if defined(TARGET_ALPHA) 6667 struct target_sigaction act, oact, *pact = 0; 6668 struct target_rt_sigaction *rt_act; 6669 /* ??? arg4 == sizeof(sigset_t). 
*/ 6670 if (arg2) { 6671 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) 6672 goto efault; 6673 act._sa_handler = rt_act->_sa_handler; 6674 act.sa_mask = rt_act->sa_mask; 6675 act.sa_flags = rt_act->sa_flags; 6676 act.sa_restorer = arg5; 6677 unlock_user_struct(rt_act, arg2, 0); 6678 pact = &act; 6679 } 6680 ret = get_errno(do_sigaction(arg1, pact, &oact)); 6681 if (!is_error(ret) && arg3) { 6682 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) 6683 goto efault; 6684 rt_act->_sa_handler = oact._sa_handler; 6685 rt_act->sa_mask = oact.sa_mask; 6686 rt_act->sa_flags = oact.sa_flags; 6687 unlock_user_struct(rt_act, arg3, 1); 6688 } 6689 #else 6690 struct target_sigaction *act; 6691 struct target_sigaction *oact; 6692 6693 if (arg2) { 6694 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) 6695 goto efault; 6696 } else 6697 act = NULL; 6698 if (arg3) { 6699 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { 6700 ret = -TARGET_EFAULT; 6701 goto rt_sigaction_fail; 6702 } 6703 } else 6704 oact = NULL; 6705 ret = get_errno(do_sigaction(arg1, act, oact)); 6706 rt_sigaction_fail: 6707 if (act) 6708 unlock_user_struct(act, arg2, 0); 6709 if (oact) 6710 unlock_user_struct(oact, arg3, 1); 6711 #endif 6712 } 6713 break; 6714 #ifdef TARGET_NR_sgetmask /* not on alpha */ 6715 case TARGET_NR_sgetmask: 6716 { 6717 sigset_t cur_set; 6718 abi_ulong target_set; 6719 do_sigprocmask(0, NULL, &cur_set); 6720 host_to_target_old_sigset(&target_set, &cur_set); 6721 ret = target_set; 6722 } 6723 break; 6724 #endif 6725 #ifdef TARGET_NR_ssetmask /* not on alpha */ 6726 case TARGET_NR_ssetmask: 6727 { 6728 sigset_t set, oset, cur_set; 6729 abi_ulong target_set = arg1; 6730 do_sigprocmask(0, NULL, &cur_set); 6731 target_to_host_old_sigset(&set, &target_set); 6732 sigorset(&set, &set, &cur_set); 6733 do_sigprocmask(SIG_SETMASK, &set, &oset); 6734 host_to_target_old_sigset(&target_set, &oset); 6735 ret = target_set; 6736 } 6737 break; 6738 #endif 6739 #ifdef TARGET_NR_sigprocmask 6740 case TARGET_NR_sigprocmask: 6741 { 6742 #if defined(TARGET_ALPHA) 6743 sigset_t set, oldset; 6744 abi_ulong mask; 6745 int how; 6746 6747 switch (arg1) { 6748 case TARGET_SIG_BLOCK: 6749 how = SIG_BLOCK; 6750 break; 6751 case TARGET_SIG_UNBLOCK: 6752 how = SIG_UNBLOCK; 6753 break; 6754 case TARGET_SIG_SETMASK: 6755 how = SIG_SETMASK; 6756 break; 6757 default: 6758 ret = -TARGET_EINVAL; 6759 goto fail; 6760 } 6761 mask = arg2; 6762 target_to_host_old_sigset(&set, &mask); 6763 6764 ret = get_errno(do_sigprocmask(how, &set, &oldset)); 6765 if (!is_error(ret)) { 6766 host_to_target_old_sigset(&mask, &oldset); 6767 ret = mask; 6768 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ 6769 } 6770 #else 6771 sigset_t set, oldset, *set_ptr; 6772 int how; 6773 6774 if (arg2) { 6775 switch (arg1) { 6776 case TARGET_SIG_BLOCK: 6777 how = SIG_BLOCK; 6778 break; 6779 case TARGET_SIG_UNBLOCK: 6780 how = SIG_UNBLOCK; 6781 break; 6782 case TARGET_SIG_SETMASK: 6783 how = SIG_SETMASK; 6784 break; 6785 default: 6786 ret = -TARGET_EINVAL; 6787 goto fail; 6788 } 6789 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6790 goto efault; 6791 target_to_host_old_sigset(&set, p); 6792 unlock_user(p, arg2, 0); 6793 set_ptr = &set; 6794 } else { 6795 how = 0; 6796 set_ptr = NULL; 6797 } 6798 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6799 if (!is_error(ret) && arg3) { 6800 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6801 goto efault; 6802 host_to_target_old_sigset(p, &oldset); 6803 
unlock_user(p, arg3, sizeof(target_sigset_t)); 6804 } 6805 #endif 6806 } 6807 break; 6808 #endif 6809 case TARGET_NR_rt_sigprocmask: 6810 { 6811 int how = arg1; 6812 sigset_t set, oldset, *set_ptr; 6813 6814 if (arg2) { 6815 switch(how) { 6816 case TARGET_SIG_BLOCK: 6817 how = SIG_BLOCK; 6818 break; 6819 case TARGET_SIG_UNBLOCK: 6820 how = SIG_UNBLOCK; 6821 break; 6822 case TARGET_SIG_SETMASK: 6823 how = SIG_SETMASK; 6824 break; 6825 default: 6826 ret = -TARGET_EINVAL; 6827 goto fail; 6828 } 6829 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) 6830 goto efault; 6831 target_to_host_sigset(&set, p); 6832 unlock_user(p, arg2, 0); 6833 set_ptr = &set; 6834 } else { 6835 how = 0; 6836 set_ptr = NULL; 6837 } 6838 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset)); 6839 if (!is_error(ret) && arg3) { 6840 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) 6841 goto efault; 6842 host_to_target_sigset(p, &oldset); 6843 unlock_user(p, arg3, sizeof(target_sigset_t)); 6844 } 6845 } 6846 break; 6847 #ifdef TARGET_NR_sigpending 6848 case TARGET_NR_sigpending: 6849 { 6850 sigset_t set; 6851 ret = get_errno(sigpending(&set)); 6852 if (!is_error(ret)) { 6853 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6854 goto efault; 6855 host_to_target_old_sigset(p, &set); 6856 unlock_user(p, arg1, sizeof(target_sigset_t)); 6857 } 6858 } 6859 break; 6860 #endif 6861 case TARGET_NR_rt_sigpending: 6862 { 6863 sigset_t set; 6864 ret = get_errno(sigpending(&set)); 6865 if (!is_error(ret)) { 6866 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) 6867 goto efault; 6868 host_to_target_sigset(p, &set); 6869 unlock_user(p, arg1, sizeof(target_sigset_t)); 6870 } 6871 } 6872 break; 6873 #ifdef TARGET_NR_sigsuspend 6874 case TARGET_NR_sigsuspend: 6875 { 6876 sigset_t set; 6877 #if defined(TARGET_ALPHA) 6878 abi_ulong mask = arg1; 6879 target_to_host_old_sigset(&set, &mask); 6880 #else 6881 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6882 goto efault; 6883 target_to_host_old_sigset(&set, p); 6884 unlock_user(p, arg1, 0); 6885 #endif 6886 ret = get_errno(sigsuspend(&set)); 6887 } 6888 break; 6889 #endif 6890 case TARGET_NR_rt_sigsuspend: 6891 { 6892 sigset_t set; 6893 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6894 goto efault; 6895 target_to_host_sigset(&set, p); 6896 unlock_user(p, arg1, 0); 6897 ret = get_errno(sigsuspend(&set)); 6898 } 6899 break; 6900 case TARGET_NR_rt_sigtimedwait: 6901 { 6902 sigset_t set; 6903 struct timespec uts, *puts; 6904 siginfo_t uinfo; 6905 6906 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) 6907 goto efault; 6908 target_to_host_sigset(&set, p); 6909 unlock_user(p, arg1, 0); 6910 if (arg3) { 6911 puts = &uts; 6912 target_to_host_timespec(puts, arg3); 6913 } else { 6914 puts = NULL; 6915 } 6916 ret = get_errno(sigtimedwait(&set, &uinfo, puts)); 6917 if (!is_error(ret)) { 6918 if (arg2) { 6919 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 6920 0); 6921 if (!p) { 6922 goto efault; 6923 } 6924 host_to_target_siginfo(p, &uinfo); 6925 unlock_user(p, arg2, sizeof(target_siginfo_t)); 6926 } 6927 ret = host_to_target_signal(ret); 6928 } 6929 } 6930 break; 6931 case TARGET_NR_rt_sigqueueinfo: 6932 { 6933 siginfo_t uinfo; 6934 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1))) 6935 goto efault; 6936 target_to_host_siginfo(&uinfo, p); 6937 unlock_user(p, arg1, 0); 6938 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, 
&uinfo)); 6939 } 6940 break; 6941 #ifdef TARGET_NR_sigreturn 6942 case TARGET_NR_sigreturn: 6943 ret = do_sigreturn(cpu_env); 6944 break; 6945 #endif 6946 case TARGET_NR_rt_sigreturn: 6947 ret = do_rt_sigreturn(cpu_env); 6948 break; 6949 case TARGET_NR_sethostname: 6950 if (!(p = lock_user_string(arg1))) 6951 goto efault; 6952 ret = get_errno(sethostname(p, arg2)); 6953 unlock_user(p, arg1, 0); 6954 break; 6955 case TARGET_NR_setrlimit: 6956 { 6957 int resource = target_to_host_resource(arg1); 6958 struct target_rlimit *target_rlim; 6959 struct rlimit rlim; 6960 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) 6961 goto efault; 6962 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); 6963 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); 6964 unlock_user_struct(target_rlim, arg2, 0); 6965 ret = get_errno(setrlimit(resource, &rlim)); 6966 } 6967 break; 6968 case TARGET_NR_getrlimit: 6969 { 6970 int resource = target_to_host_resource(arg1); 6971 struct target_rlimit *target_rlim; 6972 struct rlimit rlim; 6973 6974 ret = get_errno(getrlimit(resource, &rlim)); 6975 if (!is_error(ret)) { 6976 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 6977 goto efault; 6978 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 6979 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 6980 unlock_user_struct(target_rlim, arg2, 1); 6981 } 6982 } 6983 break; 6984 case TARGET_NR_getrusage: 6985 { 6986 struct rusage rusage; 6987 ret = get_errno(getrusage(arg1, &rusage)); 6988 if (!is_error(ret)) { 6989 ret = host_to_target_rusage(arg2, &rusage); 6990 } 6991 } 6992 break; 6993 case TARGET_NR_gettimeofday: 6994 { 6995 struct timeval tv; 6996 ret = get_errno(gettimeofday(&tv, NULL)); 6997 if (!is_error(ret)) { 6998 if (copy_to_user_timeval(arg1, &tv)) 6999 goto efault; 7000 } 7001 } 7002 break; 7003 case TARGET_NR_settimeofday: 7004 { 7005 struct timeval tv, *ptv = NULL; 7006 struct timezone tz, *ptz = NULL; 7007 7008 if (arg1) { 7009 if (copy_from_user_timeval(&tv, arg1)) { 7010 goto efault; 7011 } 7012 ptv = &tv; 7013 } 7014 7015 if (arg2) { 7016 if (copy_from_user_timezone(&tz, arg2)) { 7017 goto efault; 7018 } 7019 ptz = &tz; 7020 } 7021 7022 ret = get_errno(settimeofday(ptv, ptz)); 7023 } 7024 break; 7025 #if defined(TARGET_NR_select) 7026 case TARGET_NR_select: 7027 #if defined(TARGET_S390X) || defined(TARGET_ALPHA) 7028 ret = do_select(arg1, arg2, arg3, arg4, arg5); 7029 #else 7030 { 7031 struct target_sel_arg_struct *sel; 7032 abi_ulong inp, outp, exp, tvp; 7033 long nsel; 7034 7035 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) 7036 goto efault; 7037 nsel = tswapal(sel->n); 7038 inp = tswapal(sel->inp); 7039 outp = tswapal(sel->outp); 7040 exp = tswapal(sel->exp); 7041 tvp = tswapal(sel->tvp); 7042 unlock_user_struct(sel, arg1, 0); 7043 ret = do_select(nsel, inp, outp, exp, tvp); 7044 } 7045 #endif 7046 break; 7047 #endif 7048 #ifdef TARGET_NR_pselect6 7049 case TARGET_NR_pselect6: 7050 { 7051 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; 7052 fd_set rfds, wfds, efds; 7053 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; 7054 struct timespec ts, *ts_ptr; 7055 7056 /* 7057 * The 6th arg is actually two args smashed together, 7058 * so we cannot use the C library. 
7059 */ 7060 sigset_t set; 7061 struct { 7062 sigset_t *set; 7063 size_t size; 7064 } sig, *sig_ptr; 7065 7066 abi_ulong arg_sigset, arg_sigsize, *arg7; 7067 target_sigset_t *target_sigset; 7068 7069 n = arg1; 7070 rfd_addr = arg2; 7071 wfd_addr = arg3; 7072 efd_addr = arg4; 7073 ts_addr = arg5; 7074 7075 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); 7076 if (ret) { 7077 goto fail; 7078 } 7079 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); 7080 if (ret) { 7081 goto fail; 7082 } 7083 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); 7084 if (ret) { 7085 goto fail; 7086 } 7087 7088 /* 7089 * This takes a timespec, and not a timeval, so we cannot 7090 * use the do_select() helper ... 7091 */ 7092 if (ts_addr) { 7093 if (target_to_host_timespec(&ts, ts_addr)) { 7094 goto efault; 7095 } 7096 ts_ptr = &ts; 7097 } else { 7098 ts_ptr = NULL; 7099 } 7100 7101 /* Extract the two packed args for the sigset */ 7102 if (arg6) { 7103 sig_ptr = &sig; 7104 sig.size = _NSIG / 8; 7105 7106 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); 7107 if (!arg7) { 7108 goto efault; 7109 } 7110 arg_sigset = tswapal(arg7[0]); 7111 arg_sigsize = tswapal(arg7[1]); 7112 unlock_user(arg7, arg6, 0); 7113 7114 if (arg_sigset) { 7115 sig.set = &set; 7116 if (arg_sigsize != sizeof(*target_sigset)) { 7117 /* Like the kernel, we enforce correct size sigsets */ 7118 ret = -TARGET_EINVAL; 7119 goto fail; 7120 } 7121 target_sigset = lock_user(VERIFY_READ, arg_sigset, 7122 sizeof(*target_sigset), 1); 7123 if (!target_sigset) { 7124 goto efault; 7125 } 7126 target_to_host_sigset(&set, target_sigset); 7127 unlock_user(target_sigset, arg_sigset, 0); 7128 } else { 7129 sig.set = NULL; 7130 } 7131 } else { 7132 sig_ptr = NULL; 7133 } 7134 7135 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, 7136 ts_ptr, sig_ptr)); 7137 7138 if (!is_error(ret)) { 7139 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) 7140 goto efault; 7141 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) 7142 goto efault; 7143 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) 7144 goto efault; 7145 7146 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) 7147 goto efault; 7148 } 7149 } 7150 break; 7151 #endif 7152 #ifdef TARGET_NR_symlink 7153 case TARGET_NR_symlink: 7154 { 7155 void *p2; 7156 p = lock_user_string(arg1); 7157 p2 = lock_user_string(arg2); 7158 if (!p || !p2) 7159 ret = -TARGET_EFAULT; 7160 else 7161 ret = get_errno(symlink(p, p2)); 7162 unlock_user(p2, arg2, 0); 7163 unlock_user(p, arg1, 0); 7164 } 7165 break; 7166 #endif 7167 #if defined(TARGET_NR_symlinkat) 7168 case TARGET_NR_symlinkat: 7169 { 7170 void *p2; 7171 p = lock_user_string(arg1); 7172 p2 = lock_user_string(arg3); 7173 if (!p || !p2) 7174 ret = -TARGET_EFAULT; 7175 else 7176 ret = get_errno(symlinkat(p, arg2, p2)); 7177 unlock_user(p2, arg3, 0); 7178 unlock_user(p, arg1, 0); 7179 } 7180 break; 7181 #endif 7182 #ifdef TARGET_NR_oldlstat 7183 case TARGET_NR_oldlstat: 7184 goto unimplemented; 7185 #endif 7186 #ifdef TARGET_NR_readlink 7187 case TARGET_NR_readlink: 7188 { 7189 void *p2; 7190 p = lock_user_string(arg1); 7191 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); 7192 if (!p || !p2) { 7193 ret = -TARGET_EFAULT; 7194 } else if (!arg3) { 7195 /* Short circuit this for the magic exe check. 
*/ 7196 ret = -TARGET_EINVAL; 7197 } else if (is_proc_myself((const char *)p, "exe")) { 7198 char real[PATH_MAX], *temp; 7199 temp = realpath(exec_path, real); 7200 /* Return value is # of bytes that we wrote to the buffer. */ 7201 if (temp == NULL) { 7202 ret = get_errno(-1); 7203 } else { 7204 /* Don't worry about sign mismatch as earlier mapping 7205 * logic would have thrown a bad address error. */ 7206 ret = MIN(strlen(real), arg3); 7207 /* We cannot NUL terminate the string. */ 7208 memcpy(p2, real, ret); 7209 } 7210 } else { 7211 ret = get_errno(readlink(path(p), p2, arg3)); 7212 } 7213 unlock_user(p2, arg2, ret); 7214 unlock_user(p, arg1, 0); 7215 } 7216 break; 7217 #endif 7218 #if defined(TARGET_NR_readlinkat) 7219 case TARGET_NR_readlinkat: 7220 { 7221 void *p2; 7222 p = lock_user_string(arg2); 7223 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); 7224 if (!p || !p2) { 7225 ret = -TARGET_EFAULT; 7226 } else if (is_proc_myself((const char *)p, "exe")) { 7227 char real[PATH_MAX], *temp; 7228 temp = realpath(exec_path, real); 7229 ret = temp == NULL ? get_errno(-1) : strlen(real) ; 7230 snprintf((char *)p2, arg4, "%s", real); 7231 } else { 7232 ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); 7233 } 7234 unlock_user(p2, arg3, ret); 7235 unlock_user(p, arg2, 0); 7236 } 7237 break; 7238 #endif 7239 #ifdef TARGET_NR_uselib 7240 case TARGET_NR_uselib: 7241 goto unimplemented; 7242 #endif 7243 #ifdef TARGET_NR_swapon 7244 case TARGET_NR_swapon: 7245 if (!(p = lock_user_string(arg1))) 7246 goto efault; 7247 ret = get_errno(swapon(p, arg2)); 7248 unlock_user(p, arg1, 0); 7249 break; 7250 #endif 7251 case TARGET_NR_reboot: 7252 if (arg3 == LINUX_REBOOT_CMD_RESTART2) { 7253 /* arg4 must be ignored in all other cases */ 7254 p = lock_user_string(arg4); 7255 if (!p) { 7256 goto efault; 7257 } 7258 ret = get_errno(reboot(arg1, arg2, arg3, p)); 7259 unlock_user(p, arg4, 0); 7260 } else { 7261 ret = get_errno(reboot(arg1, arg2, arg3, NULL)); 7262 } 7263 break; 7264 #ifdef TARGET_NR_readdir 7265 case TARGET_NR_readdir: 7266 goto unimplemented; 7267 #endif 7268 #ifdef TARGET_NR_mmap 7269 case TARGET_NR_mmap: 7270 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ 7271 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ 7272 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ 7273 || defined(TARGET_S390X) 7274 { 7275 abi_ulong *v; 7276 abi_ulong v1, v2, v3, v4, v5, v6; 7277 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) 7278 goto efault; 7279 v1 = tswapal(v[0]); 7280 v2 = tswapal(v[1]); 7281 v3 = tswapal(v[2]); 7282 v4 = tswapal(v[3]); 7283 v5 = tswapal(v[4]); 7284 v6 = tswapal(v[5]); 7285 unlock_user(v, arg1, 0); 7286 ret = get_errno(target_mmap(v1, v2, v3, 7287 target_to_host_bitmask(v4, mmap_flags_tbl), 7288 v5, v6)); 7289 } 7290 #else 7291 ret = get_errno(target_mmap(arg1, arg2, arg3, 7292 target_to_host_bitmask(arg4, mmap_flags_tbl), 7293 arg5, 7294 arg6)); 7295 #endif 7296 break; 7297 #endif 7298 #ifdef TARGET_NR_mmap2 7299 case TARGET_NR_mmap2: 7300 #ifndef MMAP_SHIFT 7301 #define MMAP_SHIFT 12 7302 #endif 7303 ret = get_errno(target_mmap(arg1, arg2, arg3, 7304 target_to_host_bitmask(arg4, mmap_flags_tbl), 7305 arg5, 7306 arg6 << MMAP_SHIFT)); 7307 break; 7308 #endif 7309 case TARGET_NR_munmap: 7310 ret = get_errno(target_munmap(arg1, arg2)); 7311 break; 7312 case TARGET_NR_mprotect: 7313 { 7314 TaskState *ts = cpu->opaque; 7315 /* Special hack to detect libc making the stack executable. 
*/ 7316 if ((arg3 & PROT_GROWSDOWN) 7317 && arg1 >= ts->info->stack_limit 7318 && arg1 <= ts->info->start_stack) { 7319 arg3 &= ~PROT_GROWSDOWN; 7320 arg2 = arg2 + arg1 - ts->info->stack_limit; 7321 arg1 = ts->info->stack_limit; 7322 } 7323 } 7324 ret = get_errno(target_mprotect(arg1, arg2, arg3)); 7325 break; 7326 #ifdef TARGET_NR_mremap 7327 case TARGET_NR_mremap: 7328 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); 7329 break; 7330 #endif 7331 /* ??? msync/mlock/munlock are broken for softmmu. */ 7332 #ifdef TARGET_NR_msync 7333 case TARGET_NR_msync: 7334 ret = get_errno(msync(g2h(arg1), arg2, arg3)); 7335 break; 7336 #endif 7337 #ifdef TARGET_NR_mlock 7338 case TARGET_NR_mlock: 7339 ret = get_errno(mlock(g2h(arg1), arg2)); 7340 break; 7341 #endif 7342 #ifdef TARGET_NR_munlock 7343 case TARGET_NR_munlock: 7344 ret = get_errno(munlock(g2h(arg1), arg2)); 7345 break; 7346 #endif 7347 #ifdef TARGET_NR_mlockall 7348 case TARGET_NR_mlockall: 7349 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); 7350 break; 7351 #endif 7352 #ifdef TARGET_NR_munlockall 7353 case TARGET_NR_munlockall: 7354 ret = get_errno(munlockall()); 7355 break; 7356 #endif 7357 case TARGET_NR_truncate: 7358 if (!(p = lock_user_string(arg1))) 7359 goto efault; 7360 ret = get_errno(truncate(p, arg2)); 7361 unlock_user(p, arg1, 0); 7362 break; 7363 case TARGET_NR_ftruncate: 7364 ret = get_errno(ftruncate(arg1, arg2)); 7365 break; 7366 case TARGET_NR_fchmod: 7367 ret = get_errno(fchmod(arg1, arg2)); 7368 break; 7369 #if defined(TARGET_NR_fchmodat) 7370 case TARGET_NR_fchmodat: 7371 if (!(p = lock_user_string(arg2))) 7372 goto efault; 7373 ret = get_errno(fchmodat(arg1, p, arg3, 0)); 7374 unlock_user(p, arg2, 0); 7375 break; 7376 #endif 7377 case TARGET_NR_getpriority: 7378 /* Note that negative values are valid for getpriority, so we must 7379 differentiate based on errno settings. */ 7380 errno = 0; 7381 ret = getpriority(arg1, arg2); 7382 if (ret == -1 && errno != 0) { 7383 ret = -host_to_target_errno(errno); 7384 break; 7385 } 7386 #ifdef TARGET_ALPHA 7387 /* Return value is the unbiased priority. Signal no error. */ 7388 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; 7389 #else 7390 /* Return value is a biased priority to avoid negative numbers. 
*/ 7391 ret = 20 - ret; 7392 #endif 7393 break; 7394 case TARGET_NR_setpriority: 7395 ret = get_errno(setpriority(arg1, arg2, arg3)); 7396 break; 7397 #ifdef TARGET_NR_profil 7398 case TARGET_NR_profil: 7399 goto unimplemented; 7400 #endif 7401 case TARGET_NR_statfs: 7402 if (!(p = lock_user_string(arg1))) 7403 goto efault; 7404 ret = get_errno(statfs(path(p), &stfs)); 7405 unlock_user(p, arg1, 0); 7406 convert_statfs: 7407 if (!is_error(ret)) { 7408 struct target_statfs *target_stfs; 7409 7410 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) 7411 goto efault; 7412 __put_user(stfs.f_type, &target_stfs->f_type); 7413 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 7414 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 7415 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 7416 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 7417 __put_user(stfs.f_files, &target_stfs->f_files); 7418 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 7419 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 7420 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 7421 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 7422 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 7423 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 7424 unlock_user_struct(target_stfs, arg2, 1); 7425 } 7426 break; 7427 case TARGET_NR_fstatfs: 7428 ret = get_errno(fstatfs(arg1, &stfs)); 7429 goto convert_statfs; 7430 #ifdef TARGET_NR_statfs64 7431 case TARGET_NR_statfs64: 7432 if (!(p = lock_user_string(arg1))) 7433 goto efault; 7434 ret = get_errno(statfs(path(p), &stfs)); 7435 unlock_user(p, arg1, 0); 7436 convert_statfs64: 7437 if (!is_error(ret)) { 7438 struct target_statfs64 *target_stfs; 7439 7440 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) 7441 goto efault; 7442 __put_user(stfs.f_type, &target_stfs->f_type); 7443 __put_user(stfs.f_bsize, &target_stfs->f_bsize); 7444 __put_user(stfs.f_blocks, &target_stfs->f_blocks); 7445 __put_user(stfs.f_bfree, &target_stfs->f_bfree); 7446 __put_user(stfs.f_bavail, &target_stfs->f_bavail); 7447 __put_user(stfs.f_files, &target_stfs->f_files); 7448 __put_user(stfs.f_ffree, &target_stfs->f_ffree); 7449 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); 7450 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); 7451 __put_user(stfs.f_namelen, &target_stfs->f_namelen); 7452 __put_user(stfs.f_frsize, &target_stfs->f_frsize); 7453 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); 7454 unlock_user_struct(target_stfs, arg3, 1); 7455 } 7456 break; 7457 case TARGET_NR_fstatfs64: 7458 ret = get_errno(fstatfs(arg1, &stfs)); 7459 goto convert_statfs64; 7460 #endif 7461 #ifdef TARGET_NR_ioperm 7462 case TARGET_NR_ioperm: 7463 goto unimplemented; 7464 #endif 7465 #ifdef TARGET_NR_socketcall 7466 case TARGET_NR_socketcall: 7467 ret = do_socketcall(arg1, arg2); 7468 break; 7469 #endif 7470 #ifdef TARGET_NR_accept 7471 case TARGET_NR_accept: 7472 ret = do_accept4(arg1, arg2, arg3, 0); 7473 break; 7474 #endif 7475 #ifdef TARGET_NR_accept4 7476 case TARGET_NR_accept4: 7477 #ifdef CONFIG_ACCEPT4 7478 ret = do_accept4(arg1, arg2, arg3, arg4); 7479 #else 7480 goto unimplemented; 7481 #endif 7482 break; 7483 #endif 7484 #ifdef TARGET_NR_bind 7485 case TARGET_NR_bind: 7486 ret = do_bind(arg1, arg2, arg3); 7487 break; 7488 #endif 7489 #ifdef TARGET_NR_connect 7490 case TARGET_NR_connect: 7491 ret = do_connect(arg1, arg2, arg3); 7492 break; 7493 #endif 7494 #ifdef TARGET_NR_getpeername 7495 case TARGET_NR_getpeername: 7496 
ret = do_getpeername(arg1, arg2, arg3); 7497 break; 7498 #endif 7499 #ifdef TARGET_NR_getsockname 7500 case TARGET_NR_getsockname: 7501 ret = do_getsockname(arg1, arg2, arg3); 7502 break; 7503 #endif 7504 #ifdef TARGET_NR_getsockopt 7505 case TARGET_NR_getsockopt: 7506 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); 7507 break; 7508 #endif 7509 #ifdef TARGET_NR_listen 7510 case TARGET_NR_listen: 7511 ret = get_errno(listen(arg1, arg2)); 7512 break; 7513 #endif 7514 #ifdef TARGET_NR_recv 7515 case TARGET_NR_recv: 7516 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); 7517 break; 7518 #endif 7519 #ifdef TARGET_NR_recvfrom 7520 case TARGET_NR_recvfrom: 7521 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); 7522 break; 7523 #endif 7524 #ifdef TARGET_NR_recvmsg 7525 case TARGET_NR_recvmsg: 7526 ret = do_sendrecvmsg(arg1, arg2, arg3, 0); 7527 break; 7528 #endif 7529 #ifdef TARGET_NR_send 7530 case TARGET_NR_send: 7531 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); 7532 break; 7533 #endif 7534 #ifdef TARGET_NR_sendmsg 7535 case TARGET_NR_sendmsg: 7536 ret = do_sendrecvmsg(arg1, arg2, arg3, 1); 7537 break; 7538 #endif 7539 #ifdef TARGET_NR_sendmmsg 7540 case TARGET_NR_sendmmsg: 7541 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); 7542 break; 7543 case TARGET_NR_recvmmsg: 7544 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); 7545 break; 7546 #endif 7547 #ifdef TARGET_NR_sendto 7548 case TARGET_NR_sendto: 7549 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); 7550 break; 7551 #endif 7552 #ifdef TARGET_NR_shutdown 7553 case TARGET_NR_shutdown: 7554 ret = get_errno(shutdown(arg1, arg2)); 7555 break; 7556 #endif 7557 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) 7558 case TARGET_NR_getrandom: 7559 p = lock_user(VERIFY_WRITE, arg1, arg2, 0); 7560 if (!p) { 7561 goto efault; 7562 } 7563 ret = get_errno(getrandom(p, arg2, arg3)); 7564 unlock_user(p, arg1, ret); 7565 break; 7566 #endif 7567 #ifdef TARGET_NR_socket 7568 case TARGET_NR_socket: 7569 ret = do_socket(arg1, arg2, arg3); 7570 fd_trans_unregister(ret); 7571 break; 7572 #endif 7573 #ifdef TARGET_NR_socketpair 7574 case TARGET_NR_socketpair: 7575 ret = do_socketpair(arg1, arg2, arg3, arg4); 7576 break; 7577 #endif 7578 #ifdef TARGET_NR_setsockopt 7579 case TARGET_NR_setsockopt: 7580 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); 7581 break; 7582 #endif 7583 7584 case TARGET_NR_syslog: 7585 if (!(p = lock_user_string(arg2))) 7586 goto efault; 7587 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); 7588 unlock_user(p, arg2, 0); 7589 break; 7590 7591 case TARGET_NR_setitimer: 7592 { 7593 struct itimerval value, ovalue, *pvalue; 7594 7595 if (arg2) { 7596 pvalue = &value; 7597 if (copy_from_user_timeval(&pvalue->it_interval, arg2) 7598 || copy_from_user_timeval(&pvalue->it_value, 7599 arg2 + sizeof(struct target_timeval))) 7600 goto efault; 7601 } else { 7602 pvalue = NULL; 7603 } 7604 ret = get_errno(setitimer(arg1, pvalue, &ovalue)); 7605 if (!is_error(ret) && arg3) { 7606 if (copy_to_user_timeval(arg3, 7607 &ovalue.it_interval) 7608 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), 7609 &ovalue.it_value)) 7610 goto efault; 7611 } 7612 } 7613 break; 7614 case TARGET_NR_getitimer: 7615 { 7616 struct itimerval value; 7617 7618 ret = get_errno(getitimer(arg1, &value)); 7619 if (!is_error(ret) && arg2) { 7620 if (copy_to_user_timeval(arg2, 7621 &value.it_interval) 7622 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), 7623 &value.it_value)) 7624 goto efault; 7625 } 7626 } 7627 break; 
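/*
 * stat(2), lstat(2) and fstat(2) below share the do_stat conversion
 * path: the host struct stat is copied field by field into the
 * target's struct target_stat layout, which is zeroed first so that
 * fields the code does not fill in read back as 0.
 */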
7628 #ifdef TARGET_NR_stat 7629 case TARGET_NR_stat: 7630 if (!(p = lock_user_string(arg1))) 7631 goto efault; 7632 ret = get_errno(stat(path(p), &st)); 7633 unlock_user(p, arg1, 0); 7634 goto do_stat; 7635 #endif 7636 #ifdef TARGET_NR_lstat 7637 case TARGET_NR_lstat: 7638 if (!(p = lock_user_string(arg1))) 7639 goto efault; 7640 ret = get_errno(lstat(path(p), &st)); 7641 unlock_user(p, arg1, 0); 7642 goto do_stat; 7643 #endif 7644 case TARGET_NR_fstat: 7645 { 7646 ret = get_errno(fstat(arg1, &st)); 7647 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) 7648 do_stat: 7649 #endif 7650 if (!is_error(ret)) { 7651 struct target_stat *target_st; 7652 7653 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) 7654 goto efault; 7655 memset(target_st, 0, sizeof(*target_st)); 7656 __put_user(st.st_dev, &target_st->st_dev); 7657 __put_user(st.st_ino, &target_st->st_ino); 7658 __put_user(st.st_mode, &target_st->st_mode); 7659 __put_user(st.st_uid, &target_st->st_uid); 7660 __put_user(st.st_gid, &target_st->st_gid); 7661 __put_user(st.st_nlink, &target_st->st_nlink); 7662 __put_user(st.st_rdev, &target_st->st_rdev); 7663 __put_user(st.st_size, &target_st->st_size); 7664 __put_user(st.st_blksize, &target_st->st_blksize); 7665 __put_user(st.st_blocks, &target_st->st_blocks); 7666 __put_user(st.st_atime, &target_st->target_st_atime); 7667 __put_user(st.st_mtime, &target_st->target_st_mtime); 7668 __put_user(st.st_ctime, &target_st->target_st_ctime); 7669 unlock_user_struct(target_st, arg2, 1); 7670 } 7671 } 7672 break; 7673 #ifdef TARGET_NR_olduname 7674 case TARGET_NR_olduname: 7675 goto unimplemented; 7676 #endif 7677 #ifdef TARGET_NR_iopl 7678 case TARGET_NR_iopl: 7679 goto unimplemented; 7680 #endif 7681 case TARGET_NR_vhangup: 7682 ret = get_errno(vhangup()); 7683 break; 7684 #ifdef TARGET_NR_idle 7685 case TARGET_NR_idle: 7686 goto unimplemented; 7687 #endif 7688 #ifdef TARGET_NR_syscall 7689 case TARGET_NR_syscall: 7690 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, 7691 arg6, arg7, arg8, 0); 7692 break; 7693 #endif 7694 case TARGET_NR_wait4: 7695 { 7696 int status; 7697 abi_long status_ptr = arg2; 7698 struct rusage rusage, *rusage_ptr; 7699 abi_ulong target_rusage = arg4; 7700 abi_long rusage_err; 7701 if (target_rusage) 7702 rusage_ptr = &rusage; 7703 else 7704 rusage_ptr = NULL; 7705 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); 7706 if (!is_error(ret)) { 7707 if (status_ptr && ret) { 7708 status = host_to_target_waitstatus(status); 7709 if (put_user_s32(status, status_ptr)) 7710 goto efault; 7711 } 7712 if (target_rusage) { 7713 rusage_err = host_to_target_rusage(target_rusage, &rusage); 7714 if (rusage_err) { 7715 ret = rusage_err; 7716 } 7717 } 7718 } 7719 } 7720 break; 7721 #ifdef TARGET_NR_swapoff 7722 case TARGET_NR_swapoff: 7723 if (!(p = lock_user_string(arg1))) 7724 goto efault; 7725 ret = get_errno(swapoff(p)); 7726 unlock_user(p, arg1, 0); 7727 break; 7728 #endif 7729 case TARGET_NR_sysinfo: 7730 { 7731 struct target_sysinfo *target_value; 7732 struct sysinfo value; 7733 ret = get_errno(sysinfo(&value)); 7734 if (!is_error(ret) && arg1) 7735 { 7736 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) 7737 goto efault; 7738 __put_user(value.uptime, &target_value->uptime); 7739 __put_user(value.loads[0], &target_value->loads[0]); 7740 __put_user(value.loads[1], &target_value->loads[1]); 7741 __put_user(value.loads[2], &target_value->loads[2]); 7742 __put_user(value.totalram, &target_value->totalram); 7743 __put_user(value.freeram, 
&target_value->freeram); 7744 __put_user(value.sharedram, &target_value->sharedram); 7745 __put_user(value.bufferram, &target_value->bufferram); 7746 __put_user(value.totalswap, &target_value->totalswap); 7747 __put_user(value.freeswap, &target_value->freeswap); 7748 __put_user(value.procs, &target_value->procs); 7749 __put_user(value.totalhigh, &target_value->totalhigh); 7750 __put_user(value.freehigh, &target_value->freehigh); 7751 __put_user(value.mem_unit, &target_value->mem_unit); 7752 unlock_user_struct(target_value, arg1, 1); 7753 } 7754 } 7755 break; 7756 #ifdef TARGET_NR_ipc 7757 case TARGET_NR_ipc: 7758 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); 7759 break; 7760 #endif 7761 #ifdef TARGET_NR_semget 7762 case TARGET_NR_semget: 7763 ret = get_errno(semget(arg1, arg2, arg3)); 7764 break; 7765 #endif 7766 #ifdef TARGET_NR_semop 7767 case TARGET_NR_semop: 7768 ret = do_semop(arg1, arg2, arg3); 7769 break; 7770 #endif 7771 #ifdef TARGET_NR_semctl 7772 case TARGET_NR_semctl: 7773 ret = do_semctl(arg1, arg2, arg3, arg4); 7774 break; 7775 #endif 7776 #ifdef TARGET_NR_msgctl 7777 case TARGET_NR_msgctl: 7778 ret = do_msgctl(arg1, arg2, arg3); 7779 break; 7780 #endif 7781 #ifdef TARGET_NR_msgget 7782 case TARGET_NR_msgget: 7783 ret = get_errno(msgget(arg1, arg2)); 7784 break; 7785 #endif 7786 #ifdef TARGET_NR_msgrcv 7787 case TARGET_NR_msgrcv: 7788 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); 7789 break; 7790 #endif 7791 #ifdef TARGET_NR_msgsnd 7792 case TARGET_NR_msgsnd: 7793 ret = do_msgsnd(arg1, arg2, arg3, arg4); 7794 break; 7795 #endif 7796 #ifdef TARGET_NR_shmget 7797 case TARGET_NR_shmget: 7798 ret = get_errno(shmget(arg1, arg2, arg3)); 7799 break; 7800 #endif 7801 #ifdef TARGET_NR_shmctl 7802 case TARGET_NR_shmctl: 7803 ret = do_shmctl(arg1, arg2, arg3); 7804 break; 7805 #endif 7806 #ifdef TARGET_NR_shmat 7807 case TARGET_NR_shmat: 7808 ret = do_shmat(arg1, arg2, arg3); 7809 break; 7810 #endif 7811 #ifdef TARGET_NR_shmdt 7812 case TARGET_NR_shmdt: 7813 ret = do_shmdt(arg1); 7814 break; 7815 #endif 7816 case TARGET_NR_fsync: 7817 ret = get_errno(fsync(arg1)); 7818 break; 7819 case TARGET_NR_clone: 7820 /* Linux manages to have three different orderings for its 7821 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines 7822 * match the kernel's CONFIG_CLONE_* settings. 7823 * Microblaze is further special in that it uses a sixth 7824 * implicit argument to clone for the TLS pointer. 
7825 */ 7826 #if defined(TARGET_MICROBLAZE) 7827 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); 7828 #elif defined(TARGET_CLONE_BACKWARDS) 7829 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); 7830 #elif defined(TARGET_CLONE_BACKWARDS2) 7831 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); 7832 #else 7833 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); 7834 #endif 7835 break; 7836 #ifdef __NR_exit_group 7837 /* new thread calls */ 7838 case TARGET_NR_exit_group: 7839 #ifdef TARGET_GPROF 7840 _mcleanup(); 7841 #endif 7842 gdb_exit(cpu_env, arg1); 7843 ret = get_errno(exit_group(arg1)); 7844 break; 7845 #endif 7846 case TARGET_NR_setdomainname: 7847 if (!(p = lock_user_string(arg1))) 7848 goto efault; 7849 ret = get_errno(setdomainname(p, arg2)); 7850 unlock_user(p, arg1, 0); 7851 break; 7852 case TARGET_NR_uname: 7853 /* no need to transcode because we use the linux syscall */ 7854 { 7855 struct new_utsname * buf; 7856 7857 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) 7858 goto efault; 7859 ret = get_errno(sys_uname(buf)); 7860 if (!is_error(ret)) { 7861 /* Overwrite the native machine name with whatever is being 7862 emulated. */ 7863 strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); 7864 /* Allow the user to override the reported release. */ 7865 if (qemu_uname_release && *qemu_uname_release) 7866 strcpy (buf->release, qemu_uname_release); 7867 } 7868 unlock_user_struct(buf, arg1, 1); 7869 } 7870 break; 7871 #ifdef TARGET_I386 7872 case TARGET_NR_modify_ldt: 7873 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); 7874 break; 7875 #if !defined(TARGET_X86_64) 7876 case TARGET_NR_vm86old: 7877 goto unimplemented; 7878 case TARGET_NR_vm86: 7879 ret = do_vm86(cpu_env, arg1, arg2); 7880 break; 7881 #endif 7882 #endif 7883 case TARGET_NR_adjtimex: 7884 goto unimplemented; 7885 #ifdef TARGET_NR_create_module 7886 case TARGET_NR_create_module: 7887 #endif 7888 case TARGET_NR_init_module: 7889 case TARGET_NR_delete_module: 7890 #ifdef TARGET_NR_get_kernel_syms 7891 case TARGET_NR_get_kernel_syms: 7892 #endif 7893 goto unimplemented; 7894 case TARGET_NR_quotactl: 7895 goto unimplemented; 7896 case TARGET_NR_getpgid: 7897 ret = get_errno(getpgid(arg1)); 7898 break; 7899 case TARGET_NR_fchdir: 7900 ret = get_errno(fchdir(arg1)); 7901 break; 7902 #ifdef TARGET_NR_bdflush /* not on x86_64 */ 7903 case TARGET_NR_bdflush: 7904 goto unimplemented; 7905 #endif 7906 #ifdef TARGET_NR_sysfs 7907 case TARGET_NR_sysfs: 7908 goto unimplemented; 7909 #endif 7910 case TARGET_NR_personality: 7911 ret = get_errno(personality(arg1)); 7912 break; 7913 #ifdef TARGET_NR_afs_syscall 7914 case TARGET_NR_afs_syscall: 7915 goto unimplemented; 7916 #endif 7917 #ifdef TARGET_NR__llseek /* Not on alpha */ 7918 case TARGET_NR__llseek: 7919 { 7920 int64_t res; 7921 #if !defined(__NR_llseek) 7922 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); 7923 if (res == -1) { 7924 ret = get_errno(res); 7925 } else { 7926 ret = 0; 7927 } 7928 #else 7929 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); 7930 #endif 7931 if ((ret == 0) && put_user_s64(res, arg4)) { 7932 goto efault; 7933 } 7934 } 7935 break; 7936 #endif 7937 #ifdef TARGET_NR_getdents 7938 case TARGET_NR_getdents: 7939 #ifdef __NR_getdents 7940 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 7941 { 7942 struct target_dirent *target_dirp; 7943 struct linux_dirent *dirp; 7944 abi_long count = arg3; 7945 7946 dirp = g_try_malloc(count); 7947 if (!dirp) { 7948 ret = -TARGET_ENOMEM; 7949 goto
fail; 7950 } 7951 7952 ret = get_errno(sys_getdents(arg1, dirp, count)); 7953 if (!is_error(ret)) { 7954 struct linux_dirent *de; 7955 struct target_dirent *tde; 7956 int len = ret; 7957 int reclen, treclen; 7958 int count1, tnamelen; 7959 7960 count1 = 0; 7961 de = dirp; 7962 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7963 goto efault; 7964 tde = target_dirp; 7965 while (len > 0) { 7966 reclen = de->d_reclen; 7967 tnamelen = reclen - offsetof(struct linux_dirent, d_name); 7968 assert(tnamelen >= 0); 7969 treclen = tnamelen + offsetof(struct target_dirent, d_name); 7970 assert(count1 + treclen <= count); 7971 tde->d_reclen = tswap16(treclen); 7972 tde->d_ino = tswapal(de->d_ino); 7973 tde->d_off = tswapal(de->d_off); 7974 memcpy(tde->d_name, de->d_name, tnamelen); 7975 de = (struct linux_dirent *)((char *)de + reclen); 7976 len -= reclen; 7977 tde = (struct target_dirent *)((char *)tde + treclen); 7978 count1 += treclen; 7979 } 7980 ret = count1; 7981 unlock_user(target_dirp, arg2, ret); 7982 } 7983 g_free(dirp); 7984 } 7985 #else 7986 { 7987 struct linux_dirent *dirp; 7988 abi_long count = arg3; 7989 7990 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 7991 goto efault; 7992 ret = get_errno(sys_getdents(arg1, dirp, count)); 7993 if (!is_error(ret)) { 7994 struct linux_dirent *de; 7995 int len = ret; 7996 int reclen; 7997 de = dirp; 7998 while (len > 0) { 7999 reclen = de->d_reclen; 8000 if (reclen > len) 8001 break; 8002 de->d_reclen = tswap16(reclen); 8003 tswapls(&de->d_ino); 8004 tswapls(&de->d_off); 8005 de = (struct linux_dirent *)((char *)de + reclen); 8006 len -= reclen; 8007 } 8008 } 8009 unlock_user(dirp, arg2, ret); 8010 } 8011 #endif 8012 #else 8013 /* Implement getdents in terms of getdents64 */ 8014 { 8015 struct linux_dirent64 *dirp; 8016 abi_long count = arg3; 8017 8018 dirp = lock_user(VERIFY_WRITE, arg2, count, 0); 8019 if (!dirp) { 8020 goto efault; 8021 } 8022 ret = get_errno(sys_getdents64(arg1, dirp, count)); 8023 if (!is_error(ret)) { 8024 /* Convert the dirent64 structs to target dirent. We do this 8025 * in-place, since we can guarantee that a target_dirent is no 8026 * larger than a dirent64; however this means we have to be 8027 * careful to read everything before writing in the new format. 
8028 */ 8029 struct linux_dirent64 *de; 8030 struct target_dirent *tde; 8031 int len = ret; 8032 int tlen = 0; 8033 8034 de = dirp; 8035 tde = (struct target_dirent *)dirp; 8036 while (len > 0) { 8037 int namelen, treclen; 8038 int reclen = de->d_reclen; 8039 uint64_t ino = de->d_ino; 8040 int64_t off = de->d_off; 8041 uint8_t type = de->d_type; 8042 8043 namelen = strlen(de->d_name); 8044 treclen = offsetof(struct target_dirent, d_name) 8045 + namelen + 2; 8046 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); 8047 8048 memmove(tde->d_name, de->d_name, namelen + 1); 8049 tde->d_ino = tswapal(ino); 8050 tde->d_off = tswapal(off); 8051 tde->d_reclen = tswap16(treclen); 8052 /* The target_dirent type is in what was formerly a padding 8053 * byte at the end of the structure: 8054 */ 8055 *(((char *)tde) + treclen - 1) = type; 8056 8057 de = (struct linux_dirent64 *)((char *)de + reclen); 8058 tde = (struct target_dirent *)((char *)tde + treclen); 8059 len -= reclen; 8060 tlen += treclen; 8061 } 8062 ret = tlen; 8063 } 8064 unlock_user(dirp, arg2, ret); 8065 } 8066 #endif 8067 break; 8068 #endif /* TARGET_NR_getdents */ 8069 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) 8070 case TARGET_NR_getdents64: 8071 { 8072 struct linux_dirent64 *dirp; 8073 abi_long count = arg3; 8074 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) 8075 goto efault; 8076 ret = get_errno(sys_getdents64(arg1, dirp, count)); 8077 if (!is_error(ret)) { 8078 struct linux_dirent64 *de; 8079 int len = ret; 8080 int reclen; 8081 de = dirp; 8082 while (len > 0) { 8083 reclen = de->d_reclen; 8084 if (reclen > len) 8085 break; 8086 de->d_reclen = tswap16(reclen); 8087 tswap64s((uint64_t *)&de->d_ino); 8088 tswap64s((uint64_t *)&de->d_off); 8089 de = (struct linux_dirent64 *)((char *)de + reclen); 8090 len -= reclen; 8091 } 8092 } 8093 unlock_user(dirp, arg2, ret); 8094 } 8095 break; 8096 #endif /* TARGET_NR_getdents64 */ 8097 #if defined(TARGET_NR__newselect) 8098 case TARGET_NR__newselect: 8099 ret = do_select(arg1, arg2, arg3, arg4, arg5); 8100 break; 8101 #endif 8102 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) 8103 # ifdef TARGET_NR_poll 8104 case TARGET_NR_poll: 8105 # endif 8106 # ifdef TARGET_NR_ppoll 8107 case TARGET_NR_ppoll: 8108 # endif 8109 { 8110 struct target_pollfd *target_pfd; 8111 unsigned int nfds = arg2; 8112 int timeout = arg3; 8113 struct pollfd *pfd; 8114 unsigned int i; 8115 8116 pfd = NULL; 8117 target_pfd = NULL; 8118 if (nfds) { 8119 target_pfd = lock_user(VERIFY_WRITE, arg1, 8120 sizeof(struct target_pollfd) * nfds, 1); 8121 if (!target_pfd) { 8122 goto efault; 8123 } 8124 8125 pfd = alloca(sizeof(struct pollfd) * nfds); 8126 for (i = 0; i < nfds; i++) { 8127 pfd[i].fd = tswap32(target_pfd[i].fd); 8128 pfd[i].events = tswap16(target_pfd[i].events); 8129 } 8130 } 8131 8132 # ifdef TARGET_NR_ppoll 8133 if (num == TARGET_NR_ppoll) { 8134 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; 8135 target_sigset_t *target_set; 8136 sigset_t _set, *set = &_set; 8137 8138 if (arg3) { 8139 if (target_to_host_timespec(timeout_ts, arg3)) { 8140 unlock_user(target_pfd, arg1, 0); 8141 goto efault; 8142 } 8143 } else { 8144 timeout_ts = NULL; 8145 } 8146 8147 if (arg4) { 8148 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); 8149 if (!target_set) { 8150 unlock_user(target_pfd, arg1, 0); 8151 goto efault; 8152 } 8153 target_to_host_sigset(set, target_set); 8154 } else { 8155 set = NULL; 8156 } 8157 8158 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, 
_NSIG/8)); 8159 8160 if (!is_error(ret) && arg3) { 8161 host_to_target_timespec(arg3, timeout_ts); 8162 } 8163 if (arg4) { 8164 unlock_user(target_set, arg4, 0); 8165 } 8166 } else 8167 # endif 8168 ret = get_errno(poll(pfd, nfds, timeout)); 8169 8170 if (!is_error(ret)) { 8171 for(i = 0; i < nfds; i++) { 8172 target_pfd[i].revents = tswap16(pfd[i].revents); 8173 } 8174 } 8175 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); 8176 } 8177 break; 8178 #endif 8179 case TARGET_NR_flock: 8180 /* NOTE: the flock constant seems to be the same for every 8181 Linux platform */ 8182 ret = get_errno(flock(arg1, arg2)); 8183 break; 8184 case TARGET_NR_readv: 8185 { 8186 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); 8187 if (vec != NULL) { 8188 ret = get_errno(readv(arg1, vec, arg3)); 8189 unlock_iovec(vec, arg2, arg3, 1); 8190 } else { 8191 ret = -host_to_target_errno(errno); 8192 } 8193 } 8194 break; 8195 case TARGET_NR_writev: 8196 { 8197 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 8198 if (vec != NULL) { 8199 ret = get_errno(writev(arg1, vec, arg3)); 8200 unlock_iovec(vec, arg2, arg3, 0); 8201 } else { 8202 ret = -host_to_target_errno(errno); 8203 } 8204 } 8205 break; 8206 case TARGET_NR_getsid: 8207 ret = get_errno(getsid(arg1)); 8208 break; 8209 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ 8210 case TARGET_NR_fdatasync: 8211 ret = get_errno(fdatasync(arg1)); 8212 break; 8213 #endif 8214 #ifdef TARGET_NR__sysctl 8215 case TARGET_NR__sysctl: 8216 /* We don't implement this, but ENOTDIR is always a safe 8217 return value. */ 8218 ret = -TARGET_ENOTDIR; 8219 break; 8220 #endif 8221 case TARGET_NR_sched_getaffinity: 8222 { 8223 unsigned int mask_size; 8224 unsigned long *mask; 8225 8226 /* 8227 * sched_getaffinity needs multiples of ulong, so need to take 8228 * care of mismatches between target ulong and host ulong sizes. 8229 */ 8230 if (arg2 & (sizeof(abi_ulong) - 1)) { 8231 ret = -TARGET_EINVAL; 8232 break; 8233 } 8234 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 8235 8236 mask = alloca(mask_size); 8237 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); 8238 8239 if (!is_error(ret)) { 8240 if (ret > arg2) { 8241 /* More data returned than the caller's buffer will fit. 8242 * This only happens if sizeof(abi_long) < sizeof(long) 8243 * and the caller passed us a buffer holding an odd number 8244 * of abi_longs. If the host kernel is actually using the 8245 * extra 4 bytes then fail EINVAL; otherwise we can just 8246 * ignore them and only copy the interesting part. 8247 */ 8248 int numcpus = sysconf(_SC_NPROCESSORS_CONF); 8249 if (numcpus > arg2 * 8) { 8250 ret = -TARGET_EINVAL; 8251 break; 8252 } 8253 ret = arg2; 8254 } 8255 8256 if (copy_to_user(arg3, mask, ret)) { 8257 goto efault; 8258 } 8259 } 8260 } 8261 break; 8262 case TARGET_NR_sched_setaffinity: 8263 { 8264 unsigned int mask_size; 8265 unsigned long *mask; 8266 8267 /* 8268 * sched_setaffinity needs multiples of ulong, so need to take 8269 * care of mismatches between target ulong and host ulong sizes. 
8270 */ 8271 if (arg2 & (sizeof(abi_ulong) - 1)) { 8272 ret = -TARGET_EINVAL; 8273 break; 8274 } 8275 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); 8276 8277 mask = alloca(mask_size); 8278 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { 8279 goto efault; 8280 } 8281 memcpy(mask, p, arg2); 8282 unlock_user_struct(p, arg2, 0); 8283 8284 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); 8285 } 8286 break; 8287 case TARGET_NR_sched_setparam: 8288 { 8289 struct sched_param *target_schp; 8290 struct sched_param schp; 8291 8292 if (arg2 == 0) { 8293 return -TARGET_EINVAL; 8294 } 8295 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) 8296 goto efault; 8297 schp.sched_priority = tswap32(target_schp->sched_priority); 8298 unlock_user_struct(target_schp, arg2, 0); 8299 ret = get_errno(sched_setparam(arg1, &schp)); 8300 } 8301 break; 8302 case TARGET_NR_sched_getparam: 8303 { 8304 struct sched_param *target_schp; 8305 struct sched_param schp; 8306 8307 if (arg2 == 0) { 8308 return -TARGET_EINVAL; 8309 } 8310 ret = get_errno(sched_getparam(arg1, &schp)); 8311 if (!is_error(ret)) { 8312 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) 8313 goto efault; 8314 target_schp->sched_priority = tswap32(schp.sched_priority); 8315 unlock_user_struct(target_schp, arg2, 1); 8316 } 8317 } 8318 break; 8319 case TARGET_NR_sched_setscheduler: 8320 { 8321 struct sched_param *target_schp; 8322 struct sched_param schp; 8323 if (arg3 == 0) { 8324 return -TARGET_EINVAL; 8325 } 8326 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) 8327 goto efault; 8328 schp.sched_priority = tswap32(target_schp->sched_priority); 8329 unlock_user_struct(target_schp, arg3, 0); 8330 ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); 8331 } 8332 break; 8333 case TARGET_NR_sched_getscheduler: 8334 ret = get_errno(sched_getscheduler(arg1)); 8335 break; 8336 case TARGET_NR_sched_yield: 8337 ret = get_errno(sched_yield()); 8338 break; 8339 case TARGET_NR_sched_get_priority_max: 8340 ret = get_errno(sched_get_priority_max(arg1)); 8341 break; 8342 case TARGET_NR_sched_get_priority_min: 8343 ret = get_errno(sched_get_priority_min(arg1)); 8344 break; 8345 case TARGET_NR_sched_rr_get_interval: 8346 { 8347 struct timespec ts; 8348 ret = get_errno(sched_rr_get_interval(arg1, &ts)); 8349 if (!is_error(ret)) { 8350 ret = host_to_target_timespec(arg2, &ts); 8351 } 8352 } 8353 break; 8354 case TARGET_NR_nanosleep: 8355 { 8356 struct timespec req, rem; 8357 target_to_host_timespec(&req, arg1); 8358 ret = get_errno(nanosleep(&req, &rem)); 8359 if (is_error(ret) && arg2) { 8360 host_to_target_timespec(arg2, &rem); 8361 } 8362 } 8363 break; 8364 #ifdef TARGET_NR_query_module 8365 case TARGET_NR_query_module: 8366 goto unimplemented; 8367 #endif 8368 #ifdef TARGET_NR_nfsservctl 8369 case TARGET_NR_nfsservctl: 8370 goto unimplemented; 8371 #endif 8372 case TARGET_NR_prctl: 8373 switch (arg1) { 8374 case PR_GET_PDEATHSIG: 8375 { 8376 int deathsig; 8377 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); 8378 if (!is_error(ret) && arg2 8379 && put_user_ual(deathsig, arg2)) { 8380 goto efault; 8381 } 8382 break; 8383 } 8384 #ifdef PR_GET_NAME 8385 case PR_GET_NAME: 8386 { 8387 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); 8388 if (!name) { 8389 goto efault; 8390 } 8391 ret = get_errno(prctl(arg1, (unsigned long)name, 8392 arg3, arg4, arg5)); 8393 unlock_user(name, arg2, 16); 8394 break; 8395 } 8396 case PR_SET_NAME: 8397 { 8398 void *name = lock_user(VERIFY_READ, arg2, 16, 1); 8399 if (!name) 
{ 8400 goto efault; 8401 } 8402 ret = get_errno(prctl(arg1, (unsigned long)name, 8403 arg3, arg4, arg5)); 8404 unlock_user(name, arg2, 0); 8405 break; 8406 } 8407 #endif 8408 default: 8409 /* Most prctl options have no pointer arguments */ 8410 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); 8411 break; 8412 } 8413 break; 8414 #ifdef TARGET_NR_arch_prctl 8415 case TARGET_NR_arch_prctl: 8416 #if defined(TARGET_I386) && !defined(TARGET_ABI32) 8417 ret = do_arch_prctl(cpu_env, arg1, arg2); 8418 break; 8419 #else 8420 goto unimplemented; 8421 #endif 8422 #endif 8423 #ifdef TARGET_NR_pread64 8424 case TARGET_NR_pread64: 8425 if (regpairs_aligned(cpu_env)) { 8426 arg4 = arg5; 8427 arg5 = arg6; 8428 } 8429 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) 8430 goto efault; 8431 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); 8432 unlock_user(p, arg2, ret); 8433 break; 8434 case TARGET_NR_pwrite64: 8435 if (regpairs_aligned(cpu_env)) { 8436 arg4 = arg5; 8437 arg5 = arg6; 8438 } 8439 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) 8440 goto efault; 8441 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); 8442 unlock_user(p, arg2, 0); 8443 break; 8444 #endif 8445 case TARGET_NR_getcwd: 8446 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) 8447 goto efault; 8448 ret = get_errno(sys_getcwd1(p, arg2)); 8449 unlock_user(p, arg1, ret); 8450 break; 8451 case TARGET_NR_capget: 8452 case TARGET_NR_capset: 8453 { 8454 struct target_user_cap_header *target_header; 8455 struct target_user_cap_data *target_data = NULL; 8456 struct __user_cap_header_struct header; 8457 struct __user_cap_data_struct data[2]; 8458 struct __user_cap_data_struct *dataptr = NULL; 8459 int i, target_datalen; 8460 int data_items = 1; 8461 8462 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { 8463 goto efault; 8464 } 8465 header.version = tswap32(target_header->version); 8466 header.pid = tswap32(target_header->pid); 8467 8468 if (header.version != _LINUX_CAPABILITY_VERSION) { 8469 /* Version 2 and up takes pointer to two user_data structs */ 8470 data_items = 2; 8471 } 8472 8473 target_datalen = sizeof(*target_data) * data_items; 8474 8475 if (arg2) { 8476 if (num == TARGET_NR_capget) { 8477 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); 8478 } else { 8479 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); 8480 } 8481 if (!target_data) { 8482 unlock_user_struct(target_header, arg1, 0); 8483 goto efault; 8484 } 8485 8486 if (num == TARGET_NR_capset) { 8487 for (i = 0; i < data_items; i++) { 8488 data[i].effective = tswap32(target_data[i].effective); 8489 data[i].permitted = tswap32(target_data[i].permitted); 8490 data[i].inheritable = tswap32(target_data[i].inheritable); 8491 } 8492 } 8493 8494 dataptr = data; 8495 } 8496 8497 if (num == TARGET_NR_capget) { 8498 ret = get_errno(capget(&header, dataptr)); 8499 } else { 8500 ret = get_errno(capset(&header, dataptr)); 8501 } 8502 8503 /* The kernel always updates version for both capget and capset */ 8504 target_header->version = tswap32(header.version); 8505 unlock_user_struct(target_header, arg1, 1); 8506 8507 if (arg2) { 8508 if (num == TARGET_NR_capget) { 8509 for (i = 0; i < data_items; i++) { 8510 target_data[i].effective = tswap32(data[i].effective); 8511 target_data[i].permitted = tswap32(data[i].permitted); 8512 target_data[i].inheritable = tswap32(data[i].inheritable); 8513 } 8514 unlock_user(target_data, arg2, target_datalen); 8515 } else { 8516 unlock_user(target_data, arg2, 0); 8517 
} 8518 } 8519 break; 8520 } 8521 case TARGET_NR_sigaltstack: 8522 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); 8523 break; 8524 8525 #ifdef CONFIG_SENDFILE 8526 case TARGET_NR_sendfile: 8527 { 8528 off_t *offp = NULL; 8529 off_t off; 8530 if (arg3) { 8531 ret = get_user_sal(off, arg3); 8532 if (is_error(ret)) { 8533 break; 8534 } 8535 offp = &off; 8536 } 8537 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 8538 if (!is_error(ret) && arg3) { 8539 abi_long ret2 = put_user_sal(off, arg3); 8540 if (is_error(ret2)) { 8541 ret = ret2; 8542 } 8543 } 8544 break; 8545 } 8546 #ifdef TARGET_NR_sendfile64 8547 case TARGET_NR_sendfile64: 8548 { 8549 off_t *offp = NULL; 8550 off_t off; 8551 if (arg3) { 8552 ret = get_user_s64(off, arg3); 8553 if (is_error(ret)) { 8554 break; 8555 } 8556 offp = &off; 8557 } 8558 ret = get_errno(sendfile(arg1, arg2, offp, arg4)); 8559 if (!is_error(ret) && arg3) { 8560 abi_long ret2 = put_user_s64(off, arg3); 8561 if (is_error(ret2)) { 8562 ret = ret2; 8563 } 8564 } 8565 break; 8566 } 8567 #endif 8568 #else 8569 case TARGET_NR_sendfile: 8570 #ifdef TARGET_NR_sendfile64 8571 case TARGET_NR_sendfile64: 8572 #endif 8573 goto unimplemented; 8574 #endif 8575 8576 #ifdef TARGET_NR_getpmsg 8577 case TARGET_NR_getpmsg: 8578 goto unimplemented; 8579 #endif 8580 #ifdef TARGET_NR_putpmsg 8581 case TARGET_NR_putpmsg: 8582 goto unimplemented; 8583 #endif 8584 #ifdef TARGET_NR_vfork 8585 case TARGET_NR_vfork: 8586 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 8587 0, 0, 0, 0)); 8588 break; 8589 #endif 8590 #ifdef TARGET_NR_ugetrlimit 8591 case TARGET_NR_ugetrlimit: 8592 { 8593 struct rlimit rlim; 8594 int resource = target_to_host_resource(arg1); 8595 ret = get_errno(getrlimit(resource, &rlim)); 8596 if (!is_error(ret)) { 8597 struct target_rlimit *target_rlim; 8598 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) 8599 goto efault; 8600 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); 8601 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); 8602 unlock_user_struct(target_rlim, arg2, 1); 8603 } 8604 break; 8605 } 8606 #endif 8607 #ifdef TARGET_NR_truncate64 8608 case TARGET_NR_truncate64: 8609 if (!(p = lock_user_string(arg1))) 8610 goto efault; 8611 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); 8612 unlock_user(p, arg1, 0); 8613 break; 8614 #endif 8615 #ifdef TARGET_NR_ftruncate64 8616 case TARGET_NR_ftruncate64: 8617 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); 8618 break; 8619 #endif 8620 #ifdef TARGET_NR_stat64 8621 case TARGET_NR_stat64: 8622 if (!(p = lock_user_string(arg1))) 8623 goto efault; 8624 ret = get_errno(stat(path(p), &st)); 8625 unlock_user(p, arg1, 0); 8626 if (!is_error(ret)) 8627 ret = host_to_target_stat64(cpu_env, arg2, &st); 8628 break; 8629 #endif 8630 #ifdef TARGET_NR_lstat64 8631 case TARGET_NR_lstat64: 8632 if (!(p = lock_user_string(arg1))) 8633 goto efault; 8634 ret = get_errno(lstat(path(p), &st)); 8635 unlock_user(p, arg1, 0); 8636 if (!is_error(ret)) 8637 ret = host_to_target_stat64(cpu_env, arg2, &st); 8638 break; 8639 #endif 8640 #ifdef TARGET_NR_fstat64 8641 case TARGET_NR_fstat64: 8642 ret = get_errno(fstat(arg1, &st)); 8643 if (!is_error(ret)) 8644 ret = host_to_target_stat64(cpu_env, arg2, &st); 8645 break; 8646 #endif 8647 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) 8648 #ifdef TARGET_NR_fstatat64 8649 case TARGET_NR_fstatat64: 8650 #endif 8651 #ifdef TARGET_NR_newfstatat 8652 case TARGET_NR_newfstatat: 8653 #endif 
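                           /* fstatat64 and newfstatat are the same call under different
                            * names: dirfd in arg1, pathname in arg2, statbuf in arg3,
                            * flags in arg4; the host stat result is converted back to the
                            * guest layout with host_to_target_stat64(). */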
8654 if (!(p = lock_user_string(arg2))) 8655 goto efault; 8656 ret = get_errno(fstatat(arg1, path(p), &st, arg4)); 8657 if (!is_error(ret)) 8658 ret = host_to_target_stat64(cpu_env, arg3, &st); 8659 break; 8660 #endif 8661 #ifdef TARGET_NR_lchown 8662 case TARGET_NR_lchown: 8663 if (!(p = lock_user_string(arg1))) 8664 goto efault; 8665 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); 8666 unlock_user(p, arg1, 0); 8667 break; 8668 #endif 8669 #ifdef TARGET_NR_getuid 8670 case TARGET_NR_getuid: 8671 ret = get_errno(high2lowuid(getuid())); 8672 break; 8673 #endif 8674 #ifdef TARGET_NR_getgid 8675 case TARGET_NR_getgid: 8676 ret = get_errno(high2lowgid(getgid())); 8677 break; 8678 #endif 8679 #ifdef TARGET_NR_geteuid 8680 case TARGET_NR_geteuid: 8681 ret = get_errno(high2lowuid(geteuid())); 8682 break; 8683 #endif 8684 #ifdef TARGET_NR_getegid 8685 case TARGET_NR_getegid: 8686 ret = get_errno(high2lowgid(getegid())); 8687 break; 8688 #endif 8689 case TARGET_NR_setreuid: 8690 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); 8691 break; 8692 case TARGET_NR_setregid: 8693 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); 8694 break; 8695 case TARGET_NR_getgroups: 8696 { 8697 int gidsetsize = arg1; 8698 target_id *target_grouplist; 8699 gid_t *grouplist; 8700 int i; 8701 8702 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8703 ret = get_errno(getgroups(gidsetsize, grouplist)); 8704 if (gidsetsize == 0) 8705 break; 8706 if (!is_error(ret)) { 8707 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); 8708 if (!target_grouplist) 8709 goto efault; 8710 for(i = 0;i < ret; i++) 8711 target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); 8712 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); 8713 } 8714 } 8715 break; 8716 case TARGET_NR_setgroups: 8717 { 8718 int gidsetsize = arg1; 8719 target_id *target_grouplist; 8720 gid_t *grouplist = NULL; 8721 int i; 8722 if (gidsetsize) { 8723 grouplist = alloca(gidsetsize * sizeof(gid_t)); 8724 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); 8725 if (!target_grouplist) { 8726 ret = -TARGET_EFAULT; 8727 goto fail; 8728 } 8729 for (i = 0; i < gidsetsize; i++) { 8730 grouplist[i] = low2highgid(tswapid(target_grouplist[i])); 8731 } 8732 unlock_user(target_grouplist, arg2, 0); 8733 } 8734 ret = get_errno(setgroups(gidsetsize, grouplist)); 8735 } 8736 break; 8737 case TARGET_NR_fchown: 8738 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); 8739 break; 8740 #if defined(TARGET_NR_fchownat) 8741 case TARGET_NR_fchownat: 8742 if (!(p = lock_user_string(arg2))) 8743 goto efault; 8744 ret = get_errno(fchownat(arg1, p, low2highuid(arg3), 8745 low2highgid(arg4), arg5)); 8746 unlock_user(p, arg2, 0); 8747 break; 8748 #endif 8749 #ifdef TARGET_NR_setresuid 8750 case TARGET_NR_setresuid: 8751 ret = get_errno(setresuid(low2highuid(arg1), 8752 low2highuid(arg2), 8753 low2highuid(arg3))); 8754 break; 8755 #endif 8756 #ifdef TARGET_NR_getresuid 8757 case TARGET_NR_getresuid: 8758 { 8759 uid_t ruid, euid, suid; 8760 ret = get_errno(getresuid(&ruid, &euid, &suid)); 8761 if (!is_error(ret)) { 8762 if (put_user_id(high2lowuid(ruid), arg1) 8763 || put_user_id(high2lowuid(euid), arg2) 8764 || put_user_id(high2lowuid(suid), arg3)) 8765 goto efault; 8766 } 8767 } 8768 break; 8769 #endif 8770 #ifdef TARGET_NR_getresgid 8771 case TARGET_NR_setresgid: 8772 ret = get_errno(setresgid(low2highgid(arg1), 8773 low2highgid(arg2), 8774 
low2highgid(arg3))); 8775 break; 8776 #endif 8777 #ifdef TARGET_NR_getresgid 8778 case TARGET_NR_getresgid: 8779 { 8780 gid_t rgid, egid, sgid; 8781 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 8782 if (!is_error(ret)) { 8783 if (put_user_id(high2lowgid(rgid), arg1) 8784 || put_user_id(high2lowgid(egid), arg2) 8785 || put_user_id(high2lowgid(sgid), arg3)) 8786 goto efault; 8787 } 8788 } 8789 break; 8790 #endif 8791 #ifdef TARGET_NR_chown 8792 case TARGET_NR_chown: 8793 if (!(p = lock_user_string(arg1))) 8794 goto efault; 8795 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); 8796 unlock_user(p, arg1, 0); 8797 break; 8798 #endif 8799 case TARGET_NR_setuid: 8800 ret = get_errno(setuid(low2highuid(arg1))); 8801 break; 8802 case TARGET_NR_setgid: 8803 ret = get_errno(setgid(low2highgid(arg1))); 8804 break; 8805 case TARGET_NR_setfsuid: 8806 ret = get_errno(setfsuid(arg1)); 8807 break; 8808 case TARGET_NR_setfsgid: 8809 ret = get_errno(setfsgid(arg1)); 8810 break; 8811 8812 #ifdef TARGET_NR_lchown32 8813 case TARGET_NR_lchown32: 8814 if (!(p = lock_user_string(arg1))) 8815 goto efault; 8816 ret = get_errno(lchown(p, arg2, arg3)); 8817 unlock_user(p, arg1, 0); 8818 break; 8819 #endif 8820 #ifdef TARGET_NR_getuid32 8821 case TARGET_NR_getuid32: 8822 ret = get_errno(getuid()); 8823 break; 8824 #endif 8825 8826 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) 8827 /* Alpha specific */ 8828 case TARGET_NR_getxuid: 8829 { 8830 uid_t euid; 8831 euid=geteuid(); 8832 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; 8833 } 8834 ret = get_errno(getuid()); 8835 break; 8836 #endif 8837 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) 8838 /* Alpha specific */ 8839 case TARGET_NR_getxgid: 8840 { 8841 uid_t egid; 8842 egid=getegid(); 8843 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; 8844 } 8845 ret = get_errno(getgid()); 8846 break; 8847 #endif 8848 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) 8849 /* Alpha specific */ 8850 case TARGET_NR_osf_getsysinfo: 8851 ret = -TARGET_EOPNOTSUPP; 8852 switch (arg1) { 8853 case TARGET_GSI_IEEE_FP_CONTROL: 8854 { 8855 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); 8856 8857 /* Copied from linux ieee_fpcr_to_swcr. */ 8858 swcr = (fpcr >> 35) & SWCR_STATUS_MASK; 8859 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; 8860 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV 8861 | SWCR_TRAP_ENABLE_DZE 8862 | SWCR_TRAP_ENABLE_OVF); 8863 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF 8864 | SWCR_TRAP_ENABLE_INE); 8865 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; 8866 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; 8867 8868 if (put_user_u64 (swcr, arg2)) 8869 goto efault; 8870 ret = 0; 8871 } 8872 break; 8873 8874 /* case GSI_IEEE_STATE_AT_SIGNAL: 8875 -- Not implemented in linux kernel. 8876 case GSI_UACPROC: 8877 -- Retrieves current unaligned access state; not much used. 8878 case GSI_PROC_TYPE: 8879 -- Retrieves implver information; surely not used. 8880 case GSI_GET_HWRPB: 8881 -- Grabs a copy of the HWRPB; surely not used. 8882 */ 8883 } 8884 break; 8885 #endif 8886 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) 8887 /* Alpha specific */ 8888 case TARGET_NR_osf_setsysinfo: 8889 ret = -TARGET_EOPNOTSUPP; 8890 switch (arg1) { 8891 case TARGET_SSI_IEEE_FP_CONTROL: 8892 { 8893 uint64_t swcr, fpcr, orig_fpcr; 8894 8895 if (get_user_u64 (swcr, arg2)) { 8896 goto efault; 8897 } 8898 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8899 fpcr = orig_fpcr & FPCR_DYN_MASK; 8900 8901 /* Copied from linux ieee_swcr_to_fpcr. 
*/ 8902 fpcr |= (swcr & SWCR_STATUS_MASK) << 35; 8903 fpcr |= (swcr & SWCR_MAP_DMZ) << 36; 8904 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV 8905 | SWCR_TRAP_ENABLE_DZE 8906 | SWCR_TRAP_ENABLE_OVF)) << 48; 8907 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF 8908 | SWCR_TRAP_ENABLE_INE)) << 57; 8909 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); 8910 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; 8911 8912 cpu_alpha_store_fpcr(cpu_env, fpcr); 8913 ret = 0; 8914 } 8915 break; 8916 8917 case TARGET_SSI_IEEE_RAISE_EXCEPTION: 8918 { 8919 uint64_t exc, fpcr, orig_fpcr; 8920 int si_code; 8921 8922 if (get_user_u64(exc, arg2)) { 8923 goto efault; 8924 } 8925 8926 orig_fpcr = cpu_alpha_load_fpcr(cpu_env); 8927 8928 /* We only add to the exception status here. */ 8929 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); 8930 8931 cpu_alpha_store_fpcr(cpu_env, fpcr); 8932 ret = 0; 8933 8934 /* Old exceptions are not signaled. */ 8935 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); 8936 8937 /* If any exceptions set by this call, 8938 and are unmasked, send a signal. */ 8939 si_code = 0; 8940 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { 8941 si_code = TARGET_FPE_FLTRES; 8942 } 8943 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { 8944 si_code = TARGET_FPE_FLTUND; 8945 } 8946 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { 8947 si_code = TARGET_FPE_FLTOVF; 8948 } 8949 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { 8950 si_code = TARGET_FPE_FLTDIV; 8951 } 8952 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { 8953 si_code = TARGET_FPE_FLTINV; 8954 } 8955 if (si_code != 0) { 8956 target_siginfo_t info; 8957 info.si_signo = SIGFPE; 8958 info.si_errno = 0; 8959 info.si_code = si_code; 8960 info._sifields._sigfault._addr 8961 = ((CPUArchState *)cpu_env)->pc; 8962 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); 8963 } 8964 } 8965 break; 8966 8967 /* case SSI_NVPAIRS: 8968 -- Used with SSIN_UACPROC to enable unaligned accesses. 8969 case SSI_IEEE_STATE_AT_SIGNAL: 8970 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: 8971 -- Not implemented in linux kernel 8972 */ 8973 } 8974 break; 8975 #endif 8976 #ifdef TARGET_NR_osf_sigprocmask 8977 /* Alpha specific. 
*/ 8978 case TARGET_NR_osf_sigprocmask: 8979 { 8980 abi_ulong mask; 8981 int how; 8982 sigset_t set, oldset; 8983 8984 switch(arg1) { 8985 case TARGET_SIG_BLOCK: 8986 how = SIG_BLOCK; 8987 break; 8988 case TARGET_SIG_UNBLOCK: 8989 how = SIG_UNBLOCK; 8990 break; 8991 case TARGET_SIG_SETMASK: 8992 how = SIG_SETMASK; 8993 break; 8994 default: 8995 ret = -TARGET_EINVAL; 8996 goto fail; 8997 } 8998 mask = arg2; 8999 target_to_host_old_sigset(&set, &mask); 9000 do_sigprocmask(how, &set, &oldset); 9001 host_to_target_old_sigset(&mask, &oldset); 9002 ret = mask; 9003 } 9004 break; 9005 #endif 9006 9007 #ifdef TARGET_NR_getgid32 9008 case TARGET_NR_getgid32: 9009 ret = get_errno(getgid()); 9010 break; 9011 #endif 9012 #ifdef TARGET_NR_geteuid32 9013 case TARGET_NR_geteuid32: 9014 ret = get_errno(geteuid()); 9015 break; 9016 #endif 9017 #ifdef TARGET_NR_getegid32 9018 case TARGET_NR_getegid32: 9019 ret = get_errno(getegid()); 9020 break; 9021 #endif 9022 #ifdef TARGET_NR_setreuid32 9023 case TARGET_NR_setreuid32: 9024 ret = get_errno(setreuid(arg1, arg2)); 9025 break; 9026 #endif 9027 #ifdef TARGET_NR_setregid32 9028 case TARGET_NR_setregid32: 9029 ret = get_errno(setregid(arg1, arg2)); 9030 break; 9031 #endif 9032 #ifdef TARGET_NR_getgroups32 9033 case TARGET_NR_getgroups32: 9034 { 9035 int gidsetsize = arg1; 9036 uint32_t *target_grouplist; 9037 gid_t *grouplist; 9038 int i; 9039 9040 grouplist = alloca(gidsetsize * sizeof(gid_t)); 9041 ret = get_errno(getgroups(gidsetsize, grouplist)); 9042 if (gidsetsize == 0) 9043 break; 9044 if (!is_error(ret)) { 9045 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); 9046 if (!target_grouplist) { 9047 ret = -TARGET_EFAULT; 9048 goto fail; 9049 } 9050 for(i = 0;i < ret; i++) 9051 target_grouplist[i] = tswap32(grouplist[i]); 9052 unlock_user(target_grouplist, arg2, gidsetsize * 4); 9053 } 9054 } 9055 break; 9056 #endif 9057 #ifdef TARGET_NR_setgroups32 9058 case TARGET_NR_setgroups32: 9059 { 9060 int gidsetsize = arg1; 9061 uint32_t *target_grouplist; 9062 gid_t *grouplist; 9063 int i; 9064 9065 grouplist = alloca(gidsetsize * sizeof(gid_t)); 9066 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); 9067 if (!target_grouplist) { 9068 ret = -TARGET_EFAULT; 9069 goto fail; 9070 } 9071 for(i = 0;i < gidsetsize; i++) 9072 grouplist[i] = tswap32(target_grouplist[i]); 9073 unlock_user(target_grouplist, arg2, 0); 9074 ret = get_errno(setgroups(gidsetsize, grouplist)); 9075 } 9076 break; 9077 #endif 9078 #ifdef TARGET_NR_fchown32 9079 case TARGET_NR_fchown32: 9080 ret = get_errno(fchown(arg1, arg2, arg3)); 9081 break; 9082 #endif 9083 #ifdef TARGET_NR_setresuid32 9084 case TARGET_NR_setresuid32: 9085 ret = get_errno(setresuid(arg1, arg2, arg3)); 9086 break; 9087 #endif 9088 #ifdef TARGET_NR_getresuid32 9089 case TARGET_NR_getresuid32: 9090 { 9091 uid_t ruid, euid, suid; 9092 ret = get_errno(getresuid(&ruid, &euid, &suid)); 9093 if (!is_error(ret)) { 9094 if (put_user_u32(ruid, arg1) 9095 || put_user_u32(euid, arg2) 9096 || put_user_u32(suid, arg3)) 9097 goto efault; 9098 } 9099 } 9100 break; 9101 #endif 9102 #ifdef TARGET_NR_setresgid32 9103 case TARGET_NR_setresgid32: 9104 ret = get_errno(setresgid(arg1, arg2, arg3)); 9105 break; 9106 #endif 9107 #ifdef TARGET_NR_getresgid32 9108 case TARGET_NR_getresgid32: 9109 { 9110 gid_t rgid, egid, sgid; 9111 ret = get_errno(getresgid(&rgid, &egid, &sgid)); 9112 if (!is_error(ret)) { 9113 if (put_user_u32(rgid, arg1) 9114 || put_user_u32(egid, arg2) 9115 || put_user_u32(sgid, arg3)) 9116 goto 
efault; 9117 } 9118 } 9119 break; 9120 #endif 9121 #ifdef TARGET_NR_chown32 9122 case TARGET_NR_chown32: 9123 if (!(p = lock_user_string(arg1))) 9124 goto efault; 9125 ret = get_errno(chown(p, arg2, arg3)); 9126 unlock_user(p, arg1, 0); 9127 break; 9128 #endif 9129 #ifdef TARGET_NR_setuid32 9130 case TARGET_NR_setuid32: 9131 ret = get_errno(setuid(arg1)); 9132 break; 9133 #endif 9134 #ifdef TARGET_NR_setgid32 9135 case TARGET_NR_setgid32: 9136 ret = get_errno(setgid(arg1)); 9137 break; 9138 #endif 9139 #ifdef TARGET_NR_setfsuid32 9140 case TARGET_NR_setfsuid32: 9141 ret = get_errno(setfsuid(arg1)); 9142 break; 9143 #endif 9144 #ifdef TARGET_NR_setfsgid32 9145 case TARGET_NR_setfsgid32: 9146 ret = get_errno(setfsgid(arg1)); 9147 break; 9148 #endif 9149 9150 case TARGET_NR_pivot_root: 9151 goto unimplemented; 9152 #ifdef TARGET_NR_mincore 9153 case TARGET_NR_mincore: 9154 { 9155 void *a; 9156 ret = -TARGET_EFAULT; 9157 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) 9158 goto efault; 9159 if (!(p = lock_user_string(arg3))) 9160 goto mincore_fail; 9161 ret = get_errno(mincore(a, arg2, p)); 9162 unlock_user(p, arg3, ret); 9163 mincore_fail: 9164 unlock_user(a, arg1, 0); 9165 } 9166 break; 9167 #endif 9168 #ifdef TARGET_NR_arm_fadvise64_64 9169 case TARGET_NR_arm_fadvise64_64: 9170 { 9171 /* 9172 * arm_fadvise64_64 looks like fadvise64_64 but 9173 * with different argument order 9174 */ 9175 abi_long temp; 9176 temp = arg3; 9177 arg3 = arg4; 9178 arg4 = temp; 9179 } 9180 #endif 9181 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) 9182 #ifdef TARGET_NR_fadvise64_64 9183 case TARGET_NR_fadvise64_64: 9184 #endif 9185 #ifdef TARGET_NR_fadvise64 9186 case TARGET_NR_fadvise64: 9187 #endif 9188 #ifdef TARGET_S390X 9189 switch (arg4) { 9190 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ 9191 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ 9192 case 6: arg4 = POSIX_FADV_DONTNEED; break; 9193 case 7: arg4 = POSIX_FADV_NOREUSE; break; 9194 default: break; 9195 } 9196 #endif 9197 ret = -posix_fadvise(arg1, arg2, arg3, arg4); 9198 break; 9199 #endif 9200 #ifdef TARGET_NR_madvise 9201 case TARGET_NR_madvise: 9202 /* A straight passthrough may not be safe because qemu sometimes 9203 turns private file-backed mappings into anonymous mappings. 9204 This will break MADV_DONTNEED. 9205 This is a hint, so ignoring and returning success is ok. 
*/ 9206 ret = get_errno(0); 9207 break; 9208 #endif 9209 #if TARGET_ABI_BITS == 32 9210 case TARGET_NR_fcntl64: 9211 { 9212 int cmd; 9213 struct flock64 fl; 9214 struct target_flock64 *target_fl; 9215 #ifdef TARGET_ARM 9216 struct target_eabi_flock64 *target_efl; 9217 #endif 9218 9219 cmd = target_to_host_fcntl_cmd(arg2); 9220 if (cmd == -TARGET_EINVAL) { 9221 ret = cmd; 9222 break; 9223 } 9224 9225 switch(arg2) { 9226 case TARGET_F_GETLK64: 9227 #ifdef TARGET_ARM 9228 if (((CPUARMState *)cpu_env)->eabi) { 9229 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 9230 goto efault; 9231 fl.l_type = tswap16(target_efl->l_type); 9232 fl.l_whence = tswap16(target_efl->l_whence); 9233 fl.l_start = tswap64(target_efl->l_start); 9234 fl.l_len = tswap64(target_efl->l_len); 9235 fl.l_pid = tswap32(target_efl->l_pid); 9236 unlock_user_struct(target_efl, arg3, 0); 9237 } else 9238 #endif 9239 { 9240 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 9241 goto efault; 9242 fl.l_type = tswap16(target_fl->l_type); 9243 fl.l_whence = tswap16(target_fl->l_whence); 9244 fl.l_start = tswap64(target_fl->l_start); 9245 fl.l_len = tswap64(target_fl->l_len); 9246 fl.l_pid = tswap32(target_fl->l_pid); 9247 unlock_user_struct(target_fl, arg3, 0); 9248 } 9249 ret = get_errno(fcntl(arg1, cmd, &fl)); 9250 if (ret == 0) { 9251 #ifdef TARGET_ARM 9252 if (((CPUARMState *)cpu_env)->eabi) { 9253 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) 9254 goto efault; 9255 target_efl->l_type = tswap16(fl.l_type); 9256 target_efl->l_whence = tswap16(fl.l_whence); 9257 target_efl->l_start = tswap64(fl.l_start); 9258 target_efl->l_len = tswap64(fl.l_len); 9259 target_efl->l_pid = tswap32(fl.l_pid); 9260 unlock_user_struct(target_efl, arg3, 1); 9261 } else 9262 #endif 9263 { 9264 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) 9265 goto efault; 9266 target_fl->l_type = tswap16(fl.l_type); 9267 target_fl->l_whence = tswap16(fl.l_whence); 9268 target_fl->l_start = tswap64(fl.l_start); 9269 target_fl->l_len = tswap64(fl.l_len); 9270 target_fl->l_pid = tswap32(fl.l_pid); 9271 unlock_user_struct(target_fl, arg3, 1); 9272 } 9273 } 9274 break; 9275 9276 case TARGET_F_SETLK64: 9277 case TARGET_F_SETLKW64: 9278 #ifdef TARGET_ARM 9279 if (((CPUARMState *)cpu_env)->eabi) { 9280 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) 9281 goto efault; 9282 fl.l_type = tswap16(target_efl->l_type); 9283 fl.l_whence = tswap16(target_efl->l_whence); 9284 fl.l_start = tswap64(target_efl->l_start); 9285 fl.l_len = tswap64(target_efl->l_len); 9286 fl.l_pid = tswap32(target_efl->l_pid); 9287 unlock_user_struct(target_efl, arg3, 0); 9288 } else 9289 #endif 9290 { 9291 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) 9292 goto efault; 9293 fl.l_type = tswap16(target_fl->l_type); 9294 fl.l_whence = tswap16(target_fl->l_whence); 9295 fl.l_start = tswap64(target_fl->l_start); 9296 fl.l_len = tswap64(target_fl->l_len); 9297 fl.l_pid = tswap32(target_fl->l_pid); 9298 unlock_user_struct(target_fl, arg3, 0); 9299 } 9300 ret = get_errno(fcntl(arg1, cmd, &fl)); 9301 break; 9302 default: 9303 ret = do_fcntl(arg1, arg2, arg3); 9304 break; 9305 } 9306 break; 9307 } 9308 #endif 9309 #ifdef TARGET_NR_cacheflush 9310 case TARGET_NR_cacheflush: 9311 /* self-modifying code is handled automatically, so nothing needed */ 9312 ret = 0; 9313 break; 9314 #endif 9315 #ifdef TARGET_NR_security 9316 case TARGET_NR_security: 9317 goto unimplemented; 9318 #endif 9319 #ifdef TARGET_NR_getpagesize 9320 case TARGET_NR_getpagesize: 9321 ret = 
TARGET_PAGE_SIZE; 9322 break; 9323 #endif 9324 case TARGET_NR_gettid: 9325 ret = get_errno(gettid()); 9326 break; 9327 #ifdef TARGET_NR_readahead 9328 case TARGET_NR_readahead: 9329 #if TARGET_ABI_BITS == 32 9330 if (regpairs_aligned(cpu_env)) { 9331 arg2 = arg3; 9332 arg3 = arg4; 9333 arg4 = arg5; 9334 } 9335 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); 9336 #else 9337 ret = get_errno(readahead(arg1, arg2, arg3)); 9338 #endif 9339 break; 9340 #endif 9341 #ifdef CONFIG_ATTR 9342 #ifdef TARGET_NR_setxattr 9343 case TARGET_NR_listxattr: 9344 case TARGET_NR_llistxattr: 9345 { 9346 void *p, *b = 0; 9347 if (arg2) { 9348 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9349 if (!b) { 9350 ret = -TARGET_EFAULT; 9351 break; 9352 } 9353 } 9354 p = lock_user_string(arg1); 9355 if (p) { 9356 if (num == TARGET_NR_listxattr) { 9357 ret = get_errno(listxattr(p, b, arg3)); 9358 } else { 9359 ret = get_errno(llistxattr(p, b, arg3)); 9360 } 9361 } else { 9362 ret = -TARGET_EFAULT; 9363 } 9364 unlock_user(p, arg1, 0); 9365 unlock_user(b, arg2, arg3); 9366 break; 9367 } 9368 case TARGET_NR_flistxattr: 9369 { 9370 void *b = 0; 9371 if (arg2) { 9372 b = lock_user(VERIFY_WRITE, arg2, arg3, 0); 9373 if (!b) { 9374 ret = -TARGET_EFAULT; 9375 break; 9376 } 9377 } 9378 ret = get_errno(flistxattr(arg1, b, arg3)); 9379 unlock_user(b, arg2, arg3); 9380 break; 9381 } 9382 case TARGET_NR_setxattr: 9383 case TARGET_NR_lsetxattr: 9384 { 9385 void *p, *n, *v = 0; 9386 if (arg3) { 9387 v = lock_user(VERIFY_READ, arg3, arg4, 1); 9388 if (!v) { 9389 ret = -TARGET_EFAULT; 9390 break; 9391 } 9392 } 9393 p = lock_user_string(arg1); 9394 n = lock_user_string(arg2); 9395 if (p && n) { 9396 if (num == TARGET_NR_setxattr) { 9397 ret = get_errno(setxattr(p, n, v, arg4, arg5)); 9398 } else { 9399 ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); 9400 } 9401 } else { 9402 ret = -TARGET_EFAULT; 9403 } 9404 unlock_user(p, arg1, 0); 9405 unlock_user(n, arg2, 0); 9406 unlock_user(v, arg3, 0); 9407 } 9408 break; 9409 case TARGET_NR_fsetxattr: 9410 { 9411 void *n, *v = 0; 9412 if (arg3) { 9413 v = lock_user(VERIFY_READ, arg3, arg4, 1); 9414 if (!v) { 9415 ret = -TARGET_EFAULT; 9416 break; 9417 } 9418 } 9419 n = lock_user_string(arg2); 9420 if (n) { 9421 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); 9422 } else { 9423 ret = -TARGET_EFAULT; 9424 } 9425 unlock_user(n, arg2, 0); 9426 unlock_user(v, arg3, 0); 9427 } 9428 break; 9429 case TARGET_NR_getxattr: 9430 case TARGET_NR_lgetxattr: 9431 { 9432 void *p, *n, *v = 0; 9433 if (arg3) { 9434 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9435 if (!v) { 9436 ret = -TARGET_EFAULT; 9437 break; 9438 } 9439 } 9440 p = lock_user_string(arg1); 9441 n = lock_user_string(arg2); 9442 if (p && n) { 9443 if (num == TARGET_NR_getxattr) { 9444 ret = get_errno(getxattr(p, n, v, arg4)); 9445 } else { 9446 ret = get_errno(lgetxattr(p, n, v, arg4)); 9447 } 9448 } else { 9449 ret = -TARGET_EFAULT; 9450 } 9451 unlock_user(p, arg1, 0); 9452 unlock_user(n, arg2, 0); 9453 unlock_user(v, arg3, arg4); 9454 } 9455 break; 9456 case TARGET_NR_fgetxattr: 9457 { 9458 void *n, *v = 0; 9459 if (arg3) { 9460 v = lock_user(VERIFY_WRITE, arg3, arg4, 0); 9461 if (!v) { 9462 ret = -TARGET_EFAULT; 9463 break; 9464 } 9465 } 9466 n = lock_user_string(arg2); 9467 if (n) { 9468 ret = get_errno(fgetxattr(arg1, n, v, arg4)); 9469 } else { 9470 ret = -TARGET_EFAULT; 9471 } 9472 unlock_user(n, arg2, 0); 9473 unlock_user(v, arg3, arg4); 9474 } 9475 break; 9476 case TARGET_NR_removexattr: 9477 case 
TARGET_NR_lremovexattr: 9478 { 9479 void *p, *n; 9480 p = lock_user_string(arg1); 9481 n = lock_user_string(arg2); 9482 if (p && n) { 9483 if (num == TARGET_NR_removexattr) { 9484 ret = get_errno(removexattr(p, n)); 9485 } else { 9486 ret = get_errno(lremovexattr(p, n)); 9487 } 9488 } else { 9489 ret = -TARGET_EFAULT; 9490 } 9491 unlock_user(p, arg1, 0); 9492 unlock_user(n, arg2, 0); 9493 } 9494 break; 9495 case TARGET_NR_fremovexattr: 9496 { 9497 void *n; 9498 n = lock_user_string(arg2); 9499 if (n) { 9500 ret = get_errno(fremovexattr(arg1, n)); 9501 } else { 9502 ret = -TARGET_EFAULT; 9503 } 9504 unlock_user(n, arg2, 0); 9505 } 9506 break; 9507 #endif 9508 #endif /* CONFIG_ATTR */ 9509 #ifdef TARGET_NR_set_thread_area 9510 case TARGET_NR_set_thread_area: 9511 #if defined(TARGET_MIPS) 9512 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; 9513 ret = 0; 9514 break; 9515 #elif defined(TARGET_CRIS) 9516 if (arg1 & 0xff) 9517 ret = -TARGET_EINVAL; 9518 else { 9519 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; 9520 ret = 0; 9521 } 9522 break; 9523 #elif defined(TARGET_I386) && defined(TARGET_ABI32) 9524 ret = do_set_thread_area(cpu_env, arg1); 9525 break; 9526 #elif defined(TARGET_M68K) 9527 { 9528 TaskState *ts = cpu->opaque; 9529 ts->tp_value = arg1; 9530 ret = 0; 9531 break; 9532 } 9533 #else 9534 goto unimplemented_nowarn; 9535 #endif 9536 #endif 9537 #ifdef TARGET_NR_get_thread_area 9538 case TARGET_NR_get_thread_area: 9539 #if defined(TARGET_I386) && defined(TARGET_ABI32) 9540 ret = do_get_thread_area(cpu_env, arg1); 9541 break; 9542 #elif defined(TARGET_M68K) 9543 { 9544 TaskState *ts = cpu->opaque; 9545 ret = ts->tp_value; 9546 break; 9547 } 9548 #else 9549 goto unimplemented_nowarn; 9550 #endif 9551 #endif 9552 #ifdef TARGET_NR_getdomainname 9553 case TARGET_NR_getdomainname: 9554 goto unimplemented_nowarn; 9555 #endif 9556 9557 #ifdef TARGET_NR_clock_gettime 9558 case TARGET_NR_clock_gettime: 9559 { 9560 struct timespec ts; 9561 ret = get_errno(clock_gettime(arg1, &ts)); 9562 if (!is_error(ret)) { 9563 host_to_target_timespec(arg2, &ts); 9564 } 9565 break; 9566 } 9567 #endif 9568 #ifdef TARGET_NR_clock_getres 9569 case TARGET_NR_clock_getres: 9570 { 9571 struct timespec ts; 9572 ret = get_errno(clock_getres(arg1, &ts)); 9573 if (!is_error(ret)) { 9574 host_to_target_timespec(arg2, &ts); 9575 } 9576 break; 9577 } 9578 #endif 9579 #ifdef TARGET_NR_clock_nanosleep 9580 case TARGET_NR_clock_nanosleep: 9581 { 9582 struct timespec ts; 9583 target_to_host_timespec(&ts, arg3); 9584 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL)); 9585 if (arg4) 9586 host_to_target_timespec(arg4, &ts); 9587 9588 #if defined(TARGET_PPC) 9589 /* clock_nanosleep is odd in that it returns positive errno values. 9590 * On PPC, CR0 bit 3 should be set in such a situation. 
*/ 9591 if (ret) { 9592 ((CPUPPCState *)cpu_env)->crf[0] |= 1; 9593 } 9594 #endif 9595 break; 9596 } 9597 #endif 9598 9599 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) 9600 case TARGET_NR_set_tid_address: 9601 ret = get_errno(set_tid_address((int *)g2h(arg1))); 9602 break; 9603 #endif 9604 9605 #if defined(TARGET_NR_tkill) && defined(__NR_tkill) 9606 case TARGET_NR_tkill: 9607 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2))); 9608 break; 9609 #endif 9610 9611 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill) 9612 case TARGET_NR_tgkill: 9613 ret = get_errno(sys_tgkill((int)arg1, (int)arg2, 9614 target_to_host_signal(arg3))); 9615 break; 9616 #endif 9617 9618 #ifdef TARGET_NR_set_robust_list 9619 case TARGET_NR_set_robust_list: 9620 case TARGET_NR_get_robust_list: 9621 /* The ABI for supporting robust futexes has userspace pass 9622 * the kernel a pointer to a linked list which is updated by 9623 * userspace after the syscall; the list is walked by the kernel 9624 * when the thread exits. Since the linked list in QEMU guest 9625 * memory isn't a valid linked list for the host and we have 9626 * no way to reliably intercept the thread-death event, we can't 9627 * support these. Silently return ENOSYS so that guest userspace 9628 * falls back to a non-robust futex implementation (which should 9629 * be OK except in the corner case of the guest crashing while 9630 * holding a mutex that is shared with another process via 9631 * shared memory). 9632 */ 9633 goto unimplemented_nowarn; 9634 #endif 9635 9636 #if defined(TARGET_NR_utimensat) 9637 case TARGET_NR_utimensat: 9638 { 9639 struct timespec *tsp, ts[2]; 9640 if (!arg3) { 9641 tsp = NULL; 9642 } else { 9643 target_to_host_timespec(ts, arg3); 9644 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); 9645 tsp = ts; 9646 } 9647 if (!arg2) 9648 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); 9649 else { 9650 if (!(p = lock_user_string(arg2))) { 9651 ret = -TARGET_EFAULT; 9652 goto fail; 9653 } 9654 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); 9655 unlock_user(p, arg2, 0); 9656 } 9657 } 9658 break; 9659 #endif 9660 case TARGET_NR_futex: 9661 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); 9662 break; 9663 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) 9664 case TARGET_NR_inotify_init: 9665 ret = get_errno(sys_inotify_init()); 9666 break; 9667 #endif 9668 #ifdef CONFIG_INOTIFY1 9669 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) 9670 case TARGET_NR_inotify_init1: 9671 ret = get_errno(sys_inotify_init1(arg1)); 9672 break; 9673 #endif 9674 #endif 9675 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) 9676 case TARGET_NR_inotify_add_watch: 9677 p = lock_user_string(arg2); 9678 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); 9679 unlock_user(p, arg2, 0); 9680 break; 9681 #endif 9682 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) 9683 case TARGET_NR_inotify_rm_watch: 9684 ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); 9685 break; 9686 #endif 9687 9688 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) 9689 case TARGET_NR_mq_open: 9690 { 9691 struct mq_attr posix_mq_attr, *attrp; 9692 9693 p = lock_user_string(arg1 - 1); 9694 if (arg4 != 0) { 9695 copy_from_user_mq_attr (&posix_mq_attr, arg4); 9696 attrp = &posix_mq_attr; 9697 } else { 9698 attrp = 0; 9699 } 9700 ret = get_errno(mq_open(p, arg2, arg3, attrp)); 9701 unlock_user (p, arg1, 0); 9702 } 9703 
break; 9704 9705 case TARGET_NR_mq_unlink: 9706 p = lock_user_string(arg1 - 1); 9707 ret = get_errno(mq_unlink(p)); 9708 unlock_user (p, arg1, 0); 9709 break; 9710 9711 case TARGET_NR_mq_timedsend: 9712 { 9713 struct timespec ts; 9714 9715 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9716 if (arg5 != 0) { 9717 target_to_host_timespec(&ts, arg5); 9718 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts)); 9719 host_to_target_timespec(arg5, &ts); 9720 } 9721 else 9722 ret = get_errno(mq_send(arg1, p, arg3, arg4)); 9723 unlock_user (p, arg2, arg3); 9724 } 9725 break; 9726 9727 case TARGET_NR_mq_timedreceive: 9728 { 9729 struct timespec ts; 9730 unsigned int prio; 9731 9732 p = lock_user (VERIFY_READ, arg2, arg3, 1); 9733 if (arg5 != 0) { 9734 target_to_host_timespec(&ts, arg5); 9735 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts)); 9736 host_to_target_timespec(arg5, &ts); 9737 } 9738 else 9739 ret = get_errno(mq_receive(arg1, p, arg3, &prio)); 9740 unlock_user (p, arg2, arg3); 9741 if (arg4 != 0) 9742 put_user_u32(prio, arg4); 9743 } 9744 break; 9745 9746 /* Not implemented for now... */ 9747 /* case TARGET_NR_mq_notify: */ 9748 /* break; */ 9749 9750 case TARGET_NR_mq_getsetattr: 9751 { 9752 struct mq_attr posix_mq_attr_in, posix_mq_attr_out; 9753 ret = 0; 9754 if (arg3 != 0) { 9755 ret = mq_getattr(arg1, &posix_mq_attr_out); 9756 copy_to_user_mq_attr(arg3, &posix_mq_attr_out); 9757 } 9758 if (arg2 != 0) { 9759 copy_from_user_mq_attr(&posix_mq_attr_in, arg2); 9760 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); 9761 } 9762 9763 } 9764 break; 9765 #endif 9766 9767 #ifdef CONFIG_SPLICE 9768 #ifdef TARGET_NR_tee 9769 case TARGET_NR_tee: 9770 { 9771 ret = get_errno(tee(arg1,arg2,arg3,arg4)); 9772 } 9773 break; 9774 #endif 9775 #ifdef TARGET_NR_splice 9776 case TARGET_NR_splice: 9777 { 9778 loff_t loff_in, loff_out; 9779 loff_t *ploff_in = NULL, *ploff_out = NULL; 9780 if (arg2) { 9781 if (get_user_u64(loff_in, arg2)) { 9782 goto efault; 9783 } 9784 ploff_in = &loff_in; 9785 } 9786 if (arg4) { 9787 if (get_user_u64(loff_out, arg4)) { 9788 goto efault; 9789 } 9790 ploff_out = &loff_out; 9791 } 9792 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); 9793 if (arg2) { 9794 if (put_user_u64(loff_in, arg2)) { 9795 goto efault; 9796 } 9797 } 9798 if (arg4) { 9799 if (put_user_u64(loff_out, arg4)) { 9800 goto efault; 9801 } 9802 } 9803 } 9804 break; 9805 #endif 9806 #ifdef TARGET_NR_vmsplice 9807 case TARGET_NR_vmsplice: 9808 { 9809 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); 9810 if (vec != NULL) { 9811 ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); 9812 unlock_iovec(vec, arg2, arg3, 0); 9813 } else { 9814 ret = -host_to_target_errno(errno); 9815 } 9816 } 9817 break; 9818 #endif 9819 #endif /* CONFIG_SPLICE */ 9820 #ifdef CONFIG_EVENTFD 9821 #if defined(TARGET_NR_eventfd) 9822 case TARGET_NR_eventfd: 9823 ret = get_errno(eventfd(arg1, 0)); 9824 fd_trans_unregister(ret); 9825 break; 9826 #endif 9827 #if defined(TARGET_NR_eventfd2) 9828 case TARGET_NR_eventfd2: 9829 { 9830 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); 9831 if (arg2 & TARGET_O_NONBLOCK) { 9832 host_flags |= O_NONBLOCK; 9833 } 9834 if (arg2 & TARGET_O_CLOEXEC) { 9835 host_flags |= O_CLOEXEC; 9836 } 9837 ret = get_errno(eventfd(arg1, host_flags)); 9838 fd_trans_unregister(ret); 9839 break; 9840 } 9841 #endif 9842 #endif /* CONFIG_EVENTFD */ 9843 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) 9844 case TARGET_NR_fallocate: 9845 #if 
TARGET_ABI_BITS == 32 9846 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), 9847 target_offset64(arg5, arg6))); 9848 #else 9849 ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); 9850 #endif 9851 break; 9852 #endif 9853 #if defined(CONFIG_SYNC_FILE_RANGE) 9854 #if defined(TARGET_NR_sync_file_range) 9855 case TARGET_NR_sync_file_range: 9856 #if TARGET_ABI_BITS == 32 9857 #if defined(TARGET_MIPS) 9858 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9859 target_offset64(arg5, arg6), arg7)); 9860 #else 9861 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), 9862 target_offset64(arg4, arg5), arg6)); 9863 #endif /* !TARGET_MIPS */ 9864 #else 9865 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); 9866 #endif 9867 break; 9868 #endif 9869 #if defined(TARGET_NR_sync_file_range2) 9870 case TARGET_NR_sync_file_range2: 9871 /* This is like sync_file_range but the arguments are reordered */ 9872 #if TARGET_ABI_BITS == 32 9873 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), 9874 target_offset64(arg5, arg6), arg2)); 9875 #else 9876 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); 9877 #endif 9878 break; 9879 #endif 9880 #endif 9881 #if defined(TARGET_NR_signalfd4) 9882 case TARGET_NR_signalfd4: 9883 ret = do_signalfd4(arg1, arg2, arg4); 9884 break; 9885 #endif 9886 #if defined(TARGET_NR_signalfd) 9887 case TARGET_NR_signalfd: 9888 ret = do_signalfd4(arg1, arg2, 0); 9889 break; 9890 #endif 9891 #if defined(CONFIG_EPOLL) 9892 #if defined(TARGET_NR_epoll_create) 9893 case TARGET_NR_epoll_create: 9894 ret = get_errno(epoll_create(arg1)); 9895 break; 9896 #endif 9897 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) 9898 case TARGET_NR_epoll_create1: 9899 ret = get_errno(epoll_create1(arg1)); 9900 break; 9901 #endif 9902 #if defined(TARGET_NR_epoll_ctl) 9903 case TARGET_NR_epoll_ctl: 9904 { 9905 struct epoll_event ep; 9906 struct epoll_event *epp = 0; 9907 if (arg4) { 9908 struct target_epoll_event *target_ep; 9909 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { 9910 goto efault; 9911 } 9912 ep.events = tswap32(target_ep->events); 9913 /* The epoll_data_t union is just opaque data to the kernel, 9914 * so we transfer all 64 bits across and need not worry what 9915 * actual data type it is. 
9916 */ 9917 ep.data.u64 = tswap64(target_ep->data.u64); 9918 unlock_user_struct(target_ep, arg4, 0); 9919 epp = &ep; 9920 } 9921 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); 9922 break; 9923 } 9924 #endif 9925 9926 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT) 9927 #define IMPLEMENT_EPOLL_PWAIT 9928 #endif 9929 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT) 9930 #if defined(TARGET_NR_epoll_wait) 9931 case TARGET_NR_epoll_wait: 9932 #endif 9933 #if defined(IMPLEMENT_EPOLL_PWAIT) 9934 case TARGET_NR_epoll_pwait: 9935 #endif 9936 { 9937 struct target_epoll_event *target_ep; 9938 struct epoll_event *ep; 9939 int epfd = arg1; 9940 int maxevents = arg3; 9941 int timeout = arg4; 9942 9943 target_ep = lock_user(VERIFY_WRITE, arg2, 9944 maxevents * sizeof(struct target_epoll_event), 1); 9945 if (!target_ep) { 9946 goto efault; 9947 } 9948 9949 ep = alloca(maxevents * sizeof(struct epoll_event)); 9950 9951 switch (num) { 9952 #if defined(IMPLEMENT_EPOLL_PWAIT) 9953 case TARGET_NR_epoll_pwait: 9954 { 9955 target_sigset_t *target_set; 9956 sigset_t _set, *set = &_set; 9957 9958 if (arg5) { 9959 target_set = lock_user(VERIFY_READ, arg5, 9960 sizeof(target_sigset_t), 1); 9961 if (!target_set) { 9962 unlock_user(target_ep, arg2, 0); 9963 goto efault; 9964 } 9965 target_to_host_sigset(set, target_set); 9966 unlock_user(target_set, arg5, 0); 9967 } else { 9968 set = NULL; 9969 } 9970 9971 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set)); 9972 break; 9973 } 9974 #endif 9975 #if defined(TARGET_NR_epoll_wait) 9976 case TARGET_NR_epoll_wait: 9977 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout)); 9978 break; 9979 #endif 9980 default: 9981 ret = -TARGET_ENOSYS; 9982 } 9983 if (!is_error(ret)) { 9984 int i; 9985 for (i = 0; i < ret; i++) { 9986 target_ep[i].events = tswap32(ep[i].events); 9987 target_ep[i].data.u64 = tswap64(ep[i].data.u64); 9988 } 9989 } 9990 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); 9991 break; 9992 } 9993 #endif 9994 #endif 9995 #ifdef TARGET_NR_prlimit64 9996 case TARGET_NR_prlimit64: 9997 { 9998 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ 9999 struct target_rlimit64 *target_rnew, *target_rold; 10000 struct host_rlimit64 rnew, rold, *rnewp = 0; 10001 int resource = target_to_host_resource(arg2); 10002 if (arg3) { 10003 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { 10004 goto efault; 10005 } 10006 rnew.rlim_cur = tswap64(target_rnew->rlim_cur); 10007 rnew.rlim_max = tswap64(target_rnew->rlim_max); 10008 unlock_user_struct(target_rnew, arg3, 0); 10009 rnewp = &rnew; 10010 } 10011 10012 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? 
                                      arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Copy the previous setting back to the guest's old_value
             * buffer (arg4) if one was supplied; arg2 is the flags word. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        fd_trans_unregister(ret);
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif

    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
 fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
 efault:
    ret = -TARGET_EFAULT;
    goto fail;
}
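
/*
 * Illustrative sketch only: the 32-bit-ABI branches of the TARGET_NR_fallocate
 * and TARGET_NR_sync_file_range cases above reassemble a 64-bit file offset
 * that the guest passed as two 32-bit syscall arguments.  target_offset64()
 * elsewhere in this file is the real helper and also handles the register
 * ordering used by big-endian and MIPS targets; the function name and the
 * fixed low/high ordering below are assumptions chosen purely to show the
 * technique, not the authoritative implementation.
 */
static inline uint64_t example_join_offset64(uint32_t low_word, uint32_t high_word)
{
    /* low word arrives in the first register, high word in the second */
    return ((uint64_t)high_word << 32) | low_word;
}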